Diffstat (limited to 'include/linux')
279 files changed, 7989 insertions, 2833 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 43b55e751dea..ebfac2fe0c81 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -49,7 +49,7 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev) return adev ? adev->handle : NULL; } -#define ACPI_COMPANION(dev) to_acpi_node((dev)->fwnode) +#define ACPI_COMPANION(dev) to_acpi_device_node((dev)->fwnode) #define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \ acpi_fwnode_handle(adev) : NULL) #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) @@ -69,7 +69,7 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev) static inline bool has_acpi_companion(struct device *dev) { - return is_acpi_node(dev->fwnode); + return is_acpi_device_node(dev->fwnode); } static inline void acpi_preset_companion(struct device *dev, @@ -131,6 +131,12 @@ static inline void acpi_initrd_override(void *data, size_t size) (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ ((struct acpi_subtable_header *)entry)->length < sizeof(*entry)) +struct acpi_subtable_proc { + int id; + acpi_tbl_entry_handler handler; + int count; +}; + char * __acpi_map_table (unsigned long phys_addr, unsigned long size); void __acpi_unmap_table(char *map, unsigned long size); int early_acpi_boot_init(void); @@ -146,9 +152,16 @@ int __init acpi_parse_entries(char *id, unsigned long table_size, struct acpi_table_header *table_header, int entry_id, unsigned int max_entries); int __init acpi_table_parse_entries(char *id, unsigned long table_size, - int entry_id, - acpi_tbl_entry_handler handler, - unsigned int max_entries); + int entry_id, + acpi_tbl_entry_handler handler, + unsigned int max_entries); +int __init acpi_table_parse_entries(char *id, unsigned long table_size, + int entry_id, + acpi_tbl_entry_handler handler, + unsigned int max_entries); +int __init acpi_table_parse_entries_array(char *id, unsigned long table_size, + struct acpi_subtable_proc *proc, int proc_num, + unsigned int max_entries); int acpi_table_parse_madt(enum acpi_madt_type id, acpi_tbl_entry_handler handler, unsigned int max_entries); @@ -193,6 +206,12 @@ int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base); void acpi_irq_stats_init(void); extern u32 acpi_irq_handled; extern u32 acpi_irq_not_handled; +extern unsigned int acpi_sci_irq; +#define INVALID_ACPI_IRQ ((unsigned)-1) +static inline bool acpi_sci_irq_valid(void) +{ + return acpi_sci_irq != INVALID_ACPI_IRQ; +} extern int sbf_port; extern unsigned long acpi_realmode_flags; @@ -201,6 +220,9 @@ int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); +void acpi_set_irq_model(enum acpi_irq_model_id model, + struct fwnode_handle *fwnode); + #ifdef CONFIG_X86_IO_APIC extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); #else @@ -462,7 +484,22 @@ static inline bool is_acpi_node(struct fwnode_handle *fwnode) return false; } -static inline struct acpi_device *to_acpi_node(struct fwnode_handle *fwnode) +static inline bool is_acpi_device_node(struct fwnode_handle *fwnode) +{ + return false; +} + +static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode) +{ + return NULL; +} + +static inline bool is_acpi_data_node(struct fwnode_handle *fwnode) +{ + return false; +} + +static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode) { return NULL; } @@ -749,22 +786,76 @@ struct 
acpi_reference_args { #ifdef CONFIG_ACPI int acpi_dev_get_property(struct acpi_device *adev, const char *name, acpi_object_type type, const union acpi_object **obj); -int acpi_dev_get_property_array(struct acpi_device *adev, const char *name, - acpi_object_type type, - const union acpi_object **obj); -int acpi_dev_get_property_reference(struct acpi_device *adev, - const char *name, size_t index, - struct acpi_reference_args *args); - -int acpi_dev_prop_get(struct acpi_device *adev, const char *propname, - void **valptr); +int acpi_node_get_property_reference(struct fwnode_handle *fwnode, + const char *name, size_t index, + struct acpi_reference_args *args); + +int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname, + void **valptr); int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname, enum dev_prop_type proptype, void *val); +int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname, + enum dev_prop_type proptype, void *val, size_t nval); int acpi_dev_prop_read(struct acpi_device *adev, const char *propname, enum dev_prop_type proptype, void *val, size_t nval); -struct acpi_device *acpi_get_next_child(struct device *dev, - struct acpi_device *child); +struct fwnode_handle *acpi_get_next_subnode(struct device *dev, + struct fwnode_handle *subnode); + +struct acpi_probe_entry; +typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, + struct acpi_probe_entry *); + +#define ACPI_TABLE_ID_LEN 5 + +/** + * struct acpi_probe_entry - boot-time probing entry + * @id: ACPI table name + * @type: Optional subtable type to match + * (if @id contains subtables) + * @subtable_valid: Optional callback to check the validity of + * the subtable + * @probe_table: Callback to the driver being probed when table + * match is successful + * @probe_subtbl: Callback to the driver being probed when table and + * subtable match (and optional callback is successful) + * @driver_data: Sideband data provided back to the driver + */ +struct acpi_probe_entry { + __u8 id[ACPI_TABLE_ID_LEN]; + __u8 type; + acpi_probe_entry_validate_subtbl subtable_valid; + union { + acpi_tbl_table_handler probe_table; + acpi_tbl_entry_handler probe_subtbl; + }; + kernel_ulong_t driver_data; +}; + +#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ + static const struct acpi_probe_entry __acpi_probe_##name \ + __used __section(__##table##_acpi_probe_table) \ + = { \ + .id = table_id, \ + .type = subtable, \ + .subtable_valid = valid, \ + .probe_table = (acpi_tbl_table_handler)fn, \ + .driver_data = data, \ + } + +#define ACPI_PROBE_TABLE(name) __##name##_acpi_probe_table +#define ACPI_PROBE_TABLE_END(name) __##name##_acpi_probe_table_end + +int __acpi_probe_device_table(struct acpi_probe_entry *start, int nr); + +#define acpi_probe_device_table(t) \ + ({ \ + extern struct acpi_probe_entry ACPI_PROBE_TABLE(t), \ + ACPI_PROBE_TABLE_END(t); \ + __acpi_probe_device_table(&ACPI_PROBE_TABLE(t), \ + (&ACPI_PROBE_TABLE_END(t) - \ + &ACPI_PROBE_TABLE(t))); \ + }) #else static inline int acpi_dev_get_property(struct acpi_device *adev, const char *name, acpi_object_type type, @@ -772,16 +863,17 @@ static inline int acpi_dev_get_property(struct acpi_device *adev, { return -ENXIO; } -static inline int acpi_dev_get_property_array(struct acpi_device *adev, - const char *name, - acpi_object_type type, - const union acpi_object **obj) + +static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode, + const char *name, 
				const char *cells_name,
+				size_t index, struct acpi_reference_args *args)
 {
 	return -ENXIO;
 }
-static inline int acpi_dev_get_property_reference(struct acpi_device *adev,
-			const char *name, const char *cells_name,
-			size_t index, struct acpi_reference_args *args)
+
+static inline int acpi_node_prop_get(struct fwnode_handle *fwnode,
+				     const char *propname,
+				     void **valptr)
 {
 	return -ENXIO;
 }
@@ -801,6 +893,14 @@ static inline int acpi_dev_prop_read_single(struct acpi_device *adev,
 	return -ENXIO;
 }
+static inline int acpi_node_prop_read(struct fwnode_handle *fwnode,
+				      const char *propname,
+				      enum dev_prop_type proptype,
+				      void *val, size_t nval)
+{
+	return -ENXIO;
+}
+
 static inline int acpi_dev_prop_read(struct acpi_device *adev,
				      const char *propname,
				      enum dev_prop_type proptype,
@@ -809,12 +909,22 @@ static inline int acpi_dev_prop_read(struct acpi_device *adev,
 	return -ENXIO;
 }
-static inline struct acpi_device *acpi_get_next_child(struct device *dev,
-						      struct acpi_device *child)
+static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
+					struct fwnode_handle *subnode)
 {
 	return NULL;
 }
+#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
+	static const void * __acpi_table_##name[]			\
+		__attribute__((unused))					\
+		 = { (void *) table_id,					\
+		     (void *) subtable,					\
+		     (void *) valid,					\
+		     (void *) fn,					\
+		     (void *) data }
+
+#define acpi_probe_device_table(t)	({ int __r = 0; __r;})
 #endif

 #endif	/*_LINUX_ACPI_H*/
diff --git a/include/linux/acpi_irq.h b/include/linux/acpi_irq.h
deleted file mode 100644
index f10c87265855..000000000000
--- a/include/linux/acpi_irq.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _LINUX_ACPI_IRQ_H
-#define _LINUX_ACPI_IRQ_H
-
-#include <linux/irq.h>
-
-#ifndef acpi_irq_init
-static inline void acpi_irq_init(void) { }
-#endif
-
-#endif /* _LINUX_ACPI_IRQ_H */
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 4fef65e57023..744b997d6a94 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -42,6 +42,7 @@ struct aer_capability_regs {
 int pci_enable_pcie_error_reporting(struct pci_dev *dev);
 int pci_disable_pcie_error_reporting(struct pci_dev *dev);
 int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
+int pci_cleanup_aer_error_status_regs(struct pci_dev *dev);
 #else
 static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
 {
@@ -55,6 +56,10 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
 {
 	return -EINVAL;
 }
+static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
+{
+	return -EINVAL;
+}
 #endif

 void cper_print_aer(struct pci_dev *dev, int cper_severity,
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 50fc66868402..9006c4e75cf7 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -41,8 +41,6 @@ struct amba_driver {
 	int		(*probe)(struct amba_device *, const struct amba_id *);
 	int		(*remove)(struct amba_device *);
 	void		(*shutdown)(struct amba_device *);
-	int		(*suspend)(struct amba_device *, pm_message_t);
-	int		(*resume)(struct amba_device *);
 	const struct amba_id	*id_table;
 };
diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h
deleted file mode 100644
index df0356220730..000000000000
--- a/include/linux/arcdevice.h
+++ /dev/null
@@ -1,342 +0,0 @@
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. NET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
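
A minimal sketch of how the boot-time probing entries added to acpi.h above are meant to be used. The driver name, the init function and the "irqchip" table name are illustrative assumptions; the linker section itself has to be emitted by the architecture's vmlinux.lds through the ACPI_PROBE_TABLE()/ACPI_PROBE_TABLE_END() markers:

	/* hypothetical driver, built with CONFIG_ACPI */
	static int __init my_intc_init(struct acpi_table_header *table)
	{
		/* parse the table, register the interrupt controller */
		return 0;
	}
	ACPI_DECLARE_PROBE_ENTRY(irqchip, my_intc, ACPI_SIG_MADT,
				 0, NULL, 0, my_intc_init);

	/* early boot code, once the ACPI tables are available: */
	acpi_probe_device_table(irqchip);
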
- * - * Definitions used by the ARCnet driver. - * - * Authors: Avery Pennarun and David Woodhouse - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ -#ifndef _LINUX_ARCDEVICE_H -#define _LINUX_ARCDEVICE_H - -#include <asm/timex.h> -#include <linux/if_arcnet.h> - -#ifdef __KERNEL__ -#include <linux/irqreturn.h> - -/* - * RECON_THRESHOLD is the maximum number of RECON messages to receive - * within one minute before printing a "cabling problem" warning. The - * default value should be fine. - * - * After that, a "cabling restored" message will be printed on the next IRQ - * if no RECON messages have been received for 10 seconds. - * - * Do not define RECON_THRESHOLD at all if you want to disable this feature. - */ -#define RECON_THRESHOLD 30 - - -/* - * Define this to the minimum "timeout" value. If a transmit takes longer - * than TX_TIMEOUT jiffies, Linux will abort the TX and retry. On a large - * network, or one with heavy network traffic, this timeout may need to be - * increased. The larger it is, though, the longer it will be between - * necessary transmits - don't set this too high. - */ -#define TX_TIMEOUT (HZ * 200 / 1000) - - -/* Display warnings about the driver being an ALPHA version. */ -#undef ALPHA_WARNING - - -/* - * Debugging bitflags: each option can be enabled individually. - * - * Note: only debug flags included in the ARCNET_DEBUG_MAX define will - * actually be available. GCC will (at least, GCC 2.7.0 will) notice - * lines using a BUGLVL not in ARCNET_DEBUG_MAX and automatically optimize - * them out. - */ -#define D_NORMAL 1 /* important operational info */ -#define D_EXTRA 2 /* useful, but non-vital information */ -#define D_INIT 4 /* show init/probe messages */ -#define D_INIT_REASONS 8 /* show reasons for discarding probes */ -#define D_RECON 32 /* print a message whenever token is lost */ -#define D_PROTO 64 /* debug auto-protocol support */ -/* debug levels below give LOTS of output during normal operation! */ -#define D_DURING 128 /* trace operations (including irq's) */ -#define D_TX 256 /* show tx packets */ -#define D_RX 512 /* show rx packets */ -#define D_SKB 1024 /* show skb's */ -#define D_SKB_SIZE 2048 /* show skb sizes */ -#define D_TIMING 4096 /* show time needed to copy buffers to card */ -#define D_DEBUG 8192 /* Very detailed debug line for line */ - -#ifndef ARCNET_DEBUG_MAX -#define ARCNET_DEBUG_MAX (127) /* change to ~0 if you want detailed debugging */ -#endif - -#ifndef ARCNET_DEBUG -#define ARCNET_DEBUG (D_NORMAL|D_EXTRA) -#endif -extern int arcnet_debug; - -/* macros to simplify debug checking */ -#define BUGLVL(x) if ((ARCNET_DEBUG_MAX)&arcnet_debug&(x)) -#define BUGMSG2(x,msg,args...) do { BUGLVL(x) printk(msg, ## args); } while (0) -#define BUGMSG(x,msg,args...) \ - BUGMSG2(x, "%s%6s: " msg, \ - x==D_NORMAL ? KERN_WARNING \ - : x < D_DURING ? 
KERN_INFO : KERN_DEBUG, \ - dev->name , ## args) - -/* see how long a function call takes to run, expressed in CPU cycles */ -#define TIME(name, bytes, call) BUGLVL(D_TIMING) { \ - unsigned long _x, _y; \ - _x = get_cycles(); \ - call; \ - _y = get_cycles(); \ - BUGMSG(D_TIMING, \ - "%s: %d bytes in %lu cycles == " \ - "%lu Kbytes/100Mcycle\n",\ - name, bytes, _y - _x, \ - 100000000 / 1024 * bytes / (_y - _x + 1));\ - } \ - else { \ - call;\ - } - - -/* - * Time needed to reset the card - in ms (milliseconds). This works on my - * SMC PC100. I can't find a reference that tells me just how long I - * should wait. - */ -#define RESETtime (300) - -/* - * These are the max/min lengths of packet payload, not including the - * arc_hardware header, but definitely including the soft header. - * - * Note: packet sizes 254, 255, 256 are impossible because of the way - * ARCnet registers work That's why RFC1201 defines "exception" packets. - * In non-RFC1201 protocols, we have to just tack some extra bytes on the - * end. - */ -#define MTU 253 /* normal packet max size */ -#define MinTU 257 /* extended packet min size */ -#define XMTU 508 /* extended packet max size */ - -/* status/interrupt mask bit fields */ -#define TXFREEflag 0x01 /* transmitter available */ -#define TXACKflag 0x02 /* transmitted msg. ackd */ -#define RECONflag 0x04 /* network reconfigured */ -#define TESTflag 0x08 /* test flag */ -#define EXCNAKflag 0x08 /* excesive nak flag */ -#define RESETflag 0x10 /* power-on-reset */ -#define RES1flag 0x20 /* reserved - usually set by jumper */ -#define RES2flag 0x40 /* reserved - usually set by jumper */ -#define NORXflag 0x80 /* receiver inhibited */ - -/* Flags used for IO-mapped memory operations */ -#define AUTOINCflag 0x40 /* Increase location with each access */ -#define IOMAPflag 0x02 /* (for 90xx) Use IO mapped memory, not mmap */ -#define ENABLE16flag 0x80 /* (for 90xx) Enable 16-bit mode */ - -/* in the command register, the following bits have these meanings: - * 0-2 command - * 3-4 page number (for enable rcv/xmt command) - * 7 receive broadcasts - */ -#define NOTXcmd 0x01 /* disable transmitter */ -#define NORXcmd 0x02 /* disable receiver */ -#define TXcmd 0x03 /* enable transmitter */ -#define RXcmd 0x04 /* enable receiver */ -#define CONFIGcmd 0x05 /* define configuration */ -#define CFLAGScmd 0x06 /* clear flags */ -#define TESTcmd 0x07 /* load test flags */ - -/* flags for "clear flags" command */ -#define RESETclear 0x08 /* power-on-reset */ -#define CONFIGclear 0x10 /* system reconfigured */ - -#define EXCNAKclear 0x0E /* Clear and acknowledge the excive nak bit */ - -/* flags for "load test flags" command */ -#define TESTload 0x08 /* test flag (diagnostic) */ - -/* byte deposited into first address of buffers on reset */ -#define TESTvalue 0321 /* that's octal for 0xD1 :) */ - -/* for "enable receiver" command */ -#define RXbcasts 0x80 /* receive broadcasts */ - -/* flags for "define configuration" command */ -#define NORMALconf 0x00 /* 1-249 byte packets */ -#define EXTconf 0x08 /* 250-504 byte packets */ - -/* card feature flags, set during auto-detection. - * (currently only used by com20020pci) - */ -#define ARC_IS_5MBIT 1 /* card default speed is 5MBit */ -#define ARC_CAN_10MBIT 2 /* card uses COM20022, supporting 10MBit, - but default is 2.5MBit. */ - - -/* information needed to define an encapsulation driver */ -struct ArcProto { - char suffix; /* a for RFC1201, e for ether-encap, etc. 
*/ - int mtu; /* largest possible packet */ - int is_ip; /* This is a ip plugin - not a raw thing */ - - void (*rx) (struct net_device * dev, int bufnum, - struct archdr * pkthdr, int length); - int (*build_header) (struct sk_buff * skb, struct net_device *dev, - unsigned short ethproto, uint8_t daddr); - - /* these functions return '1' if the skb can now be freed */ - int (*prepare_tx) (struct net_device * dev, struct archdr * pkt, int length, - int bufnum); - int (*continue_tx) (struct net_device * dev, int bufnum); - int (*ack_tx) (struct net_device * dev, int acked); -}; - -extern struct ArcProto *arc_proto_map[256], *arc_proto_default, - *arc_bcast_proto, *arc_raw_proto; - - -/* - * "Incoming" is information needed for each address that could be sending - * to us. Mostly for partially-received split packets. - */ -struct Incoming { - struct sk_buff *skb; /* packet data buffer */ - __be16 sequence; /* sequence number of assembly */ - uint8_t lastpacket, /* number of last packet (from 1) */ - numpackets; /* number of packets in split */ -}; - - -/* only needed for RFC1201 */ -struct Outgoing { - struct ArcProto *proto; /* protocol driver that owns this: - * if NULL, no packet is pending. - */ - struct sk_buff *skb; /* buffer from upper levels */ - struct archdr *pkt; /* a pointer into the skb */ - uint16_t length, /* bytes total */ - dataleft, /* bytes left */ - segnum, /* segment being sent */ - numsegs; /* number of segments */ -}; - - -struct arcnet_local { - uint8_t config, /* current value of CONFIG register */ - timeout, /* Extended timeout for COM20020 */ - backplane, /* Backplane flag for COM20020 */ - clockp, /* COM20020 clock divider */ - clockm, /* COM20020 clock multiplier flag */ - setup, /* Contents of setup1 register */ - setup2, /* Contents of setup2 register */ - intmask; /* current value of INTMASK register */ - uint8_t default_proto[256]; /* default encap to use for each host */ - int cur_tx, /* buffer used by current transmit, or -1 */ - next_tx, /* buffer where a packet is ready to send */ - cur_rx; /* current receive buffer */ - int lastload_dest, /* can last loaded packet be acked? */ - lasttrans_dest; /* can last TX'd packet be acked? */ - int timed_out; /* need to process TX timeout and drop packet */ - unsigned long last_timeout; /* time of last reported timeout */ - char *card_name; /* card ident string */ - int card_flags; /* special card features */ - - - /* On preemtive and SMB a lock is needed */ - spinlock_t lock; - - /* - * Buffer management: an ARCnet card has 4 x 512-byte buffers, each of - * which can be used for either sending or receiving. The new dynamic - * buffer management routines use a simple circular queue of available - * buffers, and take them as they're needed. This way, we simplify - * situations in which we (for example) want to pre-load a transmit - * buffer, or start receiving while we copy a received packet to - * memory. - * - * The rules: only the interrupt handler is allowed to _add_ buffers to - * the queue; thus, this doesn't require a lock. Both the interrupt - * handler and the transmit function will want to _remove_ buffers, so - * we need to handle the situation where they try to do it at the same - * time. - * - * If next_buf == first_free_buf, the queue is empty. Since there are - * only four possible buffers, the queue should never be full. 
- */ - atomic_t buf_lock; - int buf_queue[5]; - int next_buf, first_free_buf; - - /* network "reconfiguration" handling */ - unsigned long first_recon; /* time of "first" RECON message to count */ - unsigned long last_recon; /* time of most recent RECON */ - int num_recons; /* number of RECONs between first and last. */ - int network_down; /* do we think the network is down? */ - - int excnak_pending; /* We just got an excesive nak interrupt */ - - struct { - uint16_t sequence; /* sequence number (incs with each packet) */ - __be16 aborted_seq; - - struct Incoming incoming[256]; /* one from each address */ - } rfc1201; - - /* really only used by rfc1201, but we'll pretend it's not */ - struct Outgoing outgoing; /* packet currently being sent */ - - /* hardware-specific functions */ - struct { - struct module *owner; - void (*command) (struct net_device * dev, int cmd); - int (*status) (struct net_device * dev); - void (*intmask) (struct net_device * dev, int mask); - int (*reset) (struct net_device * dev, int really_reset); - void (*open) (struct net_device * dev); - void (*close) (struct net_device * dev); - - void (*copy_to_card) (struct net_device * dev, int bufnum, int offset, - void *buf, int count); - void (*copy_from_card) (struct net_device * dev, int bufnum, int offset, - void *buf, int count); - } hw; - - void __iomem *mem_start; /* pointer to ioremap'ed MMIO */ -}; - - -#define ARCRESET(x) (lp->hw.reset(dev, (x))) -#define ACOMMAND(x) (lp->hw.command(dev, (x))) -#define ASTATUS() (lp->hw.status(dev)) -#define AINTMASK(x) (lp->hw.intmask(dev, (x))) - - - -#if ARCNET_DEBUG_MAX & D_SKB -void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc); -#else -#define arcnet_dump_skb(dev,skb,desc) ; -#endif - -void arcnet_unregister_proto(struct ArcProto *proto); -irqreturn_t arcnet_interrupt(int irq, void *dev_id); -struct net_device *alloc_arcdev(const char *name); - -int arcnet_open(struct net_device *dev); -int arcnet_close(struct net_device *dev); -netdev_tx_t arcnet_send_packet(struct sk_buff *skb, - struct net_device *dev); -void arcnet_timeout(struct net_device *dev); - -#endif /* __KERNEL__ */ -#endif /* _LINUX_ARCDEVICE_H */ diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 00a5763e850e..301de78d65f7 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -81,6 +81,30 @@ #endif #endif /* atomic_add_return_relaxed */ +/* atomic_inc_return_relaxed */ +#ifndef atomic_inc_return_relaxed +#define atomic_inc_return_relaxed atomic_inc_return +#define atomic_inc_return_acquire atomic_inc_return +#define atomic_inc_return_release atomic_inc_return + +#else /* atomic_inc_return_relaxed */ + +#ifndef atomic_inc_return_acquire +#define atomic_inc_return_acquire(...) \ + __atomic_op_acquire(atomic_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic_inc_return_release +#define atomic_inc_return_release(...) \ + __atomic_op_release(atomic_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic_inc_return +#define atomic_inc_return(...) 
\ + __atomic_op_fence(atomic_inc_return, __VA_ARGS__) +#endif +#endif /* atomic_inc_return_relaxed */ + /* atomic_sub_return_relaxed */ #ifndef atomic_sub_return_relaxed #define atomic_sub_return_relaxed atomic_sub_return @@ -105,6 +129,30 @@ #endif #endif /* atomic_sub_return_relaxed */ +/* atomic_dec_return_relaxed */ +#ifndef atomic_dec_return_relaxed +#define atomic_dec_return_relaxed atomic_dec_return +#define atomic_dec_return_acquire atomic_dec_return +#define atomic_dec_return_release atomic_dec_return + +#else /* atomic_dec_return_relaxed */ + +#ifndef atomic_dec_return_acquire +#define atomic_dec_return_acquire(...) \ + __atomic_op_acquire(atomic_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic_dec_return_release +#define atomic_dec_return_release(...) \ + __atomic_op_release(atomic_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic_dec_return +#define atomic_dec_return(...) \ + __atomic_op_fence(atomic_dec_return, __VA_ARGS__) +#endif +#endif /* atomic_dec_return_relaxed */ + /* atomic_xchg_relaxed */ #ifndef atomic_xchg_relaxed #define atomic_xchg_relaxed atomic_xchg @@ -185,6 +233,31 @@ #endif #endif /* atomic64_add_return_relaxed */ +/* atomic64_inc_return_relaxed */ +#ifndef atomic64_inc_return_relaxed +#define atomic64_inc_return_relaxed atomic64_inc_return +#define atomic64_inc_return_acquire atomic64_inc_return +#define atomic64_inc_return_release atomic64_inc_return + +#else /* atomic64_inc_return_relaxed */ + +#ifndef atomic64_inc_return_acquire +#define atomic64_inc_return_acquire(...) \ + __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic64_inc_return_release +#define atomic64_inc_return_release(...) \ + __atomic_op_release(atomic64_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic64_inc_return +#define atomic64_inc_return(...) \ + __atomic_op_fence(atomic64_inc_return, __VA_ARGS__) +#endif +#endif /* atomic64_inc_return_relaxed */ + + /* atomic64_sub_return_relaxed */ #ifndef atomic64_sub_return_relaxed #define atomic64_sub_return_relaxed atomic64_sub_return @@ -209,6 +282,30 @@ #endif #endif /* atomic64_sub_return_relaxed */ +/* atomic64_dec_return_relaxed */ +#ifndef atomic64_dec_return_relaxed +#define atomic64_dec_return_relaxed atomic64_dec_return +#define atomic64_dec_return_acquire atomic64_dec_return +#define atomic64_dec_return_release atomic64_dec_return + +#else /* atomic64_dec_return_relaxed */ + +#ifndef atomic64_dec_return_acquire +#define atomic64_dec_return_acquire(...) \ + __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic64_dec_return_release +#define atomic64_dec_return_release(...) \ + __atomic_op_release(atomic64_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic64_dec_return +#define atomic64_dec_return(...) 
\ + __atomic_op_fence(atomic64_dec_return, __VA_ARGS__) +#endif +#endif /* atomic64_dec_return_relaxed */ + /* atomic64_xchg_relaxed */ #ifndef atomic64_xchg_relaxed #define atomic64_xchg_relaxed atomic64_xchg @@ -451,7 +548,6 @@ static inline int atomic_dec_if_positive(atomic_t *v) } #endif -#include <asm-generic/atomic-long.h> #ifdef CONFIG_GENERIC_ATOMIC64 #include <asm-generic/atomic64.h> #endif @@ -463,4 +559,6 @@ static inline void atomic64_andnot(long long i, atomic64_t *v) } #endif +#include <asm-generic/atomic-long.h> + #endif /* _LINUX_ATOMIC_H */ diff --git a/include/linux/audit.h b/include/linux/audit.h index b2abc996c25d..20eba1eb0a3c 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -143,7 +143,7 @@ extern void __audit_inode_child(const struct inode *parent, extern void __audit_seccomp(unsigned long syscall, long signr, int code); extern void __audit_ptrace(struct task_struct *t); -static inline int audit_dummy_context(void) +static inline bool audit_dummy_context(void) { void *p = current->audit_context; return !p || *(int *)p; @@ -345,9 +345,9 @@ static inline void audit_syscall_entry(int major, unsigned long a0, { } static inline void audit_syscall_exit(void *pt_regs) { } -static inline int audit_dummy_context(void) +static inline bool audit_dummy_context(void) { - return 1; + return true; } static inline struct filename *audit_reusename(const __user char *name) { @@ -457,7 +457,7 @@ extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp extern __printf(2, 3) void audit_log_format(struct audit_buffer *ab, const char *fmt, ...); extern void audit_log_end(struct audit_buffer *ab); -extern int audit_string_contains_control(const char *string, +extern bool audit_string_contains_control(const char *string, size_t len); extern void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf, diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index a23209b43842..1b4d69f68c33 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -116,6 +116,8 @@ struct bdi_writeback { struct list_head work_list; struct delayed_work dwork; /* work item used for writeback */ + struct list_head bdi_node; /* anchored at bdi->wb_list */ + #ifdef CONFIG_CGROUP_WRITEBACK struct percpu_ref refcnt; /* used only for !root wb's */ struct fprop_local_percpu memcg_completions; @@ -150,6 +152,7 @@ struct backing_dev_info { atomic_long_t tot_write_bandwidth; struct bdi_writeback wb; /* the root writeback info for this bdi */ + struct list_head wb_list; /* list of all wbs */ #ifdef CONFIG_CGROUP_WRITEBACK struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ struct rb_root cgwb_congested_tree; /* their congested states */ diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index d5eb4ad1c534..c82794f20110 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -13,19 +13,22 @@ #include <linux/sched.h> #include <linux/blkdev.h> #include <linux/writeback.h> -#include <linux/memcontrol.h> #include <linux/blk-cgroup.h> #include <linux/backing-dev-defs.h> #include <linux/slab.h> int __must_check bdi_init(struct backing_dev_info *bdi); -void bdi_destroy(struct backing_dev_info *bdi); +void bdi_exit(struct backing_dev_info *bdi); __printf(3, 4) int bdi_register(struct backing_dev_info *bdi, struct device *parent, const char *fmt, ...); int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); +void bdi_unregister(struct backing_dev_info *bdi); + 
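
The atomic{,64}_inc_return and atomic{,64}_dec_return ordering variants added to atomic.h above follow the existing add_return/sub_return pattern: an architecture that only implements the _relaxed form gets the acquire, release and fully ordered versions generated through __atomic_op_acquire/__atomic_op_release/__atomic_op_fence. A short usage sketch (the refcounted object is hypothetical, and real refcounting also wants an acquire on the final decrement):

	/* taking a reference needs no ordering at all */
	atomic_inc_return_relaxed(&obj->refcnt);

	/* dropping one must order earlier stores before a possible free */
	if (atomic_dec_return_release(&obj->refcnt) == 0)
		kfree(obj);
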
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); +void bdi_destroy(struct backing_dev_info *bdi); + void wb_start_writeback(struct bdi_writeback *wb, long nr_pages, bool range_cyclic, enum wb_reason reason); void wb_start_background_writeback(struct bdi_writeback *wb); @@ -263,8 +266,8 @@ static inline bool inode_cgwb_enabled(struct inode *inode) { struct backing_dev_info *bdi = inode_to_bdi(inode); - return cgroup_on_dfl(mem_cgroup_root_css->cgroup) && - cgroup_on_dfl(blkcg_root_css->cgroup) && + return cgroup_subsys_on_dfl(memory_cgrp_subsys) && + cgroup_subsys_on_dfl(io_cgrp_subsys) && bdi_cap_account_dirty(bdi) && (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) && (inode->i_sb->s_iflags & SB_I_CGROUPWB); @@ -408,61 +411,6 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) rcu_read_unlock(); } -struct wb_iter { - int start_memcg_id; - struct radix_tree_iter tree_iter; - void **slot; -}; - -static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter, - struct backing_dev_info *bdi) -{ - struct radix_tree_iter *titer = &iter->tree_iter; - - WARN_ON_ONCE(!rcu_read_lock_held()); - - if (iter->start_memcg_id >= 0) { - iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id); - iter->start_memcg_id = -1; - } else { - iter->slot = radix_tree_next_slot(iter->slot, titer, 0); - } - - if (!iter->slot) - iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0); - if (iter->slot) - return *iter->slot; - return NULL; -} - -static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter, - struct backing_dev_info *bdi, - int start_memcg_id) -{ - iter->start_memcg_id = start_memcg_id; - - if (start_memcg_id) - return __wb_iter_next(iter, bdi); - else - return &bdi->wb; -} - -/** - * bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order - * @wb_cur: cursor struct bdi_writeback pointer - * @bdi: bdi to walk wb's of - * @iter: pointer to struct wb_iter to be used as iteration buffer - * @start_memcg_id: memcg ID to start iteration from - * - * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending - * memcg ID order starting from @start_memcg_id. @iter is struct wb_iter - * to be used as temp storage during iteration. rcu_read_lock() must be - * held throughout iteration. - */ -#define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id) \ - for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id); \ - (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi)) - #else /* CONFIG_CGROUP_WRITEBACK */ static inline bool inode_cgwb_enabled(struct inode *inode) @@ -522,14 +470,6 @@ static inline void wb_blkcg_offline(struct blkcg *blkcg) { } -struct wb_iter { - int next_id; -}; - -#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \ - for ((iter)->next_id = (start_blkcg_id); \ - ({ (wb_cur) = !(iter)->next_id++ ? 
&(bdi)->wb : NULL; }); ) - static inline int inode_congested(struct inode *inode, int cong_bits) { return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 2ff4a9961e1d..3feb1b2d75d8 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -151,6 +151,8 @@ struct bcma_host_ops { #define BCMA_CORE_PCIE2 0x83C /* PCI Express Gen2 */ #define BCMA_CORE_USB30_DEV 0x83D #define BCMA_CORE_ARM_CR4 0x83E +#define BCMA_CORE_ARM_CA7 0x847 +#define BCMA_CORE_SYS_MEM 0x849 #define BCMA_CORE_DEFAULT 0xFFF #define BCMA_MAX_NR_CORES 16 diff --git a/include/linux/bitops.h b/include/linux/bitops.h index e63553386ae7..2b8ed123ad36 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -164,6 +164,8 @@ static inline __u8 ror8(__u8 word, unsigned int shift) * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit * @value: value to sign extend * @index: 0 based bit index (0<=index<32) to sign bit + * + * This is safe to use for 16- and 8-bit types as well. */ static inline __s32 sign_extend32(__u32 value, int index) { @@ -171,6 +173,17 @@ static inline __s32 sign_extend32(__u32 value, int index) return (__s32)(value << shift) >> shift; } +/** + * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit + * @value: value to sign extend + * @index: 0 based bit index (0<=index<64) to sign bit + */ +static inline __s64 sign_extend64(__u64 value, int index) +{ + __u8 shift = 63 - index; + return (__s64)(value << shift) >> shift; +} + static inline unsigned fls_long(unsigned long l) { if (sizeof(l) == 4) diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 0a5cc7a1109b..c02e669945e9 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -713,9 +713,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, if (!throtl) { blkg = blkg ?: q->root_blkg; - blkg_rwstat_add(&blkg->stat_bytes, bio->bi_flags, + blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw, bio->bi_iter.bi_size); - blkg_rwstat_add(&blkg->stat_ios, bio->bi_flags, 1); + blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1); } rcu_read_unlock(); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 5e7d43ab61c0..83cc9d4e5455 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -166,7 +166,6 @@ enum { struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, struct request_queue *q); -void blk_mq_finish_init(struct request_queue *q); int blk_mq_register_disk(struct gendisk *); void blk_mq_unregister_disk(struct gendisk *); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 19c2e947d4d1..d045ca8487af 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -35,6 +35,7 @@ struct sg_io_hdr; struct bsg_job; struct blkcg_gq; struct blk_flush_queue; +struct pr_ops; #define BLKDEV_MIN_RQ 4 #define BLKDEV_MAX_RQ 128 /* Default maximum */ @@ -369,6 +370,10 @@ struct request_queue { */ struct kobject mq_kobj; +#ifdef CONFIG_BLK_DEV_INTEGRITY + struct blk_integrity integrity; +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + #ifdef CONFIG_PM struct device *dev; int rpm_status; @@ -450,7 +455,7 @@ struct request_queue { #endif struct rcu_head rcu_head; wait_queue_head_t mq_freeze_wq; - struct percpu_ref mq_usage_counter; + struct percpu_ref q_usage_counter; struct list_head all_q_node; struct blk_mq_tag_set *tag_set; @@ -1462,22 +1467,13 @@ struct 
blk_integrity_iter { typedef int (integrity_processing_fn) (struct blk_integrity_iter *); -struct blk_integrity { - integrity_processing_fn *generate_fn; - integrity_processing_fn *verify_fn; - - unsigned short flags; - unsigned short tuple_size; - unsigned short interval; - unsigned short tag_size; - - const char *name; - - struct kobject kobj; +struct blk_integrity_profile { + integrity_processing_fn *generate_fn; + integrity_processing_fn *verify_fn; + const char *name; }; -extern bool blk_integrity_is_initialized(struct gendisk *); -extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); +extern void blk_integrity_register(struct gendisk *, struct blk_integrity *); extern void blk_integrity_unregister(struct gendisk *); extern int blk_integrity_compare(struct gendisk *, struct gendisk *); extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, @@ -1488,15 +1484,20 @@ extern bool blk_integrity_merge_rq(struct request_queue *, struct request *, extern bool blk_integrity_merge_bio(struct request_queue *, struct request *, struct bio *); -static inline -struct blk_integrity *bdev_get_integrity(struct block_device *bdev) +static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) { - return bdev->bd_disk->integrity; + struct blk_integrity *bi = &disk->queue->integrity; + + if (!bi->profile) + return NULL; + + return bi; } -static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) +static inline +struct blk_integrity *bdev_get_integrity(struct block_device *bdev) { - return disk->integrity; + return blk_get_integrity(bdev->bd_disk); } static inline bool blk_integrity_rq(struct request *rq) @@ -1570,10 +1571,9 @@ static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) { return 0; } -static inline int blk_integrity_register(struct gendisk *d, +static inline void blk_integrity_register(struct gendisk *d, struct blk_integrity *b) { - return 0; } static inline void blk_integrity_unregister(struct gendisk *d) { @@ -1598,10 +1598,7 @@ static inline bool blk_integrity_merge_bio(struct request_queue *rq, { return true; } -static inline bool blk_integrity_is_initialized(struct gendisk *g) -{ - return 0; -} + static inline bool integrity_req_gap_back_merge(struct request *req, struct bio *next) { @@ -1633,6 +1630,7 @@ struct block_device_operations { /* this callback is with swap_lock and sometimes page table lock held */ void (*swap_slot_free_notify) (struct block_device *, unsigned long); struct module *owner; + const struct pr_ops *pr_ops; }; extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, diff --git a/include/linux/blkpg.h b/include/linux/blkpg.h new file mode 100644 index 000000000000..bef124fde61e --- /dev/null +++ b/include/linux/blkpg.h @@ -0,0 +1,21 @@ +#ifndef _LINUX_BLKPG_H +#define _LINUX_BLKPG_H + +/* + * Partition table and disk geometry handling + */ + +#include <linux/compat.h> +#include <uapi/linux/blkpg.h> + +#ifdef CONFIG_COMPAT +/* For 32-bit/64-bit compatibility of struct blkpg_ioctl_arg */ +struct blkpg_compat_ioctl_arg { + compat_int_t op; + compat_int_t flags; + compat_int_t datalen; + compat_uptr_t data; +}; +#endif + +#endif /* _LINUX_BLKPG_H */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f57d7fed9ec3..de464e6683b6 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -10,7 +10,6 @@ #include <uapi/linux/bpf.h> #include <linux/workqueue.h> #include <linux/file.h> -#include <linux/perf_event.h> struct bpf_map; @@ -37,6 +36,8 @@ 
struct bpf_map { u32 key_size; u32 value_size; u32 max_entries; + u32 pages; + struct user_struct *user; const struct bpf_map_ops *ops; struct work_struct work; }; @@ -101,6 +102,8 @@ enum bpf_access_type { BPF_WRITE = 2 }; +struct bpf_prog; + struct bpf_verifier_ops { /* return eBPF function prototype for verification */ const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id); @@ -112,7 +115,7 @@ struct bpf_verifier_ops { u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg, int src_reg, int ctx_off, - struct bpf_insn *insn); + struct bpf_insn *insn, struct bpf_prog *prog); }; struct bpf_prog_type_list { @@ -121,14 +124,13 @@ struct bpf_prog_type_list { enum bpf_prog_type type; }; -struct bpf_prog; - struct bpf_prog_aux { atomic_t refcnt; u32 used_map_cnt; const struct bpf_verifier_ops *ops; struct bpf_map **used_maps; struct bpf_prog *prog; + struct user_struct *user; union { struct work_struct work; struct rcu_head rcu; @@ -165,9 +167,18 @@ struct bpf_prog *bpf_prog_get(u32 ufd); void bpf_prog_put(struct bpf_prog *prog); void bpf_prog_put_rcu(struct bpf_prog *prog); -struct bpf_map *bpf_map_get(struct fd f); +struct bpf_map *bpf_map_get(u32 ufd); +struct bpf_map *__bpf_map_get(struct fd f); void bpf_map_put(struct bpf_map *map); +extern int sysctl_unprivileged_bpf_disabled; + +int bpf_map_new_fd(struct bpf_map *map); +int bpf_prog_new_fd(struct bpf_prog *prog); + +int bpf_obj_pin_user(u32 ufd, const char __user *pathname); +int bpf_obj_get_user(const char __user *pathname); + /* verify correctness of eBPF program */ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); #else @@ -190,7 +201,6 @@ extern const struct bpf_func_proto bpf_map_lookup_elem_proto; extern const struct bpf_func_proto bpf_map_update_elem_proto; extern const struct bpf_func_proto bpf_map_delete_elem_proto; -extern const struct bpf_func_proto bpf_perf_event_read_proto; extern const struct bpf_func_proto bpf_get_prandom_u32_proto; extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; extern const struct bpf_func_proto bpf_tail_call_proto; @@ -201,4 +211,8 @@ extern const struct bpf_func_proto bpf_get_current_comm_proto; extern const struct bpf_func_proto bpf_skb_vlan_push_proto; extern const struct bpf_func_proto bpf_skb_vlan_pop_proto; +/* Shared helpers among cBPF and eBPF. */ +void bpf_user_rnd_init_once(void); +u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 697ca7795bd9..59f4a7304419 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -30,6 +30,8 @@ #define PHY_ID_BCM7439_2 0xae025080 #define PHY_ID_BCM7445 0x600d8510 +#define PHY_ID_BCM_CYGNUS 0xae025200 + #define PHY_BCM_OUI_MASK 0xfffffc00 #define PHY_BCM_OUI_1 0x00206000 #define PHY_BCM_OUI_2 0x0143bc00 @@ -138,7 +140,10 @@ /* 01010: Auto Power-Down */ #define BCM54XX_SHD_APD 0x0a +#define BCM_APD_CLR_MASK 0xFE9F /* clear bits 5, 6 & 8 */ #define BCM54XX_SHD_APD_EN 0x0020 +#define BCM_NO_ANEG_APD_EN 0x0060 /* bits 5 & 6 */ +#define BCM_APD_SINGLELP_EN 0x0100 /* Bit 8 */ #define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */ /* LED3 / ~LINKSPD[2] selector */ @@ -209,27 +214,13 @@ #define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */ #define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */ -/* - * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T - * 0x1c shadow registers. 
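
The bpf_obj_pin_user()/bpf_obj_get_user() entry points added to bpf.h above are the kernel side of persistent eBPF objects: they back the BPF_OBJ_PIN and BPF_OBJ_GET commands of the bpf(2) syscall, which attach a map or program to a name in the bpf filesystem. A rough userspace sketch (assuming bpffs is mounted at /sys/fs/bpf and prog_fd holds a loaded program):

	union bpf_attr attr = {
		.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_prog",
		.bpf_fd   = prog_fd,
	};

	if (syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr)) < 0)
		perror("BPF_OBJ_PIN");	/* e.g. bpffs not mounted */
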
- */ -static inline int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow) -{ - phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow)); - return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD)); -} - -static inline int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow, - u16 val) -{ - return phy_write(phydev, MII_BCM54XX_SHD, - MII_BCM54XX_SHD_WRITE | - MII_BCM54XX_SHD_VAL(shadow) | - MII_BCM54XX_SHD_DATA(val)); -} - #define BRCM_CL45VEN_EEE_CONTROL 0x803d #define LPI_FEATURE_EN 0x8000 #define LPI_FEATURE_EN_DIG1000X 0x4000 +/* Core register definitions*/ +#define MII_BRCM_CORE_BASE1E 0x1E +#define MII_BRCM_CORE_EXPB0 0xB0 +#define MII_BRCM_CORE_EXPB1 0xB1 + #endif /* _LINUX_BRCMPHY_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index c3a9c8fc60fa..735f9f8c4e43 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -14,9 +14,10 @@ #define _CAN_DEV_H #include <linux/can.h> -#include <linux/can/netlink.h> #include <linux/can/error.h> #include <linux/can/led.h> +#include <linux/can/netlink.h> +#include <linux/netdevice.h> /* * CAN mode @@ -77,7 +78,7 @@ struct can_priv { #define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC)) /* Drop a given socketbuffer if it does not contain a valid CAN frame. */ -static inline int can_dropped_invalid_skb(struct net_device *dev, +static inline bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb) { const struct canfd_frame *cfd = (struct canfd_frame *)skb->data; @@ -93,12 +94,12 @@ static inline int can_dropped_invalid_skb(struct net_device *dev, } else goto inval_skb; - return 0; + return false; inval_skb: kfree_skb(skb); dev->stats.tx_dropped++; - return 1; + return true; } static inline bool can_is_canfd_skb(const struct sk_buff *skb) diff --git a/include/linux/can/led.h b/include/linux/can/led.h index 146de4506d21..2746f7c2f87d 100644 --- a/include/linux/can/led.h +++ b/include/linux/can/led.h @@ -11,6 +11,7 @@ #include <linux/if.h> #include <linux/leds.h> +#include <linux/netdevice.h> enum can_led_event { CAN_LED_EVENT_OPEN, diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 8492721b39be..60d44b26276d 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -76,6 +76,7 @@ enum { CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */ CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */ CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */ + CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */ /* internal flags, do not use outside cgroup core proper */ __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ @@ -83,6 +84,17 @@ enum { }; /* + * cgroup_file is the handle for a file instance created in a cgroup which + * is used, for example, to generate file changed notifications. This can + * be obtained by setting cftype->file_offset. + */ +struct cgroup_file { + /* do not access any fields from outside cgroup core */ + struct list_head node; /* anchored at css->files */ + struct kernfs_node *kn; +}; + +/* * Per-subsystem/per-cgroup state maintained by the system. This is the * fundamental structural building block that controllers deal with. 
* @@ -122,6 +134,9 @@ struct cgroup_subsys_state { */ u64 serial_nr; + /* all cgroup_files associated with this css */ + struct list_head files; + /* percpu_ref killing and RCU release */ struct rcu_head rcu_head; struct work_struct destroy_work; @@ -196,6 +211,9 @@ struct css_set { */ struct list_head e_cset_node[CGROUP_SUBSYS_COUNT]; + /* all css_task_iters currently walking this cset */ + struct list_head task_iters; + /* For RCU-protected deletion */ struct rcu_head rcu_head; }; @@ -217,16 +235,16 @@ struct cgroup { int id; /* - * If this cgroup contains any tasks, it contributes one to - * populated_cnt. All children with non-zero popuplated_cnt of - * their own contribute one. The count is zero iff there's no task - * in this cgroup or its subtree. + * Each non-empty css_set associated with this cgroup contributes + * one to populated_cnt. All children with non-zero popuplated_cnt + * of their own contribute one. The count is zero iff there's no + * task in this cgroup or its subtree. */ int populated_cnt; struct kernfs_node *kn; /* cgroup kernfs entry */ - struct kernfs_node *procs_kn; /* kn for "cgroup.procs" */ - struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */ + struct cgroup_file procs_file; /* handle for "cgroup.procs" */ + struct cgroup_file events_file; /* handle for "cgroup.events" */ /* * The bitmask of subsystems enabled on the child cgroups. @@ -324,11 +342,6 @@ struct cftype { */ char name[MAX_CFTYPE_NAME]; unsigned long private; - /* - * If not 0, file mode is set to this value, otherwise it will - * be figured out automatically - */ - umode_t mode; /* * The maximum length of string, excluding trailing nul, that can @@ -340,6 +353,14 @@ struct cftype { unsigned int flags; /* + * If non-zero, should contain the offset from the start of css to + * a struct cgroup_file field. cgroup will record the handle of + * the created file into it. The recorded handle can be used as + * long as the containing css remains accessible. + */ + unsigned int file_offset; + + /* * Fields used for internal bookkeeping. Initialized automatically * during registration. */ @@ -414,12 +435,10 @@ struct cgroup_subsys { int (*can_fork)(struct task_struct *task, void **priv_p); void (*cancel_fork)(struct task_struct *task, void *priv); void (*fork)(struct task_struct *task, void *priv); - void (*exit)(struct cgroup_subsys_state *css, - struct cgroup_subsys_state *old_css, - struct task_struct *task); + void (*exit)(struct task_struct *task); + void (*free)(struct task_struct *task); void (*bind)(struct cgroup_subsys_state *root_css); - int disabled; int early_init; /* @@ -473,8 +492,31 @@ struct cgroup_subsys { unsigned int depends_on; }; -void cgroup_threadgroup_change_begin(struct task_struct *tsk); -void cgroup_threadgroup_change_end(struct task_struct *tsk); +extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem; + +/** + * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups + * @tsk: target task + * + * Called from threadgroup_change_begin() and allows cgroup operations to + * synchronize against threadgroup changes using a percpu_rw_semaphore. + */ +static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) +{ + percpu_down_read(&cgroup_threadgroup_rwsem); +} + +/** + * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups + * @tsk: target task + * + * Called from threadgroup_change_end(). Counterpart of + * cgroup_threadcgroup_change_begin(). 
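+ *
+ * A sketch of the intended pairing (an assumption mirroring the
+ * threadgroup_change_begin()/threadgroup_change_end() wrappers in
+ * <linux/sched.h>, which call these around fork and exec):
+ *
+ *	cgroup_threadgroup_change_begin(tsk);
+ *	... modify tsk's threadgroup (fork, exec) ...
+ *	cgroup_threadgroup_change_end(tsk);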
+ */ +static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) +{ + percpu_up_read(&cgroup_threadgroup_rwsem); +} #else /* CONFIG_CGROUPS */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index eb7ca55f72ef..22e3754f89c5 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -13,10 +13,10 @@ #include <linux/nodemask.h> #include <linux/rculist.h> #include <linux/cgroupstats.h> -#include <linux/rwsem.h> #include <linux/fs.h> #include <linux/seq_file.h> #include <linux/kernfs.h> +#include <linux/jump_label.h> #include <linux/cgroup-defs.h> @@ -41,6 +41,10 @@ struct css_task_iter { struct list_head *task_pos; struct list_head *tasks_head; struct list_head *mg_tasks_head; + + struct css_set *cur_cset; + struct task_struct *cur_task; + struct list_head iters_node; /* css_set->task_iters */ }; extern struct cgroup_root cgrp_dfl_root; @@ -50,6 +54,26 @@ extern struct css_set init_css_set; #include <linux/cgroup_subsys.h> #undef SUBSYS +#define SUBSYS(_x) \ + extern struct static_key_true _x ## _cgrp_subsys_enabled_key; \ + extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key; +#include <linux/cgroup_subsys.h> +#undef SUBSYS + +/** + * cgroup_subsys_enabled - fast test on whether a subsys is enabled + * @ss: subsystem in question + */ +#define cgroup_subsys_enabled(ss) \ + static_branch_likely(&ss ## _enabled_key) + +/** + * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy + * @ss: subsystem in question + */ +#define cgroup_subsys_on_dfl(ss) \ + static_branch_likely(&ss ## _on_dfl_key) + bool css_has_online_children(struct cgroup_subsys_state *css); struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss); struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, @@ -78,6 +102,7 @@ extern void cgroup_cancel_fork(struct task_struct *p, extern void cgroup_post_fork(struct task_struct *p, void *old_ss_priv[CGROUP_CANFORK_COUNT]); void cgroup_exit(struct task_struct *p); +void cgroup_free(struct task_struct *p); int cgroup_init_early(void); int cgroup_init(void); @@ -211,11 +236,33 @@ void css_task_iter_end(struct css_task_iter *it); * cgroup_taskset_for_each - iterate cgroup_taskset * @task: the loop cursor * @tset: taskset to iterate + * + * @tset may contain multiple tasks and they may belong to multiple + * processes. When there are multiple tasks in @tset, if a task of a + * process is in @tset, all tasks of the process are in @tset. Also, all + * are guaranteed to share the same source and destination csses. + * + * Iteration is not in any specific order. */ #define cgroup_taskset_for_each(task, tset) \ for ((task) = cgroup_taskset_first((tset)); (task); \ (task) = cgroup_taskset_next((tset))) +/** + * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset + * @leader: the loop cursor + * @tset: takset to iterate + * + * Iterate threadgroup leaders of @tset. For single-task migrations, @tset + * may not contain any. + */ +#define cgroup_taskset_for_each_leader(leader, tset) \ + for ((leader) = cgroup_taskset_first((tset)); (leader); \ + (leader) = cgroup_taskset_next((tset))) \ + if ((leader) != (leader)->group_leader) \ + ; \ + else + /* * Inline functions. 
*/ @@ -320,11 +367,11 @@ static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n) */ #ifdef CONFIG_PROVE_RCU extern struct mutex cgroup_mutex; -extern struct rw_semaphore css_set_rwsem; +extern spinlock_t css_set_lock; #define task_css_set_check(task, __c) \ rcu_dereference_check((task)->cgroups, \ lockdep_is_held(&cgroup_mutex) || \ - lockdep_is_held(&css_set_rwsem) || \ + lockdep_is_held(&css_set_lock) || \ ((task)->flags & PF_EXITING) || (__c)) #else #define task_css_set_check(task, __c) \ @@ -412,68 +459,10 @@ static inline struct cgroup *task_cgroup(struct task_struct *task, return task_css(task, subsys_id)->cgroup; } -/** - * cgroup_on_dfl - test whether a cgroup is on the default hierarchy - * @cgrp: the cgroup of interest - * - * The default hierarchy is the v2 interface of cgroup and this function - * can be used to test whether a cgroup is on the default hierarchy for - * cases where a subsystem should behave differnetly depending on the - * interface version. - * - * The set of behaviors which change on the default hierarchy are still - * being determined and the mount option is prefixed with __DEVEL__. - * - * List of changed behaviors: - * - * - Mount options "noprefix", "xattr", "clone_children", "release_agent" - * and "name" are disallowed. - * - * - When mounting an existing superblock, mount options should match. - * - * - Remount is disallowed. - * - * - rename(2) is disallowed. - * - * - "tasks" is removed. Everything should be at process granularity. Use - * "cgroup.procs" instead. - * - * - "cgroup.procs" is not sorted. pids will be unique unless they got - * recycled inbetween reads. - * - * - "release_agent" and "notify_on_release" are removed. Replacement - * notification mechanism will be implemented. - * - * - "cgroup.clone_children" is removed. - * - * - "cgroup.subtree_populated" is available. Its value is 0 if the cgroup - * and its descendants contain no task; otherwise, 1. The file also - * generates kernfs notification which can be monitored through poll and - * [di]notify when the value of the file changes. - * - * - cpuset: tasks will be kept in empty cpusets when hotplug happens and - * take masks of ancestors with non-empty cpus/mems, instead of being - * moved to an ancestor. - * - * - cpuset: a task can be moved into an empty cpuset, and again it takes - * masks of ancestors. - * - * - memcg: use_hierarchy is on by default and the cgroup file for the flag - * is not created. - * - * - blkcg: blk-throttle becomes properly hierarchical. - * - * - debug: disallowed on the default hierarchy. - */ -static inline bool cgroup_on_dfl(const struct cgroup *cgrp) -{ - return cgrp->root == &cgrp_dfl_root; -} - /* no synchronization, the result can only be used as a hint */ -static inline bool cgroup_has_tasks(struct cgroup *cgrp) +static inline bool cgroup_is_populated(struct cgroup *cgrp) { - return !list_empty(&cgrp->cset_links); + return cgrp->populated_cnt; } /* returns ino associated with a cgroup */ @@ -527,6 +516,19 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp) pr_cont_kernfs_path(cgrp->kn); } +/** + * cgroup_file_notify - generate a file modified event for a cgroup_file + * @cfile: target cgroup_file + * + * @cfile must have been obtained by setting cftype->file_offset. 
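+ *
+ * A minimal sketch (the controller and file names are hypothetical):
+ *
+ *	struct my_css {
+ *		struct cgroup_subsys_state css;
+ *		struct cgroup_file events_file;
+ *	};
+ *
+ *	static struct cftype my_files[] = {
+ *		{
+ *			.name = "events",
+ *			.file_offset = offsetof(struct my_css, events_file),
+ *			.seq_show = my_events_show,
+ *		},
+ *		{ }
+ *	};
+ *
+ * after which a state change is reported with
+ * cgroup_file_notify(&my_cssp->events_file), where my_cssp points to
+ * the containing struct my_css.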
+ */ +static inline void cgroup_file_notify(struct cgroup_file *cfile) +{ + /* might not have been created due to one of the CFTYPE selector flags */ + if (cfile->kn) + kernfs_notify(cfile->kn); +} + #else /* !CONFIG_CGROUPS */ struct cgroup_subsys_state; @@ -546,6 +548,7 @@ static inline void cgroup_cancel_fork(struct task_struct *p, static inline void cgroup_post_fork(struct task_struct *p, void *ss_priv[CGROUP_CANFORK_COUNT]) {} static inline void cgroup_exit(struct task_struct *p) {} +static inline void cgroup_free(struct task_struct *p) {} static inline int cgroup_init_early(void) { return 0; } static inline int cgroup_init(void) { return 0; } diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 3ecc07d0da77..c56988ac63f7 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -500,13 +500,14 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name, * * Clock with adjustable fractional divider affecting its output frequency. */ - struct clk_fractional_divider { struct clk_hw hw; void __iomem *reg; u8 mshift; + u8 mwidth; u32 mmask; u8 nshift; + u8 nwidth; u32 nmask; u8 flags; spinlock_t *lock; @@ -518,6 +519,41 @@ struct clk *clk_register_fractional_divider(struct device *dev, void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, u8 clk_divider_flags, spinlock_t *lock); +/** + * struct clk_multiplier - adjustable multiplier clock + * + * @hw: handle between common and hardware-specific interfaces + * @reg: register containing the multiplier + * @shift: shift to the multiplier bit field + * @width: width of the multiplier bit field + * @lock: register lock + * + * Clock with an adjustable multiplier affecting its output frequency. + * Implements .recalc_rate, .set_rate and .round_rate + * + * Flags: + * CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read + * from the register, with 0 being a valid value effectively + * zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is + * set, then a null multiplier will be considered as a bypass, + * leaving the parent rate unmodified. + * CLK_MULTIPLIER_ROUND_CLOSEST - Makes the best calculated divider to be + * rounded to the closest integer instead of the down one. 
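+ *
+ * Worked example (assuming the usual read-out, rate = parent_rate *
+ * ((reg >> shift) & ((1 << width) - 1))): with shift = 4, width = 3
+ * and a register value of 0x30, the multiplier field is
+ * (0x30 >> 4) & 0x7 = 3, so a 24 MHz parent produces a 72 MHz output.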
+ */ +struct clk_multiplier { + struct clk_hw hw; + void __iomem *reg; + u8 shift; + u8 width; + u8 flags; + spinlock_t *lock; +}; + +#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0) +#define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1) + +extern const struct clk_ops clk_multiplier_ops; + /*** * struct clk_composite - aggregate clock of mux, divider and gate clocks * @@ -606,7 +642,7 @@ void clk_unregister(struct clk *clk); void devm_clk_unregister(struct device *dev, struct clk *clk); /* helper functions */ -const char *__clk_get_name(struct clk *clk); +const char *__clk_get_name(const struct clk *clk); const char *clk_hw_get_name(const struct clk_hw *hw); struct clk_hw *__clk_get_hw(struct clk *clk); unsigned int clk_hw_get_num_parents(const struct clk_hw *hw); @@ -618,6 +654,7 @@ unsigned long clk_hw_get_rate(const struct clk_hw *hw); unsigned long __clk_get_flags(struct clk *clk); unsigned long clk_hw_get_flags(const struct clk_hw *hw); bool clk_hw_is_prepared(const struct clk_hw *hw); +bool clk_hw_is_enabled(const struct clk_hw *hw); bool __clk_is_enabled(struct clk *clk); struct clk *__clk_lookup(const char *name); int __clk_mux_determine_rate(struct clk_hw *hw, @@ -690,6 +727,15 @@ static inline struct clk *of_clk_src_onecell_get( { return ERR_PTR(-ENOENT); } +static inline int of_clk_get_parent_count(struct device_node *np) +{ + return 0; +} +static inline int of_clk_parent_fill(struct device_node *np, + const char **parents, unsigned int size) +{ + return 0; +} static inline const char *of_clk_get_parent_name(struct device_node *np, int index) { diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index 7669f7618f39..1e6932222e11 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -164,6 +164,7 @@ extern void __iomem *at91_pmc_base; #define AT91_PMC_MOSCSELS (1 << 16) /* Main Oscillator Selection [some SAM9] */ #define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */ #define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */ +#define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks */ #define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */ #define AT91_PMC_PLLICPR 0x80 /* PLL Charge Pump Current Register */ @@ -182,13 +183,18 @@ extern void __iomem *at91_pmc_base; #define AT91_PMC_PCSR1 0x108 /* Peripheral Clock Enable Register 1 */ #define AT91_PMC_PCR 0x10c /* Peripheral Control Register [some SAM9 and SAMA5] */ -#define AT91_PMC_PCR_PID (0x3f << 0) /* Peripheral ID */ -#define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */ -#define AT91_PMC_PCR_DIV(n) ((n) << 16) /* Divisor Value */ -#define AT91_PMC_PCR_DIV0 0x0 /* Peripheral clock is MCK */ -#define AT91_PMC_PCR_DIV2 0x1 /* Peripheral clock is MCK/2 */ -#define AT91_PMC_PCR_DIV4 0x2 /* Peripheral clock is MCK/4 */ -#define AT91_PMC_PCR_DIV8 0x3 /* Peripheral clock is MCK/8 */ -#define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */ +#define AT91_PMC_PCR_PID_MASK 0x3f +#define AT91_PMC_PCR_GCKCSS_OFFSET 8 +#define AT91_PMC_PCR_GCKCSS_MASK (0x7 << AT91_PMC_PCR_GCKCSS_OFFSET) +#define AT91_PMC_PCR_GCKCSS(n) ((n) << AT91_PMC_PCR_GCKCSS_OFFSET) /* GCK Clock Source Selection */ +#define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */ +#define AT91_PMC_PCR_DIV_OFFSET 16 +#define AT91_PMC_PCR_DIV_MASK (0x3 << AT91_PMC_PCR_DIV_OFFSET) +#define AT91_PMC_PCR_DIV(n) ((n) << AT91_PMC_PCR_DIV_OFFSET) /* Divisor Value */ +#define AT91_PMC_PCR_GCKDIV_OFFSET 20 +#define AT91_PMC_PCR_GCKDIV_MASK (0xff << AT91_PMC_PCR_GCKDIV_OFFSET) +#define AT91_PMC_PCR_GCKDIV(n) 
((n) << AT91_PMC_PCR_GCKDIV_OFFSET) /* Generated Clock Divisor Value */ +#define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */ +#define AT91_PMC_PCR_GCKEN (0x1 << 29) /* GCK Enable */ #endif diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 278dd279a7a8..7784b597e959 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -246,16 +246,13 @@ extern int clocksource_i8253_init(void); #define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \ OF_DECLARE_1(clksrc, name, compat, fn) -#ifdef CONFIG_CLKSRC_OF -extern void clocksource_of_init(void); +#ifdef CONFIG_CLKSRC_PROBE +extern void clocksource_probe(void); #else -static inline void clocksource_of_init(void) {} +static inline void clocksource_probe(void) {} #endif -#ifdef CONFIG_ACPI -void acpi_generic_timer_init(void); -#else -static inline void acpi_generic_timer_init(void) { } -#endif +#define CLOCKSOURCE_ACPI_DECLARE(name, table_id, fn) \ + ACPI_DECLARE_PROBE_ENTRY(clksrc, name, table_id, 0, NULL, 0, fn) #endif /* _LINUX_CLOCKSOURCE_H */ diff --git a/include/linux/cma.h b/include/linux/cma.h index f7ef093ec49a..29f9e774ab76 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -26,6 +26,6 @@ extern int __init cma_declare_contiguous(phys_addr_t base, extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, unsigned int order_per_bit, struct cma **res_cma); -extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align); +extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align); extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); #endif diff --git a/include/linux/com20020.h b/include/linux/com20020.h deleted file mode 100644 index 85898995b234..000000000000 --- a/include/linux/com20020.h +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Linux ARCnet driver - COM20020 chipset support - function declarations - * - * Written 1997 by David Woodhouse. - * Written 1994-1999 by Avery Pennarun. - * Derived from skeleton.c by Donald Becker. - * - * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com) - * for sponsoring the further development of this driver. - * - * ********************** - * - * The original copyright of skeleton.c was as follows: - * - * skeleton.c Written 1993 by Donald Becker. - * Copyright 1993 United States Government as represented by the - * Director, National Security Agency. This software may only be used - * and distributed according to the terms of the GNU General Public License as - * modified by SRC, incorporated herein by reference. - * - * ********************** - * - * For more details, see drivers/net/arcnet.c - * - * ********************** - */ -#ifndef __COM20020_H -#define __COM20020_H - -int com20020_check(struct net_device *dev); -int com20020_found(struct net_device *dev, int shared); -extern const struct net_device_ops com20020_netdev_ops; - -/* The number of low I/O ports used by the card. */ -#define ARCNET_TOTAL_SIZE 8 - -/* various register addresses */ -#ifdef CONFIG_SA1100_CT6001 -#define BUS_ALIGN 2 /* 8 bit device on a 16 bit bus - needs padding */ -#else -#define BUS_ALIGN 1 -#endif - -#define PLX_PCI_MAX_CARDS 2 - -struct com20020_pci_channel_map { - u32 bar; - u32 offset; - u32 size; /* 0x00 - auto, e.g. 
length of entire bar */ -}; - -struct com20020_pci_card_info { - const char *name; - int devcount; - - struct com20020_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CARDS]; - - unsigned int flags; -}; - -struct com20020_priv { - struct com20020_pci_card_info *ci; - struct list_head list_dev; -}; - -struct com20020_dev { - struct list_head list; - struct net_device *dev; - - struct com20020_priv *pci_priv; - int index; -}; - -#define _INTMASK (ioaddr+BUS_ALIGN*0) /* writable */ -#define _STATUS (ioaddr+BUS_ALIGN*0) /* readable */ -#define _COMMAND (ioaddr+BUS_ALIGN*1) /* standard arcnet commands */ -#define _DIAGSTAT (ioaddr+BUS_ALIGN*1) /* diagnostic status register */ -#define _ADDR_HI (ioaddr+BUS_ALIGN*2) /* control registers for IO-mapped memory */ -#define _ADDR_LO (ioaddr+BUS_ALIGN*3) -#define _MEMDATA (ioaddr+BUS_ALIGN*4) /* data port for IO-mapped memory */ -#define _SUBADR (ioaddr+BUS_ALIGN*5) /* the extended port _XREG refers to */ -#define _CONFIG (ioaddr+BUS_ALIGN*6) /* configuration register */ -#define _XREG (ioaddr+BUS_ALIGN*7) /* extra registers (indexed by _CONFIG - or _SUBADR) */ - -/* in the ADDR_HI register */ -#define RDDATAflag 0x80 /* next access is a read (not a write) */ - -/* in the DIAGSTAT register */ -#define NEWNXTIDflag 0x02 /* ID to which token is passed has changed */ - -/* in the CONFIG register */ -#define RESETcfg 0x80 /* put card in reset state */ -#define TXENcfg 0x20 /* enable TX */ - -/* in SETUP register */ -#define PROMISCset 0x10 /* enable RCV_ALL */ -#define P1MODE 0x80 /* enable P1-MODE for Backplane */ -#define SLOWARB 0x01 /* enable Slow Arbitration for >=5Mbps */ - -/* COM2002x */ -#define SUB_TENTATIVE 0 /* tentative node ID */ -#define SUB_NODE 1 /* node ID */ -#define SUB_SETUP1 2 /* various options */ -#define SUB_TEST 3 /* test/diag register */ - -/* COM20022 only */ -#define SUB_SETUP2 4 /* sundry options */ -#define SUB_BUSCTL 5 /* bus control options */ -#define SUB_DMACOUNT 6 /* DMA count options */ - -#define SET_SUBADR(x) do { \ - if ((x) < 4) \ - { \ - lp->config = (lp->config & ~0x03) | (x); \ - SETCONF; \ - } \ - else \ - { \ - outb(x, _SUBADR); \ - } \ -} while (0) - -#undef ARCRESET -#undef ASTATUS -#undef ACOMMAND -#undef AINTMASK - -#define ARCRESET { outb(lp->config | 0x80, _CONFIG); \ - udelay(5); \ - outb(lp->config , _CONFIG); \ - } -#define ARCRESET0 { outb(0x18 | 0x80, _CONFIG); \ - udelay(5); \ - outb(0x18 , _CONFIG); \ - } - -#define ASTATUS() inb(_STATUS) -#define ADIAGSTATUS() inb(_DIAGSTAT) -#define ACOMMAND(cmd) outb((cmd),_COMMAND) -#define AINTMASK(msk) outb((msk),_INTMASK) - -#define SETCONF outb(lp->config, _CONFIG) - -#endif /* __COM20020_H */ diff --git a/include/linux/compaction.h b/include/linux/compaction.h index aa8f61cf3a19..4cd4ddf64cc7 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -15,7 +15,8 @@ /* For more detailed tracepoint output */ #define COMPACT_NO_SUITABLE_PAGE 5 #define COMPACT_NOT_SUITABLE_ZONE 6 -/* When adding new state, please change compaction_status_string, too */ +#define COMPACT_CONTENDED 7 +/* When adding new states, please adjust include/trace/events/compaction.h */ /* Used to signal whether compaction detected need_sched() or lock contention */ /* No contention detected */ diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index dfaa7b3e9ae9..22ab246feed3 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -205,11 +205,31 @@ #if GCC_VERSION >= 40600 /* - * Tell the optimizer that something else uses this 
function or variable. + * When used with Link Time Optimization, gcc can optimize away C functions or + * variables which are referenced only from assembly code. __visible tells the + * optimizer that something else uses this function or variable, thus preventing + * this. */ #define __visible __attribute__((externally_visible)) #endif + +#if GCC_VERSION >= 40900 && !defined(__CHECKER__) +/* + * __assume_aligned(n, k): Tell the optimizer that the returned + * pointer can be assumed to be k modulo n. The second argument is + * optional (default 0), so we use a variadic macro to make the + * shorthand. + * + * Beware: Do not apply this to functions which may return + * ERR_PTRs. Also, it is probably unwise to apply it to functions + * returning extra information in the low bits (but in that case the + * compiler should see some alignment anyway, when the return value is + * massaged by 'flags = ptr & 3; ptr &= ~3;'). + */ +#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__))) +#endif + /* * GCC 'asm goto' miscompiles certain code sequences: * @@ -237,12 +257,25 @@ #define KASAN_ABI_VERSION 3 #endif +#if GCC_VERSION >= 40902 +/* + * Tell the compiler that address safety instrumentation (KASAN) + * should not be applied to that function. + * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + */ +#define __no_sanitize_address __attribute__((no_sanitize_address)) +#endif + #endif /* gcc version >= 40000 specific checks */ #if !defined(__noclone) #define __noclone /* not needed */ #endif +#if !defined(__no_sanitize_address) +#define __no_sanitize_address +#endif + /* * A trick to suppress uninitialized variable warning without generating any * code diff --git a/include/linux/compiler.h b/include/linux/compiler.h index c836eb2dc44d..4dac1036594f 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -56,7 +56,7 @@ extern void __chk_io_ptr(const volatile void __iomem *); #include <linux/compiler-gcc.h> #endif -#ifdef CC_USING_HOTPATCH +#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__) #define notrace __attribute__((hotpatch(0,0))) #else #define notrace __attribute__((no_instrument_function)) @@ -198,19 +198,45 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); #include <uapi/linux/types.h> -static __always_inline void __read_once_size(const volatile void *p, void *res, int size) +#define __READ_ONCE_SIZE \ +({ \ + switch (size) { \ + case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \ + case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \ + case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \ + case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \ + default: \ + barrier(); \ + __builtin_memcpy((void *)res, (const void *)p, size); \ + barrier(); \ + } \ +}) + +static __always_inline +void __read_once_size(const volatile void *p, void *res, int size) { - switch (size) { - case 1: *(__u8 *)res = *(volatile __u8 *)p; break; - case 2: *(__u16 *)res = *(volatile __u16 *)p; break; - case 4: *(__u32 *)res = *(volatile __u32 *)p; break; - case 8: *(__u64 *)res = *(volatile __u64 *)p; break; - default: - barrier(); - __builtin_memcpy((void *)res, (const void *)p, size); - barrier(); - } + __READ_ONCE_SIZE; +} + +#ifdef CONFIG_KASAN +/* + * This function is not 'inline' because __no_sanitize_address conflicts + * with inlining. Attempt to inline it may cause a build failure.
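+ *
+ * The nocheck variant backs READ_ONCE_NOCHECK() defined further below;
+ * an illustrative caller (not from this patch) is a stack unwinder
+ * peeking at stack slots that KASAN may have poisoned:
+ *
+ *	frame = READ_ONCE_NOCHECK(*(unsigned long *)fp);
+ *
+ * For the inlining conflict mentioned above, see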
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + * '__maybe_unused' allows us to avoid defined-but-not-used warnings. + */ +static __no_sanitize_address __maybe_unused +void __read_once_size_nocheck(const volatile void *p, void *res, int size) +{ + __READ_ONCE_SIZE; } +#else +static __always_inline +void __read_once_size_nocheck(const volatile void *p, void *res, int size) +{ + __READ_ONCE_SIZE; +} +#endif static __always_inline void __write_once_size(volatile void *p, void *res, int size) { @@ -248,8 +274,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s * required ordering. */ -#define READ_ONCE(x) \ - ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) +#define __READ_ONCE(x, check) \ +({ \ + union { typeof(x) __val; char __c[1]; } __u; \ + if (check) \ + __read_once_size(&(x), __u.__c, sizeof(x)); \ + else \ + __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ +}) +#define READ_ONCE(x) __READ_ONCE(x, 1) + +/* + * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need + * to hide memory access from KASAN. + */ +#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) #define WRITE_ONCE(x, val) \ ({ \ @@ -259,22 +299,6 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s __u.__val; \ }) -/** - * READ_ONCE_CTRL - Read a value heading a control dependency - * @x: The value to be read, heading the control dependency - * - * Control dependencies are tricky. See Documentation/memory-barriers.txt - * for important information on how to use them. Note that in many cases, - * use of smp_load_acquire() will be much simpler. Control dependencies - * should be avoided except on the hottest of hotpaths. - */ -#define READ_ONCE_CTRL(x) \ -({ \ - typeof(x) __val = READ_ONCE(x); \ - smp_read_barrier_depends(); /* Enforce control dependency. */ \ - __val; \ -}) - #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ @@ -393,6 +417,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s #define __visible #endif +/* + * Assume alignment of return value. + */ +#ifndef __assume_aligned +#define __assume_aligned(a, ...) +#endif + + /* Are two types/vars the same type (ignoring qualifiers)? */ #ifndef __same_type # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) diff --git a/include/linux/coresight.h b/include/linux/coresight.h index c69e1b932809..a7cabfa23b55 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -207,7 +207,7 @@ struct coresight_ops_link { * Operations available for sources. * @trace_id: returns the value of the component's trace ID as known to the HW. - * @enable: enables tracing from a source. + * @enable: enables tracing for a source. * @disable: disables tracing for a source. */ struct coresight_ops_source { diff --git a/include/linux/count_zeros.h b/include/linux/count_zeros.h new file mode 100644 index 000000000000..363da78c4f64 --- /dev/null +++ b/include/linux/count_zeros.h @@ -0,0 +1,57 @@ +/* Count leading and trailing zeros functions + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ + +#ifndef _LINUX_BITOPS_COUNT_ZEROS_H_ +#define _LINUX_BITOPS_COUNT_ZEROS_H_ + +#include <asm/bitops.h> + +/** + * count_leading_zeros - Count the number of zeros from the MSB back + * @x: The value + * + * Count the number of leading zeros from the MSB going towards the LSB in @x. + * + * If the MSB of @x is set, the result is 0. + * If only the LSB of @x is set, then the result is BITS_PER_LONG-1. + * If @x is 0 then the result is COUNT_LEADING_ZEROS_0. + */ +static inline int count_leading_zeros(unsigned long x) +{ + if (sizeof(x) == 4) + return BITS_PER_LONG - fls(x); + else + return BITS_PER_LONG - fls64(x); +} + +#define COUNT_LEADING_ZEROS_0 BITS_PER_LONG + +/** + * count_trailing_zeros - Count the number of zeros from the LSB forwards + * @x: The value + * + * Count the number of trailing zeros from the LSB going towards the MSB in @x. + * + * If the LSB of @x is set, the result is 0. + * If only the MSB of @x is set, then the result is BITS_PER_LONG-1. + * If @x is 0 then the result is COUNT_TRAILING_ZEROS_0. + */ +static inline int count_trailing_zeros(unsigned long x) +{ +#define COUNT_TRAILING_ZEROS_0 (-1) + + if (sizeof(x) == 4) + return ffs(x); + else + return (x != 0) ? __ffs(x) : COUNT_TRAILING_ZEROS_0; +} + +#endif /* _LINUX_BITOPS_COUNT_ZEROS_H_ */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 23c30bdcca86..d2ca8c38f9c4 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -228,7 +228,6 @@ extern struct bus_type cpu_subsys; extern void cpu_hotplug_begin(void); extern void cpu_hotplug_done(void); extern void get_online_cpus(void); -extern bool try_get_online_cpus(void); extern void put_online_cpus(void); extern void cpu_hotplug_disable(void); extern void cpu_hotplug_enable(void); @@ -246,7 +245,6 @@ int cpu_down(unsigned int cpu); static inline void cpu_hotplug_begin(void) {} static inline void cpu_hotplug_done(void) {} #define get_online_cpus() do { } while (0) -#define try_get_online_cpus() true #define put_online_cpus() do { } while (0) #define cpu_hotplug_disable() do { } while (0) #define cpu_hotplug_enable() do { } while (0) diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index dca22de98d94..ef4c5b1a860f 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -65,7 +65,6 @@ struct cpufreq_policy { unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs should set cpufreq */ unsigned int cpu; /* cpu managing this policy, must be online */ - unsigned int kobj_cpu; /* cpu managing sysfs files, can be offline */ struct clk *clk; struct cpufreq_cpuinfo cpuinfo;/* see above */ @@ -149,10 +148,6 @@ static inline bool policy_is_shared(struct cpufreq_policy *policy) /* /sys/devices/system/cpu/cpufreq: entry point for global variables */ extern struct kobject *cpufreq_global_kobject; -int cpufreq_get_global_kobject(void); -void cpufreq_put_global_kobject(void); -int cpufreq_sysfs_create_file(const struct attribute *attr); -void cpufreq_sysfs_remove_file(const struct attribute *attr); #ifdef CONFIG_CPU_FREQ unsigned int cpufreq_get(unsigned int cpu); diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 1b357997cac5..85a868ccb493 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -93,7 +93,7 @@ extern int current_cpuset_is_being_rebound(void); extern void rebuild_sched_domains(void); -extern void cpuset_print_task_mems_allowed(struct task_struct *p); +extern void cpuset_print_current_mems_allowed(void); /* * read_mems_allowed_begin is required when making decisions involving @@ -104,6 
+104,9 @@ extern void cpuset_print_task_mems_allowed(struct task_struct *p); */ static inline unsigned int read_mems_allowed_begin(void) { + if (!cpusets_enabled()) + return 0; + return read_seqcount_begin(¤t->mems_allowed_seq); } @@ -115,6 +118,9 @@ static inline unsigned int read_mems_allowed_begin(void) */ static inline bool read_mems_allowed_retry(unsigned int seq) { + if (!cpusets_enabled()) + return false; + return read_seqcount_retry(¤t->mems_allowed_seq, seq); } @@ -219,7 +225,7 @@ static inline void rebuild_sched_domains(void) partition_sched_domains(1, NULL, NULL); } -static inline void cpuset_print_task_mems_allowed(struct task_struct *p) +static inline void cpuset_print_current_mems_allowed(void) { } diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 221025423e6c..61d042bbbf60 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -202,16 +202,16 @@ struct dccp_service_list { #define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1) #define DCCP_SERVICE_CODE_IS_ABSENT 0 -static inline int dccp_list_has_service(const struct dccp_service_list *sl, +static inline bool dccp_list_has_service(const struct dccp_service_list *sl, const __be32 service) { if (likely(sl != NULL)) { u32 i = sl->dccpsl_nr; while (i--) if (sl->dccpsl_list[i] == service) - return 1; + return true; } - return 0; + return false; } struct dccp_ackvec; diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 9beb636b97eb..19c066dce1da 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -79,6 +79,8 @@ struct dentry *debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent, u32 *value); struct dentry *debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent, u64 *value); +struct dentry *debugfs_create_ulong(const char *name, umode_t mode, + struct dentry *parent, unsigned long *value); struct dentry *debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent, u8 *value); struct dentry *debugfs_create_x16(const char *name, umode_t mode, @@ -92,7 +94,7 @@ struct dentry *debugfs_create_size_t(const char *name, umode_t mode, struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode, struct dentry *parent, atomic_t *value); struct dentry *debugfs_create_bool(const char *name, umode_t mode, - struct dentry *parent, u32 *value); + struct dentry *parent, bool *value); struct dentry *debugfs_create_blob(const char *name, umode_t mode, struct dentry *parent, @@ -243,7 +245,7 @@ static inline struct dentry *debugfs_create_atomic_t(const char *name, umode_t m static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent, - u32 *value) + bool *value) { return ERR_PTR(-ENODEV); } diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 76d23fa8c7d3..ec1c61c87d89 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -79,8 +79,8 @@ typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type, typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv); -typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd, - unsigned long arg); +typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, + struct block_device **bdev, fmode_t *mode); /* * These iteration functions are typically used to check (and combine) @@ -156,7 +156,7 @@ struct target_type { dm_resume_fn resume; dm_status_fn status; dm_message_fn message; - dm_ioctl_fn ioctl; + dm_prepare_ioctl_fn prepare_ioctl; dm_busy_fn busy; 
dm_iterate_devices_fn iterate_devices; dm_io_hints_fn io_hints; diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h index 569bbd039896..fec734df1524 100644 --- a/include/linux/dma-contiguous.h +++ b/include/linux/dma-contiguous.h @@ -111,7 +111,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size, return ret; } -struct page *dma_alloc_from_contiguous(struct device *dev, int count, +struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, unsigned int order); bool dma_release_from_contiguous(struct device *dev, struct page *pages, int count); @@ -144,7 +144,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size, } static inline -struct page *dma_alloc_from_contiguous(struct device *dev, int count, +struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, unsigned int order) { return NULL; diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h new file mode 100644 index 000000000000..fc481037478a --- /dev/null +++ b/include/linux/dma-iommu.h @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2014-2015 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __DMA_IOMMU_H +#define __DMA_IOMMU_H + +#ifdef __KERNEL__ +#include <asm/errno.h> + +#ifdef CONFIG_IOMMU_DMA +#include <linux/iommu.h> + +int iommu_dma_init(void); + +/* Domain management interface for IOMMU drivers */ +int iommu_get_dma_cookie(struct iommu_domain *domain); +void iommu_put_dma_cookie(struct iommu_domain *domain); + +/* Setup call for arch DMA mapping code */ +int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size); + +/* General helpers for DMA-API <-> IOMMU-API interaction */ +int dma_direction_to_prot(enum dma_data_direction dir, bool coherent); + +/* + * These implement the bulk of the relevant DMA mapping callbacks, but require + * the arch code to take care of attributes and cache maintenance + */ +struct page **iommu_dma_alloc(struct device *dev, size_t size, + gfp_t gfp, int prot, dma_addr_t *handle, + void (*flush_page)(struct device *, const void *, phys_addr_t)); +void iommu_dma_free(struct device *dev, struct page **pages, size_t size, + dma_addr_t *handle); + +int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma); + +dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, int prot); +int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, int prot); + +/* + * Arch code with no special attribute handling may use these + * directly as DMA mapping callbacks for simplicity + */ +void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, + enum dma_data_direction dir, struct dma_attrs *attrs); +void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, struct dma_attrs *attrs); +int iommu_dma_supported(struct device *dev, u64 mask); +int iommu_dma_mapping_error(struct device 
*dev, dma_addr_t dma_addr); + +#else + +struct iommu_domain; + +static inline int iommu_dma_init(void) +{ + return 0; +} + +static inline int iommu_get_dma_cookie(struct iommu_domain *domain) +{ + return -ENODEV; +} + +static inline void iommu_put_dma_cookie(struct iommu_domain *domain) +{ +} + +#endif /* CONFIG_IOMMU_DMA */ +#endif /* __KERNEL__ */ +#endif /* __DMA_IOMMU_H */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index ac07ff090919..2e551e2d2d03 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -1,6 +1,7 @@ #ifndef _LINUX_DMA_MAPPING_H #define _LINUX_DMA_MAPPING_H +#include <linux/sizes.h> #include <linux/string.h> #include <linux/device.h> #include <linux/err.h> @@ -145,7 +146,9 @@ static inline void arch_teardown_dma_ops(struct device *dev) { } static inline unsigned int dma_get_max_seg_size(struct device *dev) { - return dev->dma_parms ? dev->dma_parms->max_segment_size : 65536; + if (dev->dma_parms && dev->dma_parms->max_segment_size) + return dev->dma_parms->max_segment_size; + return SZ_64K; } static inline unsigned int dma_set_max_seg_size(struct device *dev, @@ -154,14 +157,15 @@ static inline unsigned int dma_set_max_seg_size(struct device *dev, if (dev->dma_parms) { dev->dma_parms->max_segment_size = size; return 0; - } else - return -EIO; + } + return -EIO; } static inline unsigned long dma_get_seg_boundary(struct device *dev) { - return dev->dma_parms ? - dev->dma_parms->segment_boundary_mask : 0xffffffff; + if (dev->dma_parms && dev->dma_parms->segment_boundary_mask) + return dev->dma_parms->segment_boundary_mask; + return DMA_BIT_MASK(32); } static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) @@ -169,8 +173,8 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) if (dev->dma_parms) { dev->dma_parms->segment_boundary_mask = mask; return 0; - } else - return -EIO; + } + return -EIO; } #ifndef dma_max_pfn diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h index 234393a6997b..79df69dc629c 100644 --- a/include/linux/dma/hsu.h +++ b/include/linux/dma/hsu.h @@ -35,14 +35,23 @@ struct hsu_dma_chip { unsigned int length; unsigned int offset; struct hsu_dma *hsu; - struct hsu_dma_platform_data *pdata; }; +#if IS_ENABLED(CONFIG_HSU_DMA) /* Export to the internal users */ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr); /* Export to the platform drivers */ int hsu_dma_probe(struct hsu_dma_chip *chip); int hsu_dma_remove(struct hsu_dma_chip *chip); +#else +static inline irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, + unsigned short nr) +{ + return IRQ_NONE; +} +static inline int hsu_dma_probe(struct hsu_dma_chip *chip) { return -ENODEV; } +static inline int hsu_dma_remove(struct hsu_dma_chip *chip) { return 0; } +#endif /* CONFIG_HSU_DMA */ #endif /* _DMA_HSU_H */ diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h index 7ac17f57250e..187c10299722 100644 --- a/include/linux/dma_remapping.h +++ b/include/linux/dma_remapping.h @@ -20,6 +20,14 @@ #define CONTEXT_TT_MULTI_LEVEL 0 #define CONTEXT_TT_DEV_IOTLB 1 #define CONTEXT_TT_PASS_THROUGH 2 +/* Extended context entry types */ +#define CONTEXT_TT_PT_PASID 4 +#define CONTEXT_TT_PT_PASID_DEV_IOTLB 5 +#define CONTEXT_TT_MASK (7ULL << 2) + +#define CONTEXT_DINVE (1ULL << 8) +#define CONTEXT_PRS (1ULL << 9) +#define CONTEXT_PASIDE (1ULL << 11) struct intel_iommu; struct dmar_domain; diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 
7ea9184eaa13..c47c68e535e8 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -645,6 +645,7 @@ enum dmaengine_alignment { * The function takes a buffer of size buf_len. The callback function will * be called after period_len bytes have been transferred. * @device_prep_interleaved_dma: Transfer expression in a generic way. + * @device_prep_dma_imm_data: DMAs 8 bytes of immediate data to the dst address * @device_config: Pushes a new configuration to a channel, return 0 or an error * code * @device_pause: Pauses any transfer happening on a channel. Returns @@ -727,6 +728,9 @@ struct dma_device { struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( struct dma_chan *chan, struct dma_interleaved_template *xt, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)( + struct dma_chan *chan, dma_addr_t dst, u64 data, + unsigned long flags); int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config); diff --git a/include/linux/edac.h b/include/linux/edac.h index da3b72e95db3..4fe67b853de0 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -769,12 +769,10 @@ struct mem_ctl_info { /* the internal state of this controller instance */ int op_state; -#ifdef CONFIG_EDAC_DEBUG struct dentry *debugfs; u8 fake_inject_layer[EDAC_MAX_LAYERS]; - u32 fake_inject_ue; + bool fake_inject_ue; u16 fake_inject_count; -#endif }; /* diff --git a/include/linux/efi.h b/include/linux/efi.h index 85ef051ac6fb..569b5a866bb1 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -99,6 +99,7 @@ typedef struct { #define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */ #define EFI_MEMORY_MORE_RELIABLE \ ((u64)0x0000000000010000ULL) /* higher reliability */ +#define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */ #define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */ #define EFI_MEMORY_DESCRIPTOR_VERSION 1 @@ -595,6 +596,9 @@ void efi_native_runtime_setup(void); #define DEVICE_TREE_GUID \ EFI_GUID( 0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0 ) +#define EFI_PROPERTIES_TABLE_GUID \ + EFI_GUID( 0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5 ) + typedef struct { efi_guid_t guid; u64 table; @@ -676,7 +680,7 @@ typedef struct { } efi_system_table_t; struct efi_memory_map { - void *phys_map; + phys_addr_t phys_map; void *map; void *map_end; int nr_map; @@ -808,6 +812,15 @@ typedef struct _efi_file_io_interface { #define EFI_FILE_MODE_WRITE 0x0000000000000002 #define EFI_FILE_MODE_CREATE 0x8000000000000000 +typedef struct { + u32 version; + u32 length; + u64 memory_protection_attribute; +} efi_properties_table_t; + +#define EFI_PROPERTIES_TABLE_VERSION 0x00010000 +#define EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA 0x1 + #define EFI_INVALID_TABLE_ADDR (~0UL) /* @@ -830,6 +843,7 @@ extern struct efi { unsigned long runtime; /* runtime table */ unsigned long config_table; /* config tables */ unsigned long esrt; /* ESRT table */ + unsigned long properties_table; /* properties table */ efi_get_time_t *get_time; efi_set_time_t *set_time; efi_get_wakeup_time_t *get_wakeup_time; @@ -901,13 +915,19 @@ extern void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource); extern void efi_get_time(struct timespec *now); extern void efi_reserve_boot_services(void); -extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose);
+extern int efi_get_fdt_params(struct efi_fdt_params *params); extern struct efi_memory_map memmap; extern struct kobject *efi_kobj; extern int efi_reboot_quirk_mode; extern bool efi_poweroff_required(void); +#ifdef CONFIG_EFI_FAKE_MEMMAP +extern void __init efi_fake_memmap(void); +#else +static inline void efi_fake_memmap(void) { } +#endif + /* Iterate through an efi_memory_map */ #define for_each_efi_memory_desc(m, md) \ for ((md) = (m)->map; \ @@ -959,6 +979,7 @@ extern int __init efi_setup_pcdp_console(char *); #define EFI_PARAVIRT 6 /* Access is via a paravirt interface */ #define EFI_ARCH_1 7 /* First arch-specific bit */ #define EFI_DBG 8 /* Print additional debug info at runtime */ +#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */ #ifdef CONFIG_EFI /* diff --git a/include/linux/extcon.h b/include/linux/extcon.h index c0f8c4fc5d45..7abf674c388c 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -31,32 +31,42 @@ /* * Define the unique id of supported external connectors */ -#define EXTCON_NONE 0 - -#define EXTCON_USB 1 /* USB connector */ -#define EXTCON_USB_HOST 2 - -#define EXTCON_TA 3 /* Charger connector */ -#define EXTCON_FAST_CHARGER 4 -#define EXTCON_SLOW_CHARGER 5 -#define EXTCON_CHARGE_DOWNSTREAM 6 - -#define EXTCON_LINE_IN 7 /* Audio/Video connector */ -#define EXTCON_LINE_OUT 8 -#define EXTCON_MICROPHONE 9 -#define EXTCON_HEADPHONE 10 -#define EXTCON_HDMI 11 -#define EXTCON_MHL 12 -#define EXTCON_DVI 13 -#define EXTCON_VGA 14 -#define EXTCON_SPDIF_IN 15 -#define EXTCON_SPDIF_OUT 16 -#define EXTCON_VIDEO_IN 17 -#define EXTCON_VIDEO_OUT 18 - -#define EXTCON_DOCK 19 /* Misc connector */ -#define EXTCON_JIG 20 -#define EXTCON_MECHANICAL 21 +#define EXTCON_NONE 0 + +/* USB external connector */ +#define EXTCON_USB 1 +#define EXTCON_USB_HOST 2 + +/* Charging external connector */ +#define EXTCON_CHG_USB_SDP 5 /* Standard Downstream Port */ +#define EXTCON_CHG_USB_DCP 6 /* Dedicated Charging Port */ +#define EXTCON_CHG_USB_CDP 7 /* Charging Downstream Port */ +#define EXTCON_CHG_USB_ACA 8 /* Accessory Charger Adapter */ +#define EXTCON_CHG_USB_FAST 9 +#define EXTCON_CHG_USB_SLOW 10 + +/* Jack external connector */ +#define EXTCON_JACK_MICROPHONE 20 +#define EXTCON_JACK_HEADPHONE 21 +#define EXTCON_JACK_LINE_IN 22 +#define EXTCON_JACK_LINE_OUT 23 +#define EXTCON_JACK_VIDEO_IN 24 +#define EXTCON_JACK_VIDEO_OUT 25 +#define EXTCON_JACK_SPDIF_IN 26 /* Sony Philips Digital InterFace */ +#define EXTCON_JACK_SPDIF_OUT 27 + +/* Display external connector */ +#define EXTCON_DISP_HDMI 40 /* High-Definition Multimedia Interface */ +#define EXTCON_DISP_MHL 41 /* Mobile High-Definition Link */ +#define EXTCON_DISP_DVI 42 /* Digital Visual Interface */ +#define EXTCON_DISP_VGA 43 /* Video Graphics Array */ + +/* Miscellaneous external connector */ +#define EXTCON_DOCK 60 +#define EXTCON_JIG 61 +#define EXTCON_MECHANICAL 62 + +#define EXTCON_NUM 63 struct extcon_cable; diff --git a/include/linux/extcon/extcon-gpio.h b/include/linux/extcon/extcon-gpio.h index 0b17ad43fbfc..7cacafb78b09 100644 --- a/include/linux/extcon/extcon-gpio.h +++ b/include/linux/extcon/extcon-gpio.h @@ -1,5 +1,5 @@ /* - * External connector (extcon) class generic GPIO driver + * Single-state GPIO extcon driver based on extcon class * * Copyright (C) 2012 Samsung Electronics * Author: MyungJoo Ham <myungjoo.ham@samsung.com> @@ -16,43 +16,31 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. - * -*/ + */ #ifndef __EXTCON_GPIO_H__ #define __EXTCON_GPIO_H__ __FILE__ #include <linux/extcon.h> /** - * struct gpio_extcon_platform_data - A simple GPIO-controlled extcon device. - * @name: The name of this GPIO extcon device. + * struct gpio_extcon_pdata - A simple GPIO-controlled extcon device. + * @extcon_id: The unique id of specific external connector. * @gpio: Corresponding GPIO. * @gpio_active_low: Boolean describing whether gpio active state is 1 or 0 * If true, low state of gpio means active. * If false, high state of gpio means active. * @debounce: Debounce time for GPIO IRQ in ms. * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW). - * @state_on: print_state is overriden with state_on if attached. - * If NULL, default method of extcon class is used. - * @state_off: print_state is overriden with state_off if detached. - * If NUll, default method of extcon class is used. * @check_on_resume: Boolean describing whether to check the state of gpio * while resuming from sleep. - * - * Note that in order for state_on or state_off to be valid, both state_on - * and state_off should be not NULL. If at least one of them is NULL, - * the print_state is not overriden. */ -struct gpio_extcon_platform_data { - const char *name; +struct gpio_extcon_pdata { + unsigned int extcon_id; unsigned gpio; bool gpio_active_low; unsigned long debounce; unsigned long irq_flags; - /* if NULL, "0" or "1" will be printed */ - const char *state_on; - const char *state_off; bool check_on_resume; }; diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index 798fad9e420d..3159a7dba034 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -18,7 +18,7 @@ struct fault_attr { atomic_t times; atomic_t space; unsigned long verbose; - u32 task_filter; + bool task_filter; unsigned long stacktrace_depth; unsigned long require_start; unsigned long require_end; diff --git a/include/linux/fb.h b/include/linux/fb.h index bc9afa74ee11..3d003805aac3 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -156,7 +156,7 @@ struct fb_cursor_user { #define FB_EVENT_GET_REQ 0x0D /* Unbind from the console if possible */ #define FB_EVENT_FB_UNBIND 0x0E -/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga switcheroo */ +/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga_switcheroo */ #define FB_EVENT_REMAP_ALL_CONSOLE 0x0F /* A hardware display blank early change occured */ #define FB_EARLY_EVENT_BLANK 0x10 @@ -483,7 +483,10 @@ struct fb_info { #ifdef CONFIG_FB_TILEBLITTING struct fb_tile_ops *tileops; /* Tile Blitting */ #endif - char __iomem *screen_base; /* Virtual address */ + union { + char __iomem *screen_base; /* Virtual address */ + char *screen_buffer; + }; unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */ void *pseudo_palette; /* Fake palette of 16 colors */ #define FBINFO_STATE_RUNNING 0 diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 674e3e226465..5295535b60c6 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -26,6 +26,7 @@ struct fdtable { struct file __rcu **fd; /* current fd array */ unsigned long *close_on_exec; unsigned long *open_fds; + unsigned long *full_fds_bits; struct rcu_head rcu; }; @@ -59,6 +60,7 @@ struct files_struct { int next_fd; unsigned long close_on_exec_init[1]; unsigned long open_fds_init[1]; + unsigned long full_fds_bits_init[1]; struct file __rcu * fd_array[NR_OPEN_DEFAULT]; }; diff --git a/include/linux/fence.h 
b/include/linux/fence.h index 39efee130d2b..bb522011383b 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h @@ -280,6 +280,22 @@ fence_is_signaled(struct fence *fence) } /** + * fence_is_later - return if f1 is chronologically later than f2 + * @f1: [in] the first fence from the same context + * @f2: [in] the second fence from the same context + * + * Returns true if f1 is chronologically later than f2. Both fences must be + * from the same context, since a seqno is not re-used across contexts. + */ +static inline bool fence_is_later(struct fence *f1, struct fence *f2) +{ + if (WARN_ON(f1->context != f2->context)) + return false; + + return f1->seqno - f2->seqno < INT_MAX; +} + +/** * fence_later - return the chronologically later fence * @f1: [in] the first fence from the same context * @f2: [in] the second fence from the same context @@ -298,14 +314,15 @@ static inline struct fence *fence_later(struct fence *f1, struct fence *f2) * set if enable_signaling wasn't called, and enabling that here is * overkill. */ - if (f2->seqno - f1->seqno <= INT_MAX) - return fence_is_signaled(f2) ? NULL : f2; - else + if (fence_is_later(f1, f2)) return fence_is_signaled(f1) ? NULL : f1; + else + return fence_is_signaled(f2) ? NULL : f2; } signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout); - +signed long fence_wait_any_timeout(struct fence **fences, uint32_t count, + bool intr, signed long timeout); /** * fence_wait - sleep until the fence gets signaled diff --git a/include/linux/filter.h b/include/linux/filter.h index fa2cab985e57..4165e9ac9e36 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -13,6 +13,7 @@ #include <linux/printk.h> #include <linux/workqueue.h> #include <linux/sched.h> +#include <net/sch_generic.h> #include <asm/cacheflush.h> @@ -302,10 +303,6 @@ struct bpf_prog_aux; bpf_size; \ }) -/* Macro to invoke filter function. */ -#define SK_RUN_FILTER(filter, ctx) \ - (*filter->prog->bpf_func)(ctx, filter->prog->insnsi) - #ifdef CONFIG_COMPAT /* A struct sock_filter is architecture independent. */ struct compat_sock_fprog { @@ -326,8 +323,12 @@ struct bpf_binary_header { struct bpf_prog { u16 pages; /* Number of allocated pages */ - bool jited; /* Is our filter JIT'ed? */ - bool gpl_compatible; /* Is our filter GPL compatible? */ + kmemcheck_bitfield_begin(meta); + u16 jited:1, /* Is our filter JIT'ed? */ + gpl_compatible:1, /* Is filter GPL compatible? */ + cb_access:1, /* Is control block accessed? */ + dst_needed:1; /* Do we need dst entry? 
*/ + kmemcheck_bitfield_end(meta); u32 len; /* Number of filter blocks */ enum bpf_prog_type type; /* Type of BPF program */ struct bpf_prog_aux *aux; /* Auxiliary fields */ @@ -349,6 +350,39 @@ struct sk_filter { #define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) +static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, + struct sk_buff *skb) +{ + u8 *cb_data = qdisc_skb_cb(skb)->data; + u8 saved_cb[QDISC_CB_PRIV_LEN]; + u32 res; + + BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != + QDISC_CB_PRIV_LEN); + + if (unlikely(prog->cb_access)) { + memcpy(saved_cb, cb_data, sizeof(saved_cb)); + memset(cb_data, 0, sizeof(saved_cb)); + } + + res = BPF_PROG_RUN(prog, skb); + + if (unlikely(prog->cb_access)) + memcpy(cb_data, saved_cb, sizeof(saved_cb)); + + return res; +} + +static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, + struct sk_buff *skb) +{ + u8 *cb_data = qdisc_skb_cb(skb)->data; + + if (unlikely(prog->cb_access)) + memset(cb_data, 0, QDISC_CB_PRIV_LEN); + return BPF_PROG_RUN(prog, skb); +} + static inline unsigned int bpf_prog_size(unsigned int proglen) { return max(sizeof(struct bpf_prog), @@ -408,7 +442,7 @@ typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter, int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, - bpf_aux_classic_check_t trans); + bpf_aux_classic_check_t trans, bool save_orig); void bpf_prog_destroy(struct bpf_prog *fp); int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h new file mode 100644 index 000000000000..0940bf45e2f2 --- /dev/null +++ b/include/linux/fpga/fpga-mgr.h @@ -0,0 +1,127 @@ +/* + * FPGA Framework + * + * Copyright (C) 2013-2015 Altera Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#include <linux/mutex.h> +#include <linux/platform_device.h> + +#ifndef _LINUX_FPGA_MGR_H +#define _LINUX_FPGA_MGR_H + +struct fpga_manager; + +/** + * enum fpga_mgr_states - fpga framework states + * @FPGA_MGR_STATE_UNKNOWN: can't determine state + * @FPGA_MGR_STATE_POWER_OFF: FPGA power is off + * @FPGA_MGR_STATE_POWER_UP: FPGA reports power is up + * @FPGA_MGR_STATE_RESET: FPGA in reset state + * @FPGA_MGR_STATE_FIRMWARE_REQ: firmware request in progress + * @FPGA_MGR_STATE_FIRMWARE_REQ_ERR: firmware request failed + * @FPGA_MGR_STATE_WRITE_INIT: preparing FPGA for programming + * @FPGA_MGR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage + * @FPGA_MGR_STATE_WRITE: writing image to FPGA + * @FPGA_MGR_STATE_WRITE_ERR: Error while writing FPGA + * @FPGA_MGR_STATE_WRITE_COMPLETE: Doing post programming steps + * @FPGA_MGR_STATE_WRITE_COMPLETE_ERR: Error during WRITE_COMPLETE + * @FPGA_MGR_STATE_OPERATING: FPGA is programmed and operating + */ +enum fpga_mgr_states { + /* default FPGA states */ + FPGA_MGR_STATE_UNKNOWN, + FPGA_MGR_STATE_POWER_OFF, + FPGA_MGR_STATE_POWER_UP, + FPGA_MGR_STATE_RESET, + + /* getting an image for loading */ + FPGA_MGR_STATE_FIRMWARE_REQ, + FPGA_MGR_STATE_FIRMWARE_REQ_ERR, + + /* write sequence: init, write, complete */ + FPGA_MGR_STATE_WRITE_INIT, + FPGA_MGR_STATE_WRITE_INIT_ERR, + FPGA_MGR_STATE_WRITE, + FPGA_MGR_STATE_WRITE_ERR, + FPGA_MGR_STATE_WRITE_COMPLETE, + FPGA_MGR_STATE_WRITE_COMPLETE_ERR, + + /* fpga is programmed and operating */ + FPGA_MGR_STATE_OPERATING, +}; + +/* + * FPGA Manager flags + * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported + */ +#define FPGA_MGR_PARTIAL_RECONFIG BIT(0) + +/** + * struct fpga_manager_ops - ops for low level fpga manager drivers + * @state: returns an enum value of the FPGA's state + * @write_init: prepare the FPGA to receive configuration data + * @write: write count bytes of configuration data to the FPGA + * @write_complete: set FPGA to operating state after writing is done + * @fpga_remove: optional: Set FPGA into a specific state during driver remove + * + * fpga_manager_ops are the low level functions implemented by a specific + * fpga manager driver. The optional ones are tested for NULL before being + * called, so leaving them out is fine.
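+ *
+ * Minimal sketch of a driver's ops (hypothetical, memory-mapped device;
+ * a real driver also supplies .write_init, .write_complete and .state):
+ *
+ *	static int my_fpga_write(struct fpga_manager *mgr,
+ *				 const char *buf, size_t count)
+ *	{
+ *		struct my_fpga *priv = mgr->priv;
+ *
+ *		memcpy_toio(priv->base, buf, count);
+ *		return 0;
+ *	}
+ *
+ *	static const struct fpga_manager_ops my_fpga_ops = {
+ *		.write = my_fpga_write,
+ *	};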
+ */ +struct fpga_manager_ops { + enum fpga_mgr_states (*state)(struct fpga_manager *mgr); + int (*write_init)(struct fpga_manager *mgr, u32 flags, + const char *buf, size_t count); + int (*write)(struct fpga_manager *mgr, const char *buf, size_t count); + int (*write_complete)(struct fpga_manager *mgr, u32 flags); + void (*fpga_remove)(struct fpga_manager *mgr); +}; + +/** + * struct fpga_manager - fpga manager structure + * @name: name of low level fpga manager + * @dev: fpga manager device + * @ref_mutex: only allows one reference to fpga manager + * @state: state of fpga manager + * @mops: pointer to struct of fpga manager ops + * @priv: low level driver private data + */ +struct fpga_manager { + const char *name; + struct device dev; + struct mutex ref_mutex; + enum fpga_mgr_states state; + const struct fpga_manager_ops *mops; + void *priv; +}; + +#define to_fpga_manager(d) container_of(d, struct fpga_manager, dev) + +int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, + const char *buf, size_t count); + +int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags, + const char *image_name); + +struct fpga_manager *of_fpga_mgr_get(struct device_node *node); + +void fpga_mgr_put(struct fpga_manager *mgr); + +int fpga_mgr_register(struct device *dev, const char *name, + const struct fpga_manager_ops *mops, void *priv); + +void fpga_mgr_unregister(struct device *dev); + +#endif /*_LINUX_FPGA_MGR_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 72d8a844c692..9a1cb8c605e0 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1053,12 +1053,11 @@ extern void locks_remove_file(struct file *); extern void locks_release_private(struct file_lock *); extern void posix_test_lock(struct file *, struct file_lock *); extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); -extern int posix_lock_inode_wait(struct inode *, struct file_lock *); extern int posix_unblock_lock(struct file_lock *); extern int vfs_test_lock(struct file *, struct file_lock *); extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); -extern int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl); +extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl); extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); extern void lease_get_mtime(struct inode *, struct timespec *time); extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); @@ -1144,12 +1143,6 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl, return -ENOLCK; } -static inline int posix_lock_inode_wait(struct inode *inode, - struct file_lock *fl) -{ - return -ENOLCK; -} - static inline int posix_unblock_lock(struct file_lock *waiter) { return -ENOENT; @@ -1171,8 +1164,7 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl) return 0; } -static inline int flock_lock_inode_wait(struct inode *inode, - struct file_lock *request) +static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl) { return -ENOLCK; } @@ -1215,14 +1207,9 @@ static inline struct inode *file_inode(const struct file *f) return f->f_inode; } -static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl) -{ - return posix_lock_inode_wait(file_inode(filp), fl); -} - -static inline int flock_lock_file_wait(struct file *filp, struct file_lock *fl) +static inline int
locks_lock_file_wait(struct file *filp, struct file_lock *fl) { - return flock_lock_inode_wait(file_inode(filp), fl); + return locks_lock_inode_wait(file_inode(filp), fl); } struct fasync_struct { @@ -2422,6 +2409,7 @@ extern int write_inode_now(struct inode *, int); extern int filemap_fdatawrite(struct address_space *); extern int filemap_flush(struct address_space *); extern int filemap_fdatawait(struct address_space *); +extern void filemap_fdatawait_keep_errors(struct address_space *); extern int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend); extern int filemap_write_and_wait(struct address_space *mapping); diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h new file mode 100644 index 000000000000..84d971ff3fba --- /dev/null +++ b/include/linux/fsl/guts.h @@ -0,0 +1,192 @@ +/** + * Freescale 85xx and 86xx Global Utilities register set + * + * Authors: Jeff Brown + * Timur Tabi <timur@freescale.com> + * + * Copyright 2004,2007,2012 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __FSL_GUTS_H__ +#define __FSL_GUTS_H__ + +#include <linux/types.h> + +/** + * Global Utility Registers. + * + * Not all registers defined in this structure are available on all chips, so + * you are expected to know whether a given register actually exists on your + * chip before you access it. + * + * Also, some registers are similar on different chips but have slightly + * different names. In these cases, one name is chosen to avoid extraneous + * #ifdefs. + */ +struct ccsr_guts { + __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ + __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ + __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ + __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ + __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ + __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */ + u8 res018[0x20 - 0x18]; + __be32 porcir; /* 0x.0020 - POR Configuration Information Register */ + u8 res024[0x30 - 0x24]; + __be32 gpiocr; /* 0x.0030 - GPIO Control Register */ + u8 res034[0x40 - 0x34]; + __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ + u8 res044[0x50 - 0x44]; + __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ + u8 res054[0x60 - 0x54]; + __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ + __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */ + __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ + u8 res06c[0x70 - 0x6c]; + __be32 devdisr; /* 0x.0070 - Device Disable Control */ +#define CCSR_GUTS_DEVDISR_TB1 0x00001000 +#define CCSR_GUTS_DEVDISR_TB0 0x00004000 + __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ + u8 res078[0x7c - 0x78]; + __be32 pmjcr; /* 0x.007c - Power Management Jog Control Register */ + __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ + __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */ + __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */ + __be32 pmcdr; /* 0x.008c - Power management clock disable register */ + __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ + __be32 rstrscr; /*
0x.0094 - Reset Request Status and Control Register */ + __be32 ectrstcr; /* 0x.0098 - Exception reset control register */ + __be32 autorstsr; /* 0x.009c - Automatic reset status register */ + __be32 pvr; /* 0x.00a0 - Processor Version Register */ + __be32 svr; /* 0x.00a4 - System Version Register */ + u8 res0a8[0xb0 - 0xa8]; + __be32 rstcr; /* 0x.00b0 - Reset Control Register */ + u8 res0b4[0xc0 - 0xb4]; + __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register + Called 'elbcvselcr' on 86xx SOCs */ + u8 res0c4[0x100 - 0xc4]; + __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers + There are 16 registers */ + u8 res140[0x224 - 0x140]; + __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */ + __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */ + u8 res22c[0x604 - 0x22c]; + __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ + u8 res608[0x800 - 0x608]; + __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */ + u8 res804[0x900 - 0x804]; + __be32 ircr; /* 0x.0900 - Infrared Control Register */ + u8 res904[0x908 - 0x904]; + __be32 dmacr; /* 0x.0908 - DMA Control Register */ + u8 res90c[0x914 - 0x90c]; + __be32 elbccr; /* 0x.0914 - eLBC Control Register */ + u8 res918[0xb20 - 0x918]; + __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ + __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ + __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ + u8 resb2c[0xe00 - 0xb2c]; + __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */ + u8 rese04[0xe10 - 0xe04]; + __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ + u8 rese14[0xe20 - 0xe14]; + __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ + __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */ + u8 rese28[0xf04 - 0xe28]; + __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ + __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 1 */ + u8 resf0c[0xf2c - 0xf0c]; + __be32 itcr; /* 0x.0f2c - Internal transaction control register */ + u8 resf30[0xf40 - 0xf30]; + __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ + __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 1 */ +} __attribute__ ((packed)); + + +/* Alternate function signal multiplex control */ +#define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x)) + +#ifdef CONFIG_PPC_86xx + +#define CCSR_GUTS_DMACR_DEV_SSI 0 /* DMA controller/channel set to SSI */ +#define CCSR_GUTS_DMACR_DEV_IR 1 /* DMA controller/channel set to IR */ + +/* + * Set the DMACR register in the GUTS + * + * The DMACR register determines the source of initiated transfers for each + * channel on each DMA controller. Rather than have a bunch of repetitive + * macros for the bit patterns, we just have a function that calculates + * them.
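+ *
+ * Worked example of the shift formula in the body below: co=0, ch=1
+ * gives shift = 16 + (8 * 1 + 2 * 2) = 28, so the device code occupies
+ * bits 29:28 of DMACR.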
+ * + * guts: Pointer to GUTS structure + * co: The DMA controller (0 or 1) + * ch: The channel on the DMA controller (0, 1, 2, or 3) + * device: The device to set as the source (CCSR_GUTS_DMACR_DEV_xx) + */ +static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts, + unsigned int co, unsigned int ch, unsigned int device) +{ + unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch)); + + clrsetbits_be32(&guts->dmacr, 3 << shift, device << shift); +} + +#define CCSR_GUTS_PMUXCR_LDPSEL 0x00010000 +#define CCSR_GUTS_PMUXCR_SSI1_MASK 0x0000C000 /* Bitmask for SSI1 */ +#define CCSR_GUTS_PMUXCR_SSI1_LA 0x00000000 /* Latched address */ +#define CCSR_GUTS_PMUXCR_SSI1_HI 0x00004000 /* High impedance */ +#define CCSR_GUTS_PMUXCR_SSI1_SSI 0x00008000 /* Used for SSI1 */ +#define CCSR_GUTS_PMUXCR_SSI2_MASK 0x00003000 /* Bitmask for SSI2 */ +#define CCSR_GUTS_PMUXCR_SSI2_LA 0x00000000 /* Latched address */ +#define CCSR_GUTS_PMUXCR_SSI2_HI 0x00001000 /* High impedance */ +#define CCSR_GUTS_PMUXCR_SSI2_SSI 0x00002000 /* Used for SSI2 */ +#define CCSR_GUTS_PMUXCR_LA_22_25_LA 0x00000000 /* Latched Address */ +#define CCSR_GUTS_PMUXCR_LA_22_25_HI 0x00000400 /* High impedance */ +#define CCSR_GUTS_PMUXCR_DBGDRV 0x00000200 /* Signals not driven */ +#define CCSR_GUTS_PMUXCR_DMA2_0 0x00000008 +#define CCSR_GUTS_PMUXCR_DMA2_3 0x00000004 +#define CCSR_GUTS_PMUXCR_DMA1_0 0x00000002 +#define CCSR_GUTS_PMUXCR_DMA1_3 0x00000001 + +/* + * Set the DMA external control bits in the GUTS + * + * The DMA external control bits in the PMUXCR are only meaningful for + * channels 0 and 3. Any other channels are ignored. + * + * guts: Pointer to GUTS structure + * co: The DMA controller (0 or 1) + * ch: The channel on the DMA controller (0, 1, 2, or 3) + * value: the new value for the bit (0 or 1) + */ +static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts, + unsigned int co, unsigned int ch, unsigned int value) +{ + if ((ch == 0) || (ch == 3)) { + unsigned int shift = 2 * (co + 1) - (ch & 1) - 1; + + clrsetbits_be32(&guts->pmuxcr, 1 << shift, value << shift); + } +} + +#define CCSR_GUTS_CLKDVDR_PXCKEN 0x80000000 +#define CCSR_GUTS_CLKDVDR_SSICKEN 0x20000000 +#define CCSR_GUTS_CLKDVDR_PXCKINV 0x10000000 +#define CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT 25 +#define CCSR_GUTS_CLKDVDR_PXCKDLY_MASK 0x06000000 +#define CCSR_GUTS_CLKDVDR_PXCKDLY(x) \ + (((x) & 3) << CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT) +#define CCSR_GUTS_CLKDVDR_PXCLK_SHIFT 16 +#define CCSR_GUTS_CLKDVDR_PXCLK_MASK 0x001F0000 +#define CCSR_GUTS_CLKDVDR_PXCLK(x) (((x) & 31) << CCSR_GUTS_CLKDVDR_PXCLK_SHIFT) +#define CCSR_GUTS_CLKDVDR_SSICLK_MASK 0x000000FF +#define CCSR_GUTS_CLKDVDR_SSICLK(x) ((x) & CCSR_GUTS_CLKDVDR_SSICLK_MASK) + +#endif + +#endif diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 6cd8c0ee4b6f..eae6548efbf0 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -263,7 +263,18 @@ static inline void ftrace_kill(void) { } #endif /* CONFIG_FUNCTION_TRACER */ #ifdef CONFIG_STACK_TRACER + +#define STACK_TRACE_ENTRIES 500 + +struct stack_trace; + +extern unsigned stack_trace_index[]; +extern struct stack_trace stack_trace_max; +extern unsigned long stack_trace_max_size; +extern arch_spinlock_t stack_trace_max_lock; + extern int stack_tracer_enabled; +void stack_trace_print(void); int stack_trace_sysctl(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 0408545bce42..851671742790 100644 --- a/include/linux/fwnode.h +++ 
b/include/linux/fwnode.h @@ -16,7 +16,9 @@ enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF, FWNODE_ACPI, + FWNODE_ACPI_DATA, FWNODE_PDATA, + FWNODE_IRQCHIP, }; struct fwnode_handle { diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h index 09460d6d6682..a4c61cbce777 100644 --- a/include/linux/genetlink.h +++ b/include/linux/genetlink.h @@ -8,7 +8,7 @@ extern void genl_lock(void); extern void genl_unlock(void); #ifdef CONFIG_LOCKDEP -extern int lockdep_genl_is_held(void); +extern bool lockdep_genl_is_held(void); #endif /* for synchronisation between af_netlink and genetlink */ diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 2adbfa6d02bc..847cc1d91634 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -163,6 +163,18 @@ struct disk_part_tbl { struct disk_events; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + +struct blk_integrity { + struct blk_integrity_profile *profile; + unsigned char flags; + unsigned char tuple_size; + unsigned char interval_exp; + unsigned char tag_size; +}; + +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + struct gendisk { /* major, first_minor and minors are input parameters only, * don't use directly. Use disk_devt() and disk_max_parts(). @@ -198,8 +210,8 @@ struct gendisk { atomic_t sync_io; /* RAID */ struct disk_events *ev; #ifdef CONFIG_BLK_DEV_INTEGRITY - struct blk_integrity *integrity; -#endif + struct kobject integrity_kobj; +#endif /* CONFIG_BLK_DEV_INTEGRITY */ int node_id; }; @@ -727,6 +739,16 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) #endif } +#if defined(CONFIG_BLK_DEV_INTEGRITY) +extern void blk_integrity_add(struct gendisk *); +extern void blk_integrity_del(struct gendisk *); +extern void blk_integrity_revalidate(struct gendisk *); +#else /* CONFIG_BLK_DEV_INTEGRITY */ +static inline void blk_integrity_add(struct gendisk *disk) { } +static inline void blk_integrity_del(struct gendisk *disk) { } +static inline void blk_integrity_revalidate(struct gendisk *disk) { } +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + #else /* CONFIG_BLOCK */ static inline void printk_all_partitions(void) { } diff --git a/include/linux/gfp.h b/include/linux/gfp.h index f92cbd2f4450..6523109e136d 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -14,7 +14,7 @@ struct vm_area_struct; #define ___GFP_HIGHMEM 0x02u #define ___GFP_DMA32 0x04u #define ___GFP_MOVABLE 0x08u -#define ___GFP_WAIT 0x10u +#define ___GFP_RECLAIMABLE 0x10u #define ___GFP_HIGH 0x20u #define ___GFP_IO 0x40u #define ___GFP_FS 0x80u @@ -29,18 +29,17 @@ struct vm_area_struct; #define ___GFP_NOMEMALLOC 0x10000u #define ___GFP_HARDWALL 0x20000u #define ___GFP_THISNODE 0x40000u -#define ___GFP_RECLAIMABLE 0x80000u +#define ___GFP_ATOMIC 0x80000u #define ___GFP_NOACCOUNT 0x100000u #define ___GFP_NOTRACK 0x200000u -#define ___GFP_NO_KSWAPD 0x400000u +#define ___GFP_DIRECT_RECLAIM 0x400000u #define ___GFP_OTHER_NODE 0x800000u #define ___GFP_WRITE 0x1000000u +#define ___GFP_KSWAPD_RECLAIM 0x2000000u /* If the above are modified, __GFP_BITS_SHIFT may need updating */ /* - * GFP bitmasks.. - * - * Zone modifiers (see linux/mmzone.h - low three bits) + * Physical address zone modifiers (see linux/mmzone.h - low four bits) * * Do not put any conditional on these. If necessary modify the definitions * without the underscores and use them consistently. 
The definitions here may @@ -50,116 +49,229 @@ struct vm_area_struct; #define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM) #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32) #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */ +#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */ #define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) + +/* + * Page mobility and placement hints + * + * These flags provide hints about how mobile the page is. Pages with similar + * mobility are placed within the same pageblocks to minimise problems due + * to external fragmentation. + * + * __GFP_MOVABLE (also a zone modifier) indicates that the page can be + * moved by page migration during memory compaction or can be reclaimed. + * + * __GFP_RECLAIMABLE is used for slab allocations that specify + * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers. + * + * __GFP_WRITE indicates the caller intends to dirty the page. Where possible, + * these pages will be spread between local zones to avoid all the dirty + * pages being in one zone (fair zone allocation policy). + * + * __GFP_HARDWALL enforces the cpuset memory allocation policy. + * + * __GFP_THISNODE forces the allocation to be satisified from the requested + * node with no fallbacks or placement policy enforcements. + */ +#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) +#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) +#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) +#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) + /* - * Action modifiers - doesn't change the zoning + * Watermark modifiers -- controls access to emergency reserves + * + * __GFP_HIGH indicates that the caller is high-priority and that granting + * the request is necessary before the system can make forward progress. + * For example, creating an IO context to clean pages. + * + * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is + * high priority. Users are typically interrupt handlers. This may be + * used in conjunction with __GFP_HIGH + * + * __GFP_MEMALLOC allows access to all memory. This should only be used when + * the caller guarantees the allocation will allow more memory to be freed + * very shortly e.g. process exiting or swapping. Users either should + * be the MM or co-ordinating closely with the VM (e.g. swap over NFS). + * + * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves. + * This takes precedence over the __GFP_MEMALLOC flag if both are set. + * + * __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement. + */ +#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC) +#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) +#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC) +#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) +#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) + +/* + * Reclaim modifiers + * + * __GFP_IO can start physical IO. + * + * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the + * allocator recursing into the filesystem which might already be holding + * locks. + * + * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim. + * This flag can be cleared to avoid unnecessary delays when a fallback + * option is available. + * + * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when + * the low watermark is reached and have it reclaim pages until the high + * watermark is reached. 
A caller may wish to clear this flag when fallback + * options are available and the reclaim is likely to disrupt the system. The + * canonical example is THP allocation where a fallback is cheap but + * reclaim/compaction may cause indirect stalls. + * + * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim. * * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt - * _might_ fail. This depends upon the particular VM implementation. + * _might_ fail. This depends upon the particular VM implementation. * * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller - * cannot handle allocation failures. New users should be evaluated carefully - * (and the flag should be used only when there is no reasonable failure policy) - * but it is definitely preferable to use the flag rather than opencode endless - * loop around allocator. + * cannot handle allocation failures. New users should be evaluated carefully + * (and the flag should be used only when there is no reasonable failure + * policy) but it is definitely preferable to use the flag rather than + * opencode endless loop around allocator. * * __GFP_NORETRY: The VM implementation must not retry indefinitely and will - * return NULL when direct reclaim and memory compaction have failed to allow - * the allocation to succeed. The OOM killer is not called with the current - * implementation. - * - * __GFP_MOVABLE: Flag that this page will be movable by the page migration - * mechanism or reclaimed + * return NULL when direct reclaim and memory compaction have failed to allow + * the allocation to succeed. The OOM killer is not called with the current + * implementation. */ -#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */ -#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */ -#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */ -#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */ -#define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */ -#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */ -#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */ -#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */ -#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */ -#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */ -#define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */ -#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */ -#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves. 
- * This takes precedence over the - * __GFP_MEMALLOC flag if both are - * set - */ -#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */ -#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */ -#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */ -#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */ -#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */ - -#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD) -#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */ -#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */ +#define __GFP_IO ((__force gfp_t)___GFP_IO) +#define __GFP_FS ((__force gfp_t)___GFP_FS) +#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */ +#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */ +#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM)) +#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) +#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) +#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* - * This may seem redundant, but it's a way of annotating false positives vs. - * allocations that simply cannot be supported (e.g. page tables). + * Action modifiers + * + * __GFP_COLD indicates that the caller does not expect the page to be used + * in the near future. Where possible, a cache-cold page will be returned. + * + * __GFP_NOWARN suppresses allocation failure reports. + * + * __GFP_COMP adds compound page metadata. + * + * __GFP_ZERO returns a zeroed page on success. + * + * __GFP_NOTRACK avoids tracking with kmemcheck. + * + * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of + * distinguishing in the source between false positives and allocations that + * cannot be supported (e.g. page tables). + * + * __GFP_OTHER_NODE is for allocations that are on a remote node but that + * should not be accounted for as a remote allocation in vmstat. A + * typical user would be khugepaged collapsing a huge page on a remote + * node. */ +#define __GFP_COLD ((__force gfp_t)___GFP_COLD) +#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) +#define __GFP_COMP ((__force gfp_t)___GFP_COMP) +#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) +#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) +#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) -#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */ +/* Room for N __GFP_FOO bits */ +#define __GFP_BITS_SHIFT 26 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) -/* This equals 0, but use constants in case they ever change */ -#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH) -/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */ -#define GFP_ATOMIC (__GFP_HIGH) -#define GFP_NOIO (__GFP_WAIT) -#define GFP_NOFS (__GFP_WAIT | __GFP_IO) -#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS) -#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \ +/* + * Useful GFP flag combinations that are commonly used. It is recommended + * that subsystems start with one of these combinations and then set/clear + * __GFP_FOO flags as necessary. + * + * GFP_ATOMIC users cannot sleep and need the allocation to succeed. 
A lower + * watermark is applied to allow access to "atomic reserves". + * + * GFP_KERNEL is typical for kernel-internal allocations. The caller requires + * ZONE_NORMAL or a lower zone for direct access but can direct reclaim. + * + * GFP_NOWAIT is for kernel allocations that should not stall for direct + * reclaim, start physical IO or use any filesystem callback. + * + * GFP_NOIO will use direct reclaim to discard clean pages or slab pages + * that do not require the starting of any physical IO. + * + * GFP_NOFS will use direct reclaim but will not use any filesystem interfaces. + * + * GFP_USER is for userspace allocations that also need to be directly + * accessible by the kernel or hardware. It is typically used by hardware + * for buffers that are mapped to userspace (e.g. graphics) that hardware + * still must DMA to. cpuset limits are enforced for these allocations. + * + * GFP_DMA exists for historical reasons and should be avoided where possible. + * The flag indicates that the caller requires that the lowest zone be + * used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but + * it would require careful auditing as some users really require it and + * others use the flag to avoid lowmem reserves in ZONE_DMA and treat the + * lowest zone as a type of emergency reserve. + * + * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit + * address. + * + * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace, + * do not need to be directly accessible by the kernel but that cannot + * move once in use. An example may be a hardware allocation that maps + * data directly into userspace but has no addressing limitations. + * + * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not + * need direct access to but can use kmap() when access is required. They + * are expected to be movable via page reclaim or page migration. Typically, + * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE. + * + * GFP_TRANSHUGE is used for THP allocations. They are compound allocations + * that will fail quickly if memory is not available and will not wake + * kswapd on failure. 
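+ *
+ * (Editor's sketch using only the definitions below, with can_sleep a
+ * hypothetical caller-side condition: a path that may run in atomic
+ * context can start from GFP_KERNEL and clear the direct-reclaim bit
+ * rather than open-coding flag bits:
+ *
+ *	struct page *page;
+ *	gfp_t gfp = GFP_KERNEL;
+ *
+ *	if (!can_sleep)
+ *		gfp &= ~__GFP_DIRECT_RECLAIM;
+ *	page = alloc_page(gfp);
+ *
+ * gfpflags_allow_blocking(), added further down in this header, then
+ * returns false for such a mask.)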
+ */ +#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) +#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) +#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) +#define GFP_NOIO (__GFP_RECLAIM) +#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO) +#define GFP_TEMPORARY (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \ __GFP_RECLAIMABLE) -#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) +#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL) +#define GFP_DMA __GFP_DMA +#define GFP_DMA32 __GFP_DMA32 #define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) #define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) -#define GFP_IOFS (__GFP_IO | __GFP_FS) -#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ - __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ - __GFP_NO_KSWAPD) +#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ + __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \ + ~__GFP_KSWAPD_RECLAIM) -/* This mask makes up all the page movable related flags */ +/* Convert GFP flags to their corresponding migrate type */ #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) +#define GFP_MOVABLE_SHIFT 3 -/* Control page allocator reclaim behavior */ -#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\ - __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ - __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC) - -/* Control slab gfp mask during early boot */ -#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)) - -/* Control allocation constraints */ -#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) - -/* Do not use these with a slab allocator */ -#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK) - -/* Flag - indicates that the buffer will be suitable for DMA. 
Ignored on some - platforms, used as appropriate on others */ - -#define GFP_DMA __GFP_DMA - -/* 4GB DMA on some platforms */ -#define GFP_DMA32 __GFP_DMA32 - -/* Convert GFP flags to their corresponding migrate type */ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) { - WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); + VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); + BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE); + BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE); if (unlikely(page_group_by_mobility_disabled)) return MIGRATE_UNMOVABLE; /* Group based on mobility */ - return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) | - ((gfp_flags & __GFP_RECLAIMABLE) != 0); + return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT; +} +#undef GFP_MOVABLE_MASK +#undef GFP_MOVABLE_SHIFT + +static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) +{ + return gfp_flags & __GFP_DIRECT_RECLAIM; } #ifdef CONFIG_HIGHMEM diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 14cac67c2012..fb0fde686cb1 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -400,6 +400,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio) { return ERR_PTR(-EINVAL); } + static inline int desc_to_gpio(const struct gpio_desc *desc) { /* GPIO can never have been requested */ diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 1aed31c5ffba..d1baebf350d8 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -206,6 +206,9 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, #endif /* CONFIG_GPIOLIB_IRQCHIP */ +int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset); +void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset); + #ifdef CONFIG_PINCTRL /** diff --git a/include/linux/hid.h b/include/linux/hid.h index f17980de2662..251a1d382e23 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -698,8 +698,8 @@ struct hid_driver { int (*input_mapped)(struct hid_device *hdev, struct hid_input *hidinput, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max); - void (*input_configured)(struct hid_device *hdev, - struct hid_input *hidinput); + int (*input_configured)(struct hid_device *hdev, + struct hid_input *hidinput); void (*feature_mapping)(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage); diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 6aefcd0031a6..bb3f3297062a 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -78,7 +78,6 @@ static inline void __kunmap_atomic(void *addr) } #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) -#define kmap_atomic_to_page(ptr) virt_to_page(ptr) #define kmap_flush_unused() do {} while(0) #endif diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 5e35379f58a5..685c262e0be8 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -483,6 +483,17 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, #define hugepages_supported() (HPAGE_SHIFT != 0) #endif +void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm); + +static inline void hugetlb_count_add(long l, struct mm_struct *mm) +{ + atomic_long_add(l, &mm->hugetlb_usage); +} + +static inline void hugetlb_count_sub(long l, struct mm_struct *mm) +{ + atomic_long_sub(l, &mm->hugetlb_usage); +} #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; #define alloc_huge_page(v, a, r) 
NULL @@ -519,6 +530,14 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, { return &mm->page_table_lock; } + +static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m) +{ +} + +static inline void hugetlb_count_sub(long l, struct mm_struct *mm) +{ +} #endif /* CONFIG_HUGETLB_PAGE */ static inline spinlock_t *huge_pte_lock(struct hstate *h, diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index bcc853eccc85..24154c26d469 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -32,7 +32,7 @@ static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) return NULL; - return (struct hugetlb_cgroup *)page[2].lru.next; + return (struct hugetlb_cgroup *)page[2].private; } static inline @@ -42,15 +42,13 @@ int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) return -1; - page[2].lru.next = (void *)h_cg; + page[2].private = (unsigned long)h_cg; return 0; } static inline bool hugetlb_cgroup_disabled(void) { - if (hugetlb_cgrp_subsys.disabled) - return true; - return false; + return !cgroup_subsys_enabled(hugetlb_cgrp_subsys); } extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 54733d5b503e..8fdc17b84739 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -26,6 +26,7 @@ #define _HYPERV_H #include <uapi/linux/hyperv.h> +#include <uapi/asm/hyperv.h> #include <linux/types.h> #include <linux/scatterlist.h> diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index cfa906f28b7a..452c0b0d2f32 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -121,7 +121,7 @@ #define IEEE80211_MAX_SN IEEE80211_SN_MASK #define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1) -static inline int ieee80211_sn_less(u16 sn1, u16 sn2) +static inline bool ieee80211_sn_less(u16 sn1, u16 sn2) { return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1); } @@ -250,7 +250,7 @@ struct ieee80211_qos_hdr { * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_has_tods(__le16 fc) +static inline bool ieee80211_has_tods(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_TODS)) != 0; } @@ -259,7 +259,7 @@ static inline int ieee80211_has_tods(__le16 fc) * ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_has_fromds(__le16 fc) +static inline bool ieee80211_has_fromds(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FROMDS)) != 0; } @@ -268,7 +268,7 @@ static inline int ieee80211_has_fromds(__le16 fc) * ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_has_a4(__le16 fc) +static inline bool ieee80211_has_a4(__le16 fc) { __le16 tmp = cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS); return (fc & tmp) == tmp; @@ -278,7 +278,7 @@ static inline int ieee80211_has_a4(__le16 fc) * ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_has_morefrags(__le16 fc) +static inline bool ieee80211_has_morefrags(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) 
!= 0; } @@ -287,7 +287,7 @@ static inline int ieee80211_has_morefrags(__le16 fc) * ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_has_retry(__le16 fc) +static inline bool ieee80211_has_retry(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_RETRY)) != 0; } @@ -296,7 +296,7 @@ static inline int ieee80211_has_retry(__le16 fc) * ieee80211_has_pm - check if IEEE80211_FCTL_PM is set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_has_pm(__le16 fc) +static inline bool ieee80211_has_pm(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_PM)) != 0; } @@ -305,7 +305,7 @@ static inline int ieee80211_has_pm(__le16 fc) * ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_has_moredata(__le16 fc) +static inline bool ieee80211_has_moredata(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) != 0; } @@ -314,7 +314,7 @@ static inline int ieee80211_has_moredata(__le16 fc) * ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_has_protected(__le16 fc) +static inline bool ieee80211_has_protected(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_PROTECTED)) != 0; } @@ -323,7 +323,7 @@ static inline int ieee80211_has_protected(__le16 fc) * ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_has_order(__le16 fc) +static inline bool ieee80211_has_order(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_ORDER)) != 0; } @@ -332,7 +332,7 @@ static inline int ieee80211_has_order(__le16 fc) * ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_mgmt(__le16 fc) +static inline bool ieee80211_is_mgmt(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT); @@ -342,7 +342,7 @@ static inline int ieee80211_is_mgmt(__le16 fc) * ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_ctl(__le16 fc) +static inline bool ieee80211_is_ctl(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL); @@ -352,7 +352,7 @@ static inline int ieee80211_is_ctl(__le16 fc) * ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_data(__le16 fc) +static inline bool ieee80211_is_data(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == cpu_to_le16(IEEE80211_FTYPE_DATA); @@ -362,7 +362,7 @@ static inline int ieee80211_is_data(__le16 fc) * ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_data_qos(__le16 fc) +static inline bool ieee80211_is_data_qos(__le16 fc) { /* * mask with QOS_DATA rather than IEEE80211_FCTL_STYPE as we just need @@ -376,7 +376,7 @@ static inline int ieee80211_is_data_qos(__le16 fc) * ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_data_present(__le16 fc) +static inline 
bool ieee80211_is_data_present(__le16 fc) { /* * mask with 0x40 and test that that bit is clear to only return true @@ -390,7 +390,7 @@ static inline int ieee80211_is_data_present(__le16 fc) * ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_assoc_req(__le16 fc) +static inline bool ieee80211_is_assoc_req(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_REQ); @@ -400,7 +400,7 @@ static inline int ieee80211_is_assoc_req(__le16 fc) * ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_assoc_resp(__le16 fc) +static inline bool ieee80211_is_assoc_resp(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_RESP); @@ -410,7 +410,7 @@ static inline int ieee80211_is_assoc_resp(__le16 fc) * ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_reassoc_req(__le16 fc) +static inline bool ieee80211_is_reassoc_req(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_REQ); @@ -420,7 +420,7 @@ static inline int ieee80211_is_reassoc_req(__le16 fc) * ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_reassoc_resp(__le16 fc) +static inline bool ieee80211_is_reassoc_resp(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_RESP); @@ -430,7 +430,7 @@ static inline int ieee80211_is_reassoc_resp(__le16 fc) * ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_probe_req(__le16 fc) +static inline bool ieee80211_is_probe_req(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ); @@ -440,7 +440,7 @@ static inline int ieee80211_is_probe_req(__le16 fc) * ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_probe_resp(__le16 fc) +static inline bool ieee80211_is_probe_resp(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP); @@ -450,7 +450,7 @@ static inline int ieee80211_is_probe_resp(__le16 fc) * ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_beacon(__le16 fc) +static inline bool ieee80211_is_beacon(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); @@ -460,7 +460,7 @@ static inline int ieee80211_is_beacon(__le16 fc) * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM * @fc: frame control bytes in 
little-endian byteorder */ -static inline int ieee80211_is_atim(__le16 fc) +static inline bool ieee80211_is_atim(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ATIM); @@ -470,7 +470,7 @@ static inline int ieee80211_is_atim(__le16 fc) * ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_disassoc(__le16 fc) +static inline bool ieee80211_is_disassoc(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DISASSOC); @@ -480,7 +480,7 @@ static inline int ieee80211_is_disassoc(__le16 fc) * ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_auth(__le16 fc) +static inline bool ieee80211_is_auth(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH); @@ -490,7 +490,7 @@ static inline int ieee80211_is_auth(__le16 fc) * ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_deauth(__le16 fc) +static inline bool ieee80211_is_deauth(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH); @@ -500,7 +500,7 @@ static inline int ieee80211_is_deauth(__le16 fc) * ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_action(__le16 fc) +static inline bool ieee80211_is_action(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); @@ -510,7 +510,7 @@ static inline int ieee80211_is_action(__le16 fc) * ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_back_req(__le16 fc) +static inline bool ieee80211_is_back_req(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ); @@ -520,7 +520,7 @@ static inline int ieee80211_is_back_req(__le16 fc) * ieee80211_is_back - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_back(__le16 fc) +static inline bool ieee80211_is_back(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK); @@ -530,7 +530,7 @@ static inline int ieee80211_is_back(__le16 fc) * ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_pspoll(__le16 fc) +static inline bool ieee80211_is_pspoll(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); @@ -540,7 +540,7 @@ static inline int ieee80211_is_pspoll(__le16 fc) * ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS * @fc: frame control bytes in little-endian byteorder */ 
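/*
 * Editor's aside: every ieee80211_is_*() helper in this run follows the same
 * pattern: mask the little-endian frame-control word with
 * IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE and compare against the
 * expected constant. A hypothetical generic checker, not in the header,
 * would be:
 *
 *	static inline bool ieee80211_is_typesub(__le16 fc, u16 ftype, u16 stype)
 *	{
 *		return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE |
 *					 IEEE80211_FCTL_STYPE)) ==
 *		       cpu_to_le16(ftype | stype);
 *	}
 */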
-static inline int ieee80211_is_rts(__le16 fc) +static inline bool ieee80211_is_rts(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); @@ -550,7 +550,7 @@ static inline int ieee80211_is_rts(__le16 fc) * ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_cts(__le16 fc) +static inline bool ieee80211_is_cts(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); @@ -560,7 +560,7 @@ static inline int ieee80211_is_cts(__le16 fc) * ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_ack(__le16 fc) +static inline bool ieee80211_is_ack(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK); @@ -570,7 +570,7 @@ static inline int ieee80211_is_ack(__le16 fc) * ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_cfend(__le16 fc) +static inline bool ieee80211_is_cfend(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFEND); @@ -580,7 +580,7 @@ static inline int ieee80211_is_cfend(__le16 fc) * ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_cfendack(__le16 fc) +static inline bool ieee80211_is_cfendack(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFENDACK); @@ -590,7 +590,7 @@ static inline int ieee80211_is_cfendack(__le16 fc) * ieee80211_is_nullfunc - check if frame is a regular (non-QoS) nullfunc frame * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_nullfunc(__le16 fc) +static inline bool ieee80211_is_nullfunc(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC); @@ -600,7 +600,7 @@ static inline int ieee80211_is_nullfunc(__le16 fc) * ieee80211_is_qos_nullfunc - check if frame is a QoS nullfunc frame * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_qos_nullfunc(__le16 fc) +static inline bool ieee80211_is_qos_nullfunc(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); @@ -624,7 +624,7 @@ static inline bool ieee80211_is_bufferable_mmpdu(__le16 fc) * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set * @seq_ctrl: frame sequence control bytes in little-endian byteorder */ -static inline int ieee80211_is_first_frag(__le16 seq_ctrl) +static inline bool ieee80211_is_first_frag(__le16 seq_ctrl) { return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0; } @@ -1379,6 +1379,7 @@ struct ieee80211_ht_operation { /* block-ack parameters */ +#define IEEE80211_ADDBA_PARAM_AMSDU_MASK 0x0001 #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 #define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C #define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0 @@ -1745,8 
+1746,7 @@ enum ieee80211_eid { WLAN_EID_TIM = 5, WLAN_EID_IBSS_PARAMS = 6, WLAN_EID_COUNTRY = 7, - WLAN_EID_HP_PARAMS = 8, - WLAN_EID_HP_TABLE = 9, + /* 8, 9 reserved */ WLAN_EID_REQUEST = 10, WLAN_EID_QBSS_LOAD = 11, WLAN_EID_EDCA_PARAM_SET = 12, @@ -1932,6 +1932,8 @@ enum ieee80211_category { WLAN_CATEGORY_HT = 7, WLAN_CATEGORY_SA_QUERY = 8, WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, + WLAN_CATEGORY_WNM = 10, + WLAN_CATEGORY_WNM_UNPROTECTED = 11, WLAN_CATEGORY_TDLS = 12, WLAN_CATEGORY_MESH_ACTION = 13, WLAN_CATEGORY_MULTIHOP_ACTION = 14, @@ -2396,7 +2398,10 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) category = ((u8 *) hdr) + 24; return *category != WLAN_CATEGORY_PUBLIC && *category != WLAN_CATEGORY_HT && + *category != WLAN_CATEGORY_WNM_UNPROTECTED && *category != WLAN_CATEGORY_SELF_PROTECTED && + *category != WLAN_CATEGORY_UNPROT_DMG && + *category != WLAN_CATEGORY_VHT && *category != WLAN_CATEGORY_VENDOR_SPECIFIC; } diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h index 1dc1f4ed4001..d3e415674dac 100644 --- a/include/linux/ieee802154.h +++ b/include/linux/ieee802154.h @@ -25,12 +25,22 @@ #include <linux/types.h> #include <linux/random.h> -#include <asm/byteorder.h> #define IEEE802154_MTU 127 #define IEEE802154_ACK_PSDU_LEN 5 #define IEEE802154_MIN_PSDU_LEN 9 #define IEEE802154_FCS_LEN 2 +#define IEEE802154_MAX_AUTH_TAG_LEN 16 + +/* General MAC frame format: + * 2 bytes: Frame Control + * 1 byte: Sequence Number + * 20 bytes: Addressing fields + * 14 bytes: Auxiliary Security Header + */ +#define IEEE802154_MAX_HEADER_LEN (2 + 1 + 20 + 14) +#define IEEE802154_MIN_HEADER_LEN (IEEE802154_ACK_PSDU_LEN - \ + IEEE802154_FCS_LEN) #define IEEE802154_PAN_ID_BROADCAST 0xffff #define IEEE802154_ADDR_SHORT_BROADCAST 0xffff @@ -205,6 +215,41 @@ enum { IEEE802154_SCAN_IN_PROGRESS = 0xfc, }; +/* frame control handling */ +#define IEEE802154_FCTL_FTYPE 0x0003 +#define IEEE802154_FCTL_ACKREQ 0x0020 +#define IEEE802154_FCTL_INTRA_PAN 0x0040 + +#define IEEE802154_FTYPE_DATA 0x0001 + +/* + * ieee802154_is_data - check if type is IEEE802154_FTYPE_DATA + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee802154_is_data(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE802154_FCTL_FTYPE)) == + cpu_to_le16(IEEE802154_FTYPE_DATA); +} + +/** + * ieee802154_is_ackreq - check if acknowledgment request bit is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee802154_is_ackreq(__le16 fc) +{ + return fc & cpu_to_le16(IEEE802154_FCTL_ACKREQ); +} + +/** + * ieee802154_is_intra_pan - check if intra pan id communication + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee802154_is_intra_pan(__le16 fc) +{ + return fc & cpu_to_le16(IEEE802154_FCTL_INTRA_PAN); +} + /** * ieee802154_is_valid_psdu_len - check if psdu len is valid * available lengths: diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index dad8b00beed2..a338a688ee4a 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -46,6 +46,12 @@ struct br_ip_list { #define BR_LEARNING_SYNC BIT(9) #define BR_PROXYARP_WIFI BIT(10) +/* values as per ieee8021QBridgeFdbAgingTime */ +#define BR_MIN_AGEING_TIME (10 * HZ) +#define BR_MAX_AGEING_TIME (1000000 * HZ) + +#define BR_DEFAULT_AGEING_TIME (300 * HZ) + extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); typedef int br_should_route_hook_t(struct sk_buff *skb); diff --git a/include/linux/if_link.h 
b/include/linux/if_link.h index ae5d0d22955d..f923d15b432c 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -24,5 +24,6 @@ struct ifla_vf_info { __u32 min_tx_rate; __u32 max_tx_rate; __u32 rss_query_en; + __u32 trusted; }; #endif /* _LINUX_IF_LINK_H */ diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 908429216d9f..9c9de11549a7 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -110,7 +110,7 @@ struct ip_mc_list { #define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value) #define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value) -extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u16 proto); +extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto); extern int igmp_rcv(struct sk_buff *); extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr); diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 3c17cd7fdf06..2fe939c73cd2 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -271,6 +271,10 @@ void st_sensors_power_enable(struct iio_dev *indio_dev); void st_sensors_power_disable(struct iio_dev *indio_dev); +int st_sensors_debugfs_reg_access(struct iio_dev *indio_dev, + unsigned reg, unsigned writeval, + unsigned *readval); + int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr); int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable); diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 7bb7f673cb3f..19c94c9acc81 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -294,6 +294,7 @@ static inline s64 iio_get_time_ns(void) #define INDIO_BUFFER_TRIGGERED 0x02 #define INDIO_BUFFER_SOFTWARE 0x04 #define INDIO_BUFFER_HARDWARE 0x08 +#define INDIO_EVENT_TRIGGERED 0x10 #define INDIO_ALL_BUFFER_MODES \ (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE) @@ -457,6 +458,7 @@ struct iio_buffer_setup_ops { * @scan_index_timestamp:[INTERN] cache of the index to the timestamp * @trig: [INTERN] current device trigger (buffer modes) * @pollfunc: [DRIVER] function run on trigger being received + * @pollfunc_event: [DRIVER] function run on events trigger being received * @channels: [DRIVER] channel specification structure table * @num_channels: [DRIVER] number of channels specified in @channels. 
* @channel_attr_list: [INTERN] keep track of automatically created channel @@ -495,6 +497,7 @@ struct iio_dev { unsigned scan_index_timestamp; struct iio_trigger *trig; struct iio_poll_func *pollfunc; + struct iio_poll_func *pollfunc_event; struct iio_chan_spec const *channels; int num_channels; diff --git a/include/linux/iio/triggered_event.h b/include/linux/iio/triggered_event.h new file mode 100644 index 000000000000..8fe8537085bb --- /dev/null +++ b/include/linux/iio/triggered_event.h @@ -0,0 +1,11 @@ +#ifndef _LINUX_IIO_TRIGGERED_EVENT_H_ +#define _LINUX_IIO_TRIGGERED_EVENT_H_ + +#include <linux/interrupt.h> + +int iio_triggered_event_setup(struct iio_dev *indio_dev, + irqreturn_t (*h)(int irq, void *p), + irqreturn_t (*thread)(int irq, void *p)); +void iio_triggered_event_cleanup(struct iio_dev *indio_dev); + +#endif diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index a4328cea376a..ee971f335a8b 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -171,7 +171,7 @@ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst, __be32 local, int scope); struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask); -static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa) +static __inline__ bool inet_ifa_match(__be32 addr, struct in_ifaddr *ifa) { return !((addr^ifa->ifa_address)&ifa->ifa_mask); } @@ -180,15 +180,15 @@ static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa) * Check if a mask is acceptable. */ -static __inline__ int bad_mask(__be32 mask, __be32 addr) +static __inline__ bool bad_mask(__be32 mask, __be32 addr) { __u32 hmask; if (addr & (mask = ~mask)) - return 1; + return true; hmask = ntohl(mask); if (hmask & (hmask+1)) - return 1; - return 0; + return true; + return false; } #define for_primary_ifa(in_dev) { struct in_ifaddr *ifa; \ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index e38681f4912d..1c1ff7e4faa4 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -25,13 +25,6 @@ extern struct files_struct init_files; extern struct fs_struct init_fs; -#ifdef CONFIG_CGROUPS -#define INIT_GROUP_RWSEM(sig) \ - .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem), -#else -#define INIT_GROUP_RWSEM(sig) -#endif - #ifdef CONFIG_CPUSETS #define INIT_CPUSET_SEQ(tsk) \ .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq), @@ -59,12 +52,12 @@ extern struct fs_struct init_fs; .rlim = INIT_RLIMITS, \ .cputimer = { \ .cputime_atomic = INIT_CPUTIME_ATOMIC, \ - .running = 0, \ + .running = false, \ + .checking_timer = false, \ }, \ INIT_PREV_CPUTIME(sig) \ .cred_guard_mutex = \ __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ - INIT_GROUP_RWSEM(sig) \ } extern struct nsproxy init_nsproxy; diff --git a/include/linux/input.h b/include/linux/input.h index 82ce323b9986..1e967694e9a5 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -469,6 +469,8 @@ int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke); int input_set_keycode(struct input_dev *dev, const struct input_keymap_entry *ke); +void input_enable_softrepeat(struct input_dev *dev, int delay, int period); + extern struct class input_class; /** diff --git a/include/linux/input/edt-ft5x06.h b/include/linux/input/edt-ft5x06.h deleted file mode 100644 index 8a1e0d1a0124..000000000000 --- a/include/linux/input/edt-ft5x06.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef _EDT_FT5X06_H -#define _EDT_FT5X06_H - -/* - * Copyright (c) 2012 Simon Budig, 
<simon.budig@kernelconcepts.de> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - */ - -struct edt_ft5x06_platform_data { - int irq_pin; - int reset_pin; - - /* startup defaults for operational parameters */ - bool use_parameters; - u8 gain; - u8 threshold; - u8 offset; - u8 report_rate; -}; - -#endif /* _EDT_FT5X06_H */ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 6240063bdcac..821273ca4873 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -1,5 +1,9 @@ /* - * Copyright (c) 2006, Intel Corporation. + * Copyright © 2006-2015, Intel Corporation. + * + * Authors: Ashok Raj <ashok.raj@intel.com> + * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> + * David Woodhouse <David.Woodhouse@intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -13,10 +17,6 @@ * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. - * - * Copyright (C) 2006-2008 Intel Corporation - * Author: Ashok Raj <ashok.raj@intel.com> - * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> */ #ifndef _INTEL_IOMMU_H_ @@ -25,7 +25,10 @@ #include <linux/types.h> #include <linux/iova.h> #include <linux/io.h> +#include <linux/idr.h> #include <linux/dma_remapping.h> +#include <linux/mmu_notifier.h> +#include <linux/list.h> #include <asm/cacheflush.h> #include <asm/iommu.h> @@ -57,16 +60,21 @@ #define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */ #define DMAR_ICS_REG 0x9c /* Invalidation complete status register */ #define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */ +#define DMAR_PQH_REG 0xc0 /* Page request queue head register */ +#define DMAR_PQT_REG 0xc8 /* Page request queue tail register */ +#define DMAR_PQA_REG 0xd0 /* Page request queue address register */ +#define DMAR_PRS_REG 0xdc /* Page request status register */ +#define DMAR_PECTL_REG 0xe0 /* Page request event control register */ +#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */ +#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */ +#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */ #define OFFSET_STRIDE (9) -/* -#define dmar_readl(dmar, reg) readl(dmar + reg) -#define dmar_readq(dmar, reg) ({ \ - u32 lo, hi; \ - lo = readl(dmar + reg); \ - hi = readl(dmar + reg + 4); \ - (((u64) hi) << 32) + lo; }) -*/ + +#ifdef CONFIG_64BIT +#define dmar_readq(a) readq(a) +#define dmar_writeq(a,v) writeq(v,a) +#else static inline u64 dmar_readq(void __iomem *addr) { u32 lo, hi; @@ -80,6 +88,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) writel((u32)val, addr); writel((u32)(val >> 32), addr + 4); } +#endif #define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4) #define DMAR_VER_MINOR(v) ((v) & 0x0f) @@ -123,7 +132,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) #define ecap_srs(e) ((e >> 31) & 0x1) #define ecap_ers(e) ((e >> 30) & 0x1) #define ecap_prs(e) ((e >> 29) & 0x1) -/* PASID support used to be on bit 28 */ +#define ecap_broken_pasid(e) ((e >> 28) & 0x1) #define ecap_dis(e) ((e >> 27) & 0x1) #define ecap_nest(e) ((e >> 26) & 0x1) #define ecap_mts(e) ((e >> 25) & 0x1) @@ -253,6 
+262,11 @@ enum { #define QI_DIOTLB_TYPE 0x3 #define QI_IEC_TYPE 0x4 #define QI_IWD_TYPE 0x5 +#define QI_EIOTLB_TYPE 0x6 +#define QI_PC_TYPE 0x7 +#define QI_DEIOTLB_TYPE 0x8 +#define QI_PGRP_RESP_TYPE 0x9 +#define QI_PSTRM_RESP_TYPE 0xa #define QI_IEC_SELECTIVE (((u64)1) << 4) #define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32)) @@ -280,6 +294,53 @@ enum { #define QI_DEV_IOTLB_SIZE 1 #define QI_DEV_IOTLB_MAX_INVS 32 +#define QI_PC_PASID(pasid) (((u64)pasid) << 32) +#define QI_PC_DID(did) (((u64)did) << 16) +#define QI_PC_GRAN(gran) (((u64)gran) << 4) + +#define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0)) +#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1)) + +#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) +#define QI_EIOTLB_GL(gl) (((u64)gl) << 7) +#define QI_EIOTLB_IH(ih) (((u64)ih) << 6) +#define QI_EIOTLB_AM(am) (((u64)am)) +#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32) +#define QI_EIOTLB_DID(did) (((u64)did) << 16) +#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4) + +#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) +#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) +#define QI_DEV_EIOTLB_GLOB(g) ((u64)g) +#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) +#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) +#define QI_DEV_EIOTLB_QDEP(qd) (((qd) & 0x1f) << 16) +#define QI_DEV_EIOTLB_MAX_INVS 32 + +#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55) +#define QI_PGRP_PRIV(priv) (((u64)(priv)) << 32) +#define QI_PGRP_RESP_CODE(res) ((u64)(res)) +#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32) +#define QI_PGRP_DID(did) (((u64)(did)) << 16) +#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4) + +#define QI_PSTRM_ADDR(addr) (((u64)(addr)) & VTD_PAGE_MASK) +#define QI_PSTRM_DEVFN(devfn) (((u64)(devfn)) << 4) +#define QI_PSTRM_RESP_CODE(res) ((u64)(res)) +#define QI_PSTRM_IDX(idx) (((u64)(idx)) << 55) +#define QI_PSTRM_PRIV(priv) (((u64)(priv)) << 32) +#define QI_PSTRM_BUS(bus) (((u64)(bus)) << 24) +#define QI_PSTRM_PASID(pasid) (((u64)(pasid)) << 4) + +#define QI_RESP_SUCCESS 0x0 +#define QI_RESP_INVALID 0x1 +#define QI_RESP_FAILURE 0xf + +#define QI_GRAN_ALL_ALL 0 +#define QI_GRAN_NONG_ALL 1 +#define QI_GRAN_NONG_PASID 2 +#define QI_GRAN_PSI_PASID 3 + struct qi_desc { u64 low, high; }; @@ -327,6 +388,10 @@ enum { #define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0) #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1) +struct pasid_entry; +struct pasid_state_entry; +struct page_req_dsc; + struct intel_iommu { void __iomem *reg; /* Pointer to hardware regs, virtual addr */ u64 reg_phys; /* physical address of hw register set */ @@ -338,7 +403,7 @@ struct intel_iommu { int seq_id; /* sequence id of the iommu */ int agaw; /* agaw of this iommu */ int msagaw; /* max sagaw of this iommu */ - unsigned int irq; + unsigned int irq, pr_irq; u16 segment; /* PCI segment# */ unsigned char name[13]; /* Device Name */ @@ -350,6 +415,18 @@ struct intel_iommu { struct iommu_flush flush; #endif +#ifdef CONFIG_INTEL_IOMMU_SVM + /* These are large and need to be contiguous, so we allocate just + * one for now. We'll maybe want to rethink that if we truly give + * devices away to userspace processes (e.g. for DPDK) and don't + * want to trust that userspace will use *only* the PASID it was + * told to. But while it's all driver-arbitrated, we're fine. 
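+ *
+ * (Editor's sketch: the SVM code can then hand out PASIDs from pasid_idr
+ * roughly as
+ *
+ *	pasid = idr_alloc(&iommu->pasid_idr, svm, 0, pasid_max - 1,
+ *			  GFP_KERNEL);
+ *
+ * where svm and pasid_max are hypothetical driver-local names.)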
*/ + struct pasid_entry *pasid_table; + struct pasid_state_entry *pasid_state_table; + struct page_req_dsc *prq; + unsigned char prq_name[16]; /* Name for PRQ interrupt */ + struct idr pasid_idr; +#endif struct q_inval *qi; /* Queued invalidation info */ u32 *iommu_state; /* Store iommu states between suspend and resume.*/ @@ -389,6 +466,38 @@ extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); extern int dmar_ir_support(void); +#ifdef CONFIG_INTEL_IOMMU_SVM +extern int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu); +extern int intel_svm_free_pasid_tables(struct intel_iommu *iommu); +extern int intel_svm_enable_prq(struct intel_iommu *iommu); +extern int intel_svm_finish_prq(struct intel_iommu *iommu); + +struct svm_dev_ops; + +struct intel_svm_dev { + struct list_head list; + struct rcu_head rcu; + struct device *dev; + struct svm_dev_ops *ops; + int users; + u16 did; + u16 dev_iotlb:1; + u16 sid, qdep; +}; + +struct intel_svm { + struct mmu_notifier notifier; + struct mm_struct *mm; + struct intel_iommu *iommu; + int flags; + int pasid; + struct list_head devs; +}; + +extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev); +extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev); +#endif + extern const struct attribute_group *intel_iommu_groups[]; #endif diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h new file mode 100644 index 000000000000..3c25794042f9 --- /dev/null +++ b/include/linux/intel-svm.h @@ -0,0 +1,121 @@ +/* + * Copyright © 2015 Intel Corporation. + * + * Authors: David Woodhouse <David.Woodhouse@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __INTEL_SVM_H__ +#define __INTEL_SVM_H__ + +struct device; + +struct svm_dev_ops { + void (*fault_cb)(struct device *dev, int pasid, u64 address, + u32 private, int rwxp, int response); +}; + +/* Values for rwxp in fault_cb callback */ +#define SVM_REQ_READ (1<<3) +#define SVM_REQ_WRITE (1<<2) +#define SVM_REQ_EXEC (1<<1) +#define SVM_REQ_PRIV (1<<0) + + +/* + * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main" + * PASID for the current process. Even if a PASID already exists, a new one + * will be allocated. And the PASID allocated with SVM_FLAG_PRIVATE_PASID + * will not be given to subsequent callers. This facility allows a driver to + * disambiguate between multiple device contexts which access the same MM, + * if there is no other way to do so. It should be used sparingly, if at all. + */ +#define SVM_FLAG_PRIVATE_PASID (1<<0) + +/* + * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only + * for access to kernel addresses. No IOTLB flushes are automatically done + * for kernel mappings; it is valid only for access to the kernel's static + * 1:1 mapping of physical memory — not to vmalloc or even module mappings. + * A future API addition may permit the use of such ranges, by means of an + * explicit IOTLB flush call (akin to the DMA API's unmap method).
+ * + * It is unlikely that we will ever hook into flush_tlb_kernel_range() to + * do such IOTLB flushes automatically. + */ +#define SVM_FLAG_SUPERVISOR_MODE (1<<1) + +#ifdef CONFIG_INTEL_IOMMU_SVM + +/** + * intel_svm_bind_mm() - Bind the current process to a PASID + * @dev: Device to be granted access + * @pasid: Address for allocated PASID + * @flags: Flags. Later for requesting supervisor mode, etc. + * @ops: Callbacks to device driver + * + * This function attempts to enable PASID support for the given device. + * If the @pasid argument is non-%NULL, a PASID is allocated for access + * to the MM of the current process. + * + * By using a %NULL value for the @pasid argument, this function can + * be used to simply validate that PASID support is available for the + * given device — i.e. that it is behind an IOMMU which has the + * requisite support, and is enabled. + * + * Page faults are handled transparently by the IOMMU code, and there + * should be no need for the device driver to be involved. If a page + * fault cannot be handled (i.e. it is an invalid address rather than + * one that just needs paging in), then the page request will be completed by + * the core IOMMU code with appropriate status, and the device itself + * can then report the resulting fault to its driver via whatever + * mechanism is appropriate. + * + * Multiple calls from the same process may result in the same PASID + * being re-used. A reference count is kept. + */ +extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, + struct svm_dev_ops *ops); + +/** + * intel_svm_unbind_mm() - Unbind a specified PASID + * @dev: Device for which PASID was allocated + * @pasid: PASID value to be unbound + * + * This function allows a PASID to be retired when the device no + * longer requires access to the address space of a given process. + * + * If the use count for the PASID in question reaches zero, the + * PASID is revoked and may no longer be used by hardware. + * + * Device drivers are required to ensure that no access (including + * page requests) is currently outstanding for the PASID in question + * before calling this function.
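+ * + * A minimal usage sketch of the bind/unbind pair (hypothetical caller; the step that programs the PASID into the device is device-specific and elided): + * + * int pasid, ret; + * + * ret = intel_svm_bind_mm(dev, &pasid, 0, NULL); + * if (ret) + * return ret; + * ... program 'pasid' into the device, issue and drain DMA ... + * intel_svm_unbind_mm(dev, pasid);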
+ */ +extern int intel_svm_unbind_mm(struct device *dev, int pasid); + +#else /* CONFIG_INTEL_IOMMU_SVM */ + +static inline int intel_svm_bind_mm(struct device *dev, int *pasid, + int flags, struct svm_dev_ops *ops) +{ + return -ENOSYS; +} + +static inline int intel_svm_unbind_mm(struct device *dev, int pasid) +{ + BUG(); +} +#endif /* CONFIG_INTEL_IOMMU_SVM */ + +#define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL)) + +#endif /* __INTEL_SVM_H__ */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index be7e75c945e9..ad16809c8596 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -102,6 +102,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *); * @flags: flags (see IRQF_* above) * @thread_fn: interrupt handler function for threaded interrupts * @thread: thread pointer for threaded interrupts + * @secondary: pointer to secondary irqaction (force threading) * @thread_flags: flags related to @thread * @thread_mask: bitmask for keeping track of @thread activity * @dir: pointer to the proc/irq/NN/name entry @@ -113,6 +114,7 @@ struct irqaction { struct irqaction *next; irq_handler_t thread_fn; struct task_struct *thread; + struct irqaction *secondary; unsigned int irq; unsigned int flags; unsigned long thread_flags; diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h new file mode 100644 index 000000000000..11d7e840d913 --- /dev/null +++ b/include/linux/io-64-nonatomic-hi-lo.h @@ -0,0 +1,32 @@ +#ifndef _LINUX_IO_64_NONATOMIC_HI_LO_H_ +#define _LINUX_IO_64_NONATOMIC_HI_LO_H_ + +#include <linux/io.h> +#include <asm-generic/int-ll64.h> + +static inline __u64 hi_lo_readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + high = readl(p + 1); + low = readl(p); + + return low + ((u64)high << 32); +} + +static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val >> 32, addr + 4); + writel(val, addr); +} + +#ifndef readq +#define readq hi_lo_readq +#endif + +#ifndef writeq +#define writeq hi_lo_writeq +#endif + +#endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */ diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h new file mode 100644 index 000000000000..1a4315f97360 --- /dev/null +++ b/include/linux/io-64-nonatomic-lo-hi.h @@ -0,0 +1,32 @@ +#ifndef _LINUX_IO_64_NONATOMIC_LO_HI_H_ +#define _LINUX_IO_64_NONATOMIC_LO_HI_H_ + +#include <linux/io.h> +#include <asm-generic/int-ll64.h> + +static inline __u64 lo_hi_readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} + +static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} + +#ifndef readq +#define readq lo_hi_readq +#endif + +#ifndef writeq +#define writeq lo_hi_writeq +#endif + +#endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */ diff --git a/include/linux/iommu-common.h b/include/linux/iommu-common.h index bbced83b32ee..376a27c9cc6a 100644 --- a/include/linux/iommu-common.h +++ b/include/linux/iommu-common.h @@ -7,6 +7,7 @@ #define IOMMU_POOL_HASHBITS 4 #define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS) +#define IOMMU_ERROR_CODE (~(unsigned long) 0) struct iommu_pool { unsigned long start; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index f9c1b6d0f2e4..f28dff313b07 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -81,6 
+81,7 @@ struct iommu_domain { iommu_fault_handler_t handler; void *handler_token; struct iommu_domain_geometry geometry; + void *iova_cookie; }; enum iommu_cap { @@ -167,7 +168,7 @@ struct iommu_ops { phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); int (*add_device)(struct device *dev); void (*remove_device)(struct device *dev); - int (*device_group)(struct device *dev, unsigned int *groupid); + struct iommu_group *(*device_group)(struct device *dev); int (*domain_get_attr)(struct iommu_domain *domain, enum iommu_attr attr, void *data); int (*domain_set_attr)(struct iommu_domain *domain, @@ -316,6 +317,11 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain, return domain->ops->map_sg(domain, iova, sg, nents, prot); } +/* PCI device grouping function */ +extern struct iommu_group *pci_device_group(struct device *dev); +/* Generic device grouping function */ +extern struct iommu_group *generic_device_group(struct device *dev); + #else /* CONFIG_IOMMU_API */ struct iommu_ops {}; diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 388e3ae94f7a..24bea087e7af 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -94,6 +94,7 @@ struct resource { /* PnP I/O specific bits (IORESOURCE_BITS) */ #define IORESOURCE_IO_16BIT_ADDR (1<<0) #define IORESOURCE_IO_FIXED (1<<1) +#define IORESOURCE_IO_SPARSE (1<<2) /* PCI ROM control bits (IORESOURCE_BITS) */ #define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */ diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index f1f32af6d9b9..0ef2a97ccdb5 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -264,9 +264,9 @@ struct tcp6_timewait_sock { }; #if IS_ENABLED(CONFIG_IPV6) -static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk) +static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk) { - return inet_sk(__sk)->pinet6; + return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL; } static inline struct raw6_sock *raw6_sk(const struct sock *sk) diff --git a/include/linux/irq.h b/include/linux/irq.h index 11bf09288ddb..3c1c96786248 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -67,11 +67,12 @@ enum irqchip_irq_state; * request/setup_irq() * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context - * IRQ_NESTED_TRHEAD - Interrupt nests into another thread + * IRQ_NESTED_THREAD - Interrupt nests into another thread * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable * IRQ_IS_POLLED - Always polled by another interrupt. Exclude * it from the spurious interrupt detection * mechanism and from core side polling. + * IRQ_DISABLE_UNLAZY - Disable lazy irq disable */ enum { IRQ_TYPE_NONE = 0x00000000, @@ -97,13 +98,14 @@ enum { IRQ_NOTHREAD = (1 << 16), IRQ_PER_CPU_DEVID = (1 << 17), IRQ_IS_POLLED = (1 << 18), + IRQ_DISABLE_UNLAZY = (1 << 19), }; #define IRQF_MODIFY_MASK \ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ - IRQ_IS_POLLED) + IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY) #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) @@ -297,21 +299,6 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d) __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; } -/* - * Functions for chained handlers which can be enabled/disabled by the - * standard disable_irq/enable_irq calls. 
Must be called with - irq_desc->lock held. - */ -static inline void irqd_set_chained_irq_inprogress(struct irq_data *d) -{ - __irqd_to_state(d) |= IRQD_IRQ_INPROGRESS; -} - -static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) -{ - __irqd_to_state(d) &= ~IRQD_IRQ_INPROGRESS; -} - static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) { return d->hwirq; @@ -452,6 +439,8 @@ extern int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask, bool force); extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info); +extern void irq_migrate_all_off_this_cpu(void); + #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) void irq_move_irq(struct irq_data *data); void irq_move_masked_irq(struct irq_data *data); diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h new file mode 100644 index 000000000000..1551b5b2f4c2 --- /dev/null +++ b/include/linux/irqbypass.h @@ -0,0 +1,90 @@ +/* + * IRQ offload/bypass manager + * + * Copyright (C) 2015 Red Hat, Inc. + * Copyright (c) 2015 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef IRQBYPASS_H +#define IRQBYPASS_H + +#include <linux/list.h> + +struct irq_bypass_consumer; + +/* + * Theory of operation + * + * The IRQ bypass manager is a simple set of lists and callbacks that allows + * IRQ producers (ex. physical interrupt sources) to be matched to IRQ + * consumers (ex. virtualization hardware that allows IRQ bypass or offload) + * via a shared token (ex. eventfd_ctx). Producers and consumers register + * independently. When a token match is found, the optional @stop callback + * will be called for each participant. The pair will then be connected via + * the @add_* callbacks, and finally the optional @start callback will allow + * any final coordination. When either participant is unregistered, the + * process is repeated using the @del_* callbacks in place of the @add_* + * callbacks. Match tokens must be unique per producer/consumer; 1:N pairings + * are not supported. + */ + +/** + * struct irq_bypass_producer - IRQ bypass producer definition + * @node: IRQ bypass manager private list management + * @token: opaque token to match between producer and consumer + * @irq: Linux IRQ number for the producer device + * @add_consumer: Connect the IRQ producer to an IRQ consumer (optional) + * @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional) + * @stop: Perform any quiesce operations necessary prior to add/del (optional) + * @start: Perform any startup operations necessary after add/del (optional) + * + * The IRQ bypass producer structure represents an interrupt source for + * participation in possible host bypass, for instance an interrupt vector + * for a physical device assigned to a VM.
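+ * + * A minimal registration sketch (hypothetical VFIO-style caller; the token would typically be the eventfd_ctx pointer that the matching consumer also registers): + * + * prod->token = eventfd_ctx; + * prod->irq = host_irq; + * ret = irq_bypass_register_producer(prod); + * ... + * irq_bypass_unregister_producer(prod);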
+ */ +struct irq_bypass_producer { + struct list_head node; + void *token; + int irq; + int (*add_consumer)(struct irq_bypass_producer *, + struct irq_bypass_consumer *); + void (*del_consumer)(struct irq_bypass_producer *, + struct irq_bypass_consumer *); + void (*stop)(struct irq_bypass_producer *); + void (*start)(struct irq_bypass_producer *); +}; + +/** + * struct irq_bypass_consumer - IRQ bypass consumer definition + * @node: IRQ bypass manager private list management + * @token: opaque token to match between producer and consumer + * @add_producer: Connect the IRQ consumer to an IRQ producer + * @del_producer: Disconnect the IRQ consumer from an IRQ producer + * @stop: Perform any quiesce operations necessary prior to add/del (optional) + * @start: Perform any startup operations necessary after add/del (optional) + * + * The IRQ bypass consumer structure represents an interrupt sink for + * participation in possible host bypass, for instance a hypervisor may + * support offloads to allow bypassing the host entirely or offload + * portions of the interrupt handling to the VM. + */ +struct irq_bypass_consumer { + struct list_head node; + void *token; + int (*add_producer)(struct irq_bypass_consumer *, + struct irq_bypass_producer *); + void (*del_producer)(struct irq_bypass_consumer *, + struct irq_bypass_producer *); + void (*stop)(struct irq_bypass_consumer *); + void (*start)(struct irq_bypass_consumer *); +}; + +int irq_bypass_register_producer(struct irq_bypass_producer *); +void irq_bypass_unregister_producer(struct irq_bypass_producer *); +int irq_bypass_register_consumer(struct irq_bypass_consumer *); +void irq_bypass_unregister_consumer(struct irq_bypass_consumer *); + +#endif /* IRQBYPASS_H */ diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h index 638887376e58..89c34b200671 100644 --- a/include/linux/irqchip.h +++ b/include/linux/irqchip.h @@ -11,6 +11,7 @@ #ifndef _LINUX_IRQCHIP_H #define _LINUX_IRQCHIP_H +#include <linux/acpi.h> #include <linux/of.h> /* @@ -25,6 +26,22 @@ */ #define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn) +/* + * This macro must be used by the different irqchip drivers to declare + * the association between their version and their initialization function. + * + * @name: name that must be unique across all IRQCHIP_ACPI_DECLARE entries in the + * same file. + * @subtable: Subtable to be identified in MADT + * @validate: Function to be called on that subtable to check its validity. + * Can be NULL. + * @data: data to be checked by the validate function. + * @fn: initialization function + */ +#define IRQCHIP_ACPI_DECLARE(name, subtable, validate, data, fn) \ + ACPI_DECLARE_PROBE_ENTRY(irqchip, name, ACPI_SIG_MADT, \ + subtable, validate, data, fn) + #ifdef CONFIG_IRQCHIP void irqchip_init(void); #else diff --git a/include/linux/irqchip/arm-gic-acpi.h b/include/linux/irqchip/arm-gic-acpi.h deleted file mode 100644 index de3419ed3937..000000000000 --- a/include/linux/irqchip/arm-gic-acpi.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (C) 2014, Linaro Ltd. - * Author: Tomasz Nowicki <tomasz.nowicki@linaro.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation.
- */ - -#ifndef ARM_GIC_ACPI_H_ -#define ARM_GIC_ACPI_H_ - -#ifdef CONFIG_ACPI - -/* - * Hard code here, we can not get memory size from MADT (but FDT does), - * Actually no need to do that, because this size can be inferred - * from GIC spec. - */ -#define ACPI_GICV2_DIST_MEM_SIZE (SZ_4K) -#define ACPI_GIC_CPU_IF_MEM_SIZE (SZ_8K) - -struct acpi_table_header; - -int gic_v2_acpi_init(struct acpi_table_header *table); -void acpi_gic_init(void); -#else -static inline void acpi_gic_init(void) { } -#endif - -#endif /* ARM_GIC_ACPI_H_ */ diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 9eeeb9589acf..c9ae0c6ec050 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -18,8 +18,6 @@ #ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H #define __LINUX_IRQCHIP_ARM_GIC_V3_H -#include <asm/sysreg.h> - /* * Distributor registers. We assume we're running non-secure, with ARE * being set. Secure-only and non-ARE registers are not described. @@ -231,6 +229,7 @@ #define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT) #define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) #define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGES_MAX 256 #define GITS_BASER_TYPE_NONE 0 #define GITS_BASER_TYPE_DEVICE 1 @@ -266,16 +265,16 @@ /* * Hypervisor interface registers (SRE only) */ -#define ICH_LR_VIRTUAL_ID_MASK ((1UL << 32) - 1) - -#define ICH_LR_EOI (1UL << 41) -#define ICH_LR_GROUP (1UL << 60) -#define ICH_LR_HW (1UL << 61) -#define ICH_LR_STATE (3UL << 62) -#define ICH_LR_PENDING_BIT (1UL << 62) -#define ICH_LR_ACTIVE_BIT (1UL << 63) +#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1) + +#define ICH_LR_EOI (1ULL << 41) +#define ICH_LR_GROUP (1ULL << 60) +#define ICH_LR_HW (1ULL << 61) +#define ICH_LR_STATE (3ULL << 62) +#define ICH_LR_PENDING_BIT (1ULL << 62) +#define ICH_LR_ACTIVE_BIT (1ULL << 63) #define ICH_LR_PHYS_ID_SHIFT 32 -#define ICH_LR_PHYS_ID_MASK (0x3ffUL << ICH_LR_PHYS_ID_SHIFT) +#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT) #define ICH_MISR_EOI (1 << 0) #define ICH_MISR_U (1 << 1) @@ -292,19 +291,8 @@ #define ICH_VMCR_PMR_SHIFT 24 #define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) -#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1) -#define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1) -#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0) -#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5) -#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) -#define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4) -#define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5) -#define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) - #define ICC_IAR1_EL1_SPURIOUS 0x3ff -#define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5) - #define ICC_SRE_EL2_SRE (1 << 0) #define ICC_SRE_EL2_ENABLE (1 << 3) @@ -320,54 +308,10 @@ #define ICC_SGI1R_AFFINITY_3_SHIFT 48 #define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) -/* - * System register definitions - */ -#define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4) -#define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0) -#define ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1) -#define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2) -#define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3) -#define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5) -#define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7) - -#define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x) -#define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x) - -#define ICH_LR0_EL2 __LR0_EL2(0) -#define ICH_LR1_EL2 __LR0_EL2(1) -#define ICH_LR2_EL2 __LR0_EL2(2) -#define ICH_LR3_EL2 __LR0_EL2(3) -#define ICH_LR4_EL2 
__LR0_EL2(4) -#define ICH_LR5_EL2 __LR0_EL2(5) -#define ICH_LR6_EL2 __LR0_EL2(6) -#define ICH_LR7_EL2 __LR0_EL2(7) -#define ICH_LR8_EL2 __LR8_EL2(0) -#define ICH_LR9_EL2 __LR8_EL2(1) -#define ICH_LR10_EL2 __LR8_EL2(2) -#define ICH_LR11_EL2 __LR8_EL2(3) -#define ICH_LR12_EL2 __LR8_EL2(4) -#define ICH_LR13_EL2 __LR8_EL2(5) -#define ICH_LR14_EL2 __LR8_EL2(6) -#define ICH_LR15_EL2 __LR8_EL2(7) - -#define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x) -#define ICH_AP0R0_EL2 __AP0Rx_EL2(0) -#define ICH_AP0R1_EL2 __AP0Rx_EL2(1) -#define ICH_AP0R2_EL2 __AP0Rx_EL2(2) -#define ICH_AP0R3_EL2 __AP0Rx_EL2(3) - -#define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x) -#define ICH_AP1R0_EL2 __AP1Rx_EL2(0) -#define ICH_AP1R1_EL2 __AP1Rx_EL2(1) -#define ICH_AP1R2_EL2 __AP1Rx_EL2(2) -#define ICH_AP1R3_EL2 __AP1Rx_EL2(3) +#include <asm/arch_gicv3.h> #ifndef __ASSEMBLY__ -#include <linux/stringify.h> -#include <asm/msi.h> - /* * We need a value to serve as a irq-type for LPIs. Choose one that will * hopefully pique the interest of the reviewer. @@ -385,23 +329,26 @@ struct rdists { u64 flags; }; -static inline void gic_write_eoir(u64 irq) -{ - asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq)); - isb(); -} - -static inline void gic_write_dir(u64 irq) -{ - asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" (irq)); - isb(); -} - struct irq_domain; int its_cpu_init(void); int its_init(struct device_node *node, struct rdists *rdists, struct irq_domain *domain); +static inline bool gic_enable_sre(void) +{ + u32 val; + + val = gic_read_sre(); + if (val & ICC_SRE_EL1_SRE) + return true; + + val |= ICC_SRE_EL1_SRE; + gic_write_sre(val); + val = gic_read_sre(); + + return !!(val & ICC_SRE_EL1_SRE); +} + #endif #endif diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index b8901dfd9e95..bae69e5d693c 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -100,16 +100,11 @@ struct device_node; -void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *, - u32 offset, struct device_node *); void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); int gic_cpu_if_down(unsigned int gic_nr); -static inline void gic_init(unsigned int nr, int start, - void __iomem *dist , void __iomem *cpu) -{ - gic_init_bases(nr, start, dist, cpu, 0, NULL); -} +void gic_init(unsigned int nr, int start, + void __iomem *dist , void __iomem *cpu); int gicv2m_of_init(struct device_node *node, struct irq_domain *parent); diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index d3ca79236fb0..d5e5c5bef28c 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -5,9 +5,10 @@ * helpful for interrupt controllers to implement mapping between hardware * irq numbers and the Linux irq number space. * - * irq_domains also have a hook for translating device tree interrupt - * representation into a hardware irq number that can be mapped back to a - * Linux irq number without any extra platform support code. + * irq_domains also have hooks for translating device tree or other + * firmware interrupt representations into a hardware irq number that + * can be mapped back to a Linux irq number without any extra platform + * support code. * * Interrupt controller "domain" data structure. This could be defined as a * irq domain controller. That is, it handles the mapping between hardware @@ -17,16 +18,12 @@ * model). It's the domain callbacks that are responsible for setting the * irq_chip on a given irq_desc after it's been mapped. 
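+ * + * As a sketch of the consumer side (irq_fwspec and irq_create_fwspec_mapping() are introduced further down in this header; a two-cell firmware binding is assumed purely for illustration): + * + * struct irq_fwspec fwspec = { + * .fwnode = of_node_to_fwnode(np), + * .param_count = 2, + * .param = { hwirq, IRQ_TYPE_LEVEL_HIGH }, + * }; + * virq = irq_create_fwspec_mapping(&fwspec);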
* - * The host code and data structures are agnostic to whether or not - * we use an open firmware device-tree. We do have references to struct - * device_node in two places: in irq_find_host() to find the host matching - * a given interrupt controller node, and of course as an argument to its - * counterpart domain->ops->match() callback. However, those are treated as - * generic pointers by the core and the fact that it's actually a device-node - * pointer is purely a convention between callers and implementation. This - * code could thus be used on other architectures by replacing those two - * by some sort of arch-specific void * "token" used to identify interrupt - * controllers. + * The host code and data structures use a fwnode_handle pointer to + * identify the domain. In some cases, and in order to preserve source + * code compatibility, this fwnode pointer is "upgraded" to a DT + * device_node. For those firmware infrastructures that do not provide + * a unique identifier for an interrupt controller, the irq_domain + * code offers a fwnode allocator. */ #ifndef _LINUX_IRQDOMAIN_H @@ -34,6 +31,7 @@ #include <linux/types.h> #include <linux/irqhandler.h> +#include <linux/of.h> #include <linux/radix-tree.h> struct device_node; @@ -45,6 +43,24 @@ struct irq_data; /* Number of irqs reserved for a legacy isa controller */ #define NUM_ISA_INTERRUPTS 16 +#define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16 + +/** + * struct irq_fwspec - generic IRQ specifier structure + * + * @fwnode: Pointer to a firmware-specific descriptor + * @param_count: Number of device-specific parameters + * @param: Device-specific parameters + * + * This structure, directly modeled after of_phandle_args, is used to + * pass a device-specific description of an interrupt. + */ +struct irq_fwspec { + struct fwnode_handle *fwnode; + int param_count; + u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS]; +}; + /* * Should several domains have the same device node, but serve * different purposes (for example one domain is for PCI/MSI, and the @@ -91,6 +107,8 @@ struct irq_domain_ops { unsigned int nr_irqs); void (*activate)(struct irq_domain *d, struct irq_data *irq_data); void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); + int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec, + unsigned long *out_hwirq, unsigned int *out_type); #endif }; @@ -130,7 +148,7 @@ struct irq_domain { unsigned int flags; /* Optional data */ - struct device_node *of_node; + struct fwnode_handle *fwnode; enum irq_domain_bus_token bus_token; struct irq_domain_chip_generic *gc; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY @@ -161,8 +179,15 @@ enum { IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), }; +static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) +{ + return to_of_node(d->fwnode); +} + #ifdef CONFIG_IRQ_DOMAIN -struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, +struct fwnode_handle *irq_domain_alloc_fwnode(void *data); +void irq_domain_free_fwnode(struct fwnode_handle *fwnode); +struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, irq_hw_number_t hwirq_max, int direct_max, const struct irq_domain_ops *ops, void *host_data); @@ -177,10 +202,21 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, irq_hw_number_t first_hwirq, const struct irq_domain_ops *ops, void *host_data); -extern struct irq_domain *irq_find_matching_host(struct device_node *node, - enum irq_domain_bus_token bus_token); +extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle 
*fwnode, + enum irq_domain_bus_token bus_token); extern void irq_set_default_host(struct irq_domain *host); +static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) +{ + return node ? &node->fwnode : NULL; +} + +static inline struct irq_domain *irq_find_matching_host(struct device_node *node, + enum irq_domain_bus_token bus_token) +{ + return irq_find_matching_fwnode(of_node_to_fwnode(node), bus_token); +} + static inline struct irq_domain *irq_find_host(struct device_node *node) { return irq_find_matching_host(node, DOMAIN_BUS_ANY); @@ -198,14 +234,14 @@ static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_no const struct irq_domain_ops *ops, void *host_data) { - return __irq_domain_add(of_node, size, size, 0, ops, host_data); + return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data); } static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, unsigned int max_irq, const struct irq_domain_ops *ops, void *host_data) { - return __irq_domain_add(of_node, 0, max_irq, max_irq, ops, host_data); + return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data); } static inline struct irq_domain *irq_domain_add_legacy_isa( struct device_node *of_node, @@ -219,7 +255,22 @@ static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node const struct irq_domain_ops *ops, void *host_data) { - return __irq_domain_add(of_node, 0, ~0, 0, ops, host_data); + return __irq_domain_add(of_node_to_fwnode(of_node), 0, ~0, 0, ops, host_data); +} + +static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle *fwnode, + unsigned int size, + const struct irq_domain_ops *ops, + void *host_data) +{ + return __irq_domain_add(fwnode, size, size, 0, ops, host_data); +} + +static inline struct irq_domain *irq_domain_create_tree(struct fwnode_handle *fwnode, + const struct irq_domain_ops *ops, + void *host_data) +{ + return __irq_domain_add(fwnode, 0, ~0, 0, ops, host_data); } extern void irq_domain_remove(struct irq_domain *host); @@ -234,6 +285,7 @@ extern void irq_domain_disassociate(struct irq_domain *domain, extern unsigned int irq_create_mapping(struct irq_domain *host, irq_hw_number_t hwirq); +extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec); extern void irq_dispose_mapping(unsigned int virq); /** @@ -285,10 +337,23 @@ extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, void *chip_data, irq_flow_handler_t handler, void *handler_data, const char *handler_name); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, +extern struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent, unsigned int flags, unsigned int size, - struct device_node *node, + struct fwnode_handle *fwnode, const struct irq_domain_ops *ops, void *host_data); + +static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, + unsigned int flags, + unsigned int size, + struct device_node *node, + const struct irq_domain_ops *ops, + void *host_data) +{ + return irq_domain_create_hierarchy(parent, flags, size, + of_node_to_fwnode(node), + ops, host_data); +} + extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, bool realloc); diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h index e374e369fb2f..eb1bdcf95f2e 100644 --- a/include/linux/irqreturn.h +++ 
b/include/linux/irqreturn.h @@ -3,7 +3,7 @@ /** * enum irqreturn - * @IRQ_NONE interrupt was not from this device + * @IRQ_NONE interrupt was not from this device or was not handled * @IRQ_HANDLED interrupt was handled by this device * @IRQ_WAKE_THREAD handler requests to wake the handler thread */ diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index df07e78487d5..65407f6c9120 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -278,6 +278,7 @@ typedef struct journal_superblock_s /* 0x0400 */ } journal_superblock_t; +/* Use the jbd2_{has,set,clear}_feature_* helpers; these will be removed */ #define JBD2_HAS_COMPAT_FEATURE(j,mask) \ ((j)->j_format_version >= 2 && \ ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask)))) @@ -288,7 +289,7 @@ typedef struct journal_superblock_s ((j)->j_format_version >= 2 && \ ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask)))) -#define JBD2_FEATURE_COMPAT_CHECKSUM 0x00000001 +#define JBD2_FEATURE_COMPAT_CHECKSUM 0x00000001 #define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001 #define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002 @@ -296,6 +297,8 @@ typedef struct journal_superblock_s #define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008 #define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010 +/* See "journal feature predicate functions" below */ + /* Features known to this kernel version: */ #define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM #define JBD2_KNOWN_ROCOMPAT_FEATURES 0 @@ -1034,6 +1037,69 @@ struct journal_s __u32 j_csum_seed; }; +/* journal feature predicate functions */ +#define JBD2_FEATURE_COMPAT_FUNCS(name, flagname) \ +static inline bool jbd2_has_feature_##name(journal_t *j) \ +{ \ + return ((j)->j_format_version >= 2 && \ + ((j)->j_superblock->s_feature_compat & \ + cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname)) != 0); \ +} \ +static inline void jbd2_set_feature_##name(journal_t *j) \ +{ \ + (j)->j_superblock->s_feature_compat |= \ + cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname); \ +} \ +static inline void jbd2_clear_feature_##name(journal_t *j) \ +{ \ + (j)->j_superblock->s_feature_compat &= \ + ~cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname); \ +} + +#define JBD2_FEATURE_RO_COMPAT_FUNCS(name, flagname) \ +static inline bool jbd2_has_feature_##name(journal_t *j) \ +{ \ + return ((j)->j_format_version >= 2 && \ + ((j)->j_superblock->s_feature_ro_compat & \ + cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname)) != 0); \ +} \ +static inline void jbd2_set_feature_##name(journal_t *j) \ +{ \ + (j)->j_superblock->s_feature_ro_compat |= \ + cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname); \ +} \ +static inline void jbd2_clear_feature_##name(journal_t *j) \ +{ \ + (j)->j_superblock->s_feature_ro_compat &= \ + ~cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname); \ +} + +#define JBD2_FEATURE_INCOMPAT_FUNCS(name, flagname) \ +static inline bool jbd2_has_feature_##name(journal_t *j) \ +{ \ + return ((j)->j_format_version >= 2 && \ + ((j)->j_superblock->s_feature_incompat & \ + cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname)) != 0); \ +} \ +static inline void jbd2_set_feature_##name(journal_t *j) \ +{ \ + (j)->j_superblock->s_feature_incompat |= \ + cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname); \ +} \ +static inline void jbd2_clear_feature_##name(journal_t *j) \ +{ \ + (j)->j_superblock->s_feature_incompat &= \ + ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname); \ +} + +JBD2_FEATURE_COMPAT_FUNCS(checksum, CHECKSUM) + +JBD2_FEATURE_INCOMPAT_FUNCS(revoke, REVOKE) +JBD2_FEATURE_INCOMPAT_FUNCS(64bit, 64BIT) +JBD2_FEATURE_INCOMPAT_FUNCS(async_commit, 
ASYNC_COMMIT) +JBD2_FEATURE_INCOMPAT_FUNCS(csum2, CSUM_V2) +JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3) + /* * Journal flag definitions */ @@ -1046,6 +1112,7 @@ struct journal_s #define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file * data write error in ordered * mode */ +#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */ /* * Function declarations for the journaling transaction and buffer @@ -1338,13 +1405,17 @@ static inline int tid_geq(tid_t x, tid_t y) extern int jbd2_journal_blocks_per_page(struct inode *inode); extern size_t journal_tag_bytes(journal_t *journal); +static inline bool jbd2_journal_has_csum_v2or3_feature(journal_t *j) +{ + return jbd2_has_feature_csum2(j) || jbd2_has_feature_csum3(j); +} + static inline int jbd2_journal_has_csum_v2or3(journal_t *journal) { - if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) || - JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3)) - return 1; + WARN_ON_ONCE(jbd2_journal_has_csum_v2or3_feature(journal) && + journal->j_chksum_driver == NULL); - return 0; + return journal->j_chksum_driver != NULL; } /* @@ -1444,4 +1515,7 @@ static inline tid_t jbd2_get_latest_transaction(journal_t *journal) #endif /* __KERNEL__ */ +#define EFSBADCRC EBADMSG /* Bad CRC detected */ +#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ + #endif /* _LINUX_JBD2_H */ diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index f1094238ab2a..8dde55974f18 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -214,11 +214,6 @@ static inline int jump_label_apply_nops(struct module *mod) #define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE #define jump_label_enabled static_key_enabled -static inline bool static_key_enabled(struct static_key *key) -{ - return static_key_count(key) > 0; -} - static inline void static_key_enable(struct static_key *key) { int count = static_key_count(key); @@ -265,6 +260,17 @@ struct static_key_false { #define DEFINE_STATIC_KEY_FALSE(name) \ struct static_key_false name = STATIC_KEY_FALSE_INIT +extern bool ____wrong_branch_error(void); + +#define static_key_enabled(x) \ +({ \ + if (!__builtin_types_compatible_p(typeof(*x), struct static_key) && \ + !__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\ + !__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \ + ____wrong_branch_error(); \ + static_key_count((struct static_key *)x) > 0; \ +}) + #ifdef HAVE_JUMP_LABEL /* @@ -323,8 +329,6 @@ struct static_key_false { * See jump_label_type() / jump_label_init_type(). 
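+ * + * A minimal usage sketch (illustrative only; 'fast_path_key' is a made-up name): + * + * DEFINE_STATIC_KEY_FALSE(fast_path_key); + * + * if (static_branch_unlikely(&fast_path_key)) + * do_fast_path(); + * + * Slow-path code can then flip the branch with static_branch_enable(&fast_path_key), and the type-checking static_key_enabled() added above reports its current state.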
*/ -extern bool ____wrong_branch_error(void); - #define static_branch_likely(x) \ ({ \ bool branch; \ diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h index c838abe3ee0a..052c7b32cc91 100644 --- a/include/linux/kdev_t.h +++ b/include/linux/kdev_t.h @@ -20,7 +20,7 @@ }) /* acceptable for old filesystems */ -static inline int old_valid_dev(dev_t dev) +static inline bool old_valid_dev(dev_t dev) { return MAJOR(dev) < 256 && MINOR(dev) < 256; } @@ -35,7 +35,7 @@ static inline dev_t old_decode_dev(u16 val) return MKDEV((val >> 8) & 255, val & 255); } -static inline int new_valid_dev(dev_t dev) +static inline bool new_valid_dev(dev_t dev) { return 1; } @@ -54,11 +54,6 @@ static inline dev_t new_decode_dev(u32 dev) return MKDEV(major, minor); } -static inline int huge_valid_dev(dev_t dev) -{ - return 1; -} - static inline u64 huge_encode_dev(dev_t dev) { return new_encode_dev(dev); } diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 5582410727cb..350dfb08aee3 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -200,28 +200,28 @@ extern int _cond_resched(void); #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) -/* - * abs() handles unsigned and signed longs, ints, shorts and chars. For all - * input types abs() returns a signed long. - * abs() should not be used for 64-bit types (s64, u64, long long) - use abs64() - * for those. +/** + * abs - return absolute value of an argument + * @x: the value. If it is of an unsigned type, it is converted to a signed type + * first (s64, long or int depending on its size). + * + * Return: the absolute value of x. If x is 64-bit, the macro's return type is + * s64; otherwise it is signed long. */ -#define abs(x) ({ \ - long ret; \ - if (sizeof(x) == sizeof(long)) { \ - long __x = (x); \ - ret = (__x < 0) ? -__x : __x; \ - } else { \ - int __x = (x); \ - ret = (__x < 0) ? -__x : __x; \ - } \ - ret; \ - }) - -#define abs64(x) ({ \ - s64 __x = (x); \ - (__x < 0) ? -__x : __x; \ - }) +#define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({ \ + s64 __x = (x); \ + (__x < 0) ? -__x : __x; \ + }), ({ \ + long ret; \ + if (sizeof(x) == sizeof(long)) { \ + long __x = (x); \ + ret = (__x < 0) ?
-__x : __x; \ + } \ + ret; \ + })) /** * reciprocal_scale - "scale" a value into range [0, ep_ro) @@ -413,6 +413,8 @@ extern __printf(2, 3) char *kasprintf(gfp_t gfp, const char *fmt, ...); extern __printf(2, 0) char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); +extern __printf(2, 0) +const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args); extern __scanf(2, 3) int sscanf(const char *, const char *, ...); diff --git a/include/linux/key-type.h b/include/linux/key-type.h index ff9f1d394235..7463355a198b 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -40,8 +40,7 @@ struct key_construction { */ struct key_preparsed_payload { char *description; /* Proposed key description (or NULL) */ - void *type_data[2]; /* Private key-type data */ - void *payload[2]; /* Proposed payload */ + union key_payload payload; /* Proposed payload */ const void *data; /* Raw data */ size_t datalen; /* Raw datalen */ size_t quotalen; /* Quota length for proposed payload */ diff --git a/include/linux/key.h b/include/linux/key.h index e1d4715f3222..66f705243985 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -89,6 +89,11 @@ struct keyring_index_key { size_t desc_len; }; +union key_payload { + void __rcu *rcu_data0; + void *data[4]; +}; + /*****************************************************************************/ /* * key reference with possession attribute handling @@ -186,28 +191,18 @@ struct key { }; }; - /* type specific data - * - this is used by the keyring type to index the name - */ - union { - struct list_head link; - unsigned long x[2]; - void *p[2]; - int reject_error; - } type_data; - /* key data * - this is used to hold the data actually used in cryptography or * whatever */ union { - union { - unsigned long value; - void __rcu *rcudata; - void *data; - void *data2[2]; - } payload; - struct assoc_array keys; + union key_payload payload; + struct { + /* Keyring bits */ + struct list_head name_link; + struct assoc_array keys; + }; + int reject_error; }; }; @@ -336,12 +331,12 @@ static inline bool key_is_instantiated(const struct key *key) } #define rcu_dereference_key(KEY) \ - (rcu_dereference_protected((KEY)->payload.rcudata, \ + (rcu_dereference_protected((KEY)->payload.rcu_data0, \ rwsem_is_locked(&((struct key *)(KEY))->sem))) #define rcu_assign_keypointer(KEY, PAYLOAD) \ do { \ - rcu_assign_pointer((KEY)->payload.rcudata, (PAYLOAD)); \ + rcu_assign_pointer((KEY)->payload.rcu_data0, (PAYLOAD)); \ } while (0) #ifdef CONFIG_SYSCTL diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 637f67002c5a..e6284591599e 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -66,7 +66,7 @@ struct kobject { struct kobject *parent; struct kset *kset; struct kobj_type *ktype; - struct kernfs_node *sd; + struct kernfs_node *sd; /* sysfs directory entry */ struct kref kref; #ifdef CONFIG_DEBUG_KOBJECT_RELEASE struct delayed_work release; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 1bef9e21e725..242a6d2b53ff 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -24,6 +24,7 @@ #include <linux/err.h> #include <linux/irqflags.h> #include <linux/context_tracking.h> +#include <linux/irqbypass.h> #include <asm/signal.h> #include <linux/kvm.h> @@ -140,6 +141,8 @@ static inline bool is_error_page(struct page *page) #define KVM_REQ_APIC_PAGE_RELOAD 25 #define KVM_REQ_SMI 26 #define KVM_REQ_HV_CRASH 27 +#define KVM_REQ_IOAPIC_EOI_EXIT 28 +#define KVM_REQ_HV_RESET 29 #define 
KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 @@ -231,6 +234,9 @@ struct kvm_vcpu { unsigned long requests; unsigned long guest_debug; + int pre_pcpu; + struct list_head blocked_vcpu_list; + struct mutex mutex; struct kvm_run *run; @@ -329,6 +335,18 @@ struct kvm_kernel_irq_routing_entry { struct hlist_node link; }; +#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING +struct kvm_irq_routing_table { + int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS]; + u32 nr_rt_entries; + /* + * Array indexed by gsi. Each entry contains list of irq chips + * the gsi is connected to. + */ + struct hlist_head map[0]; +}; +#endif + #ifndef KVM_PRIVATE_MEM_SLOTS #define KVM_PRIVATE_MEM_SLOTS 0 #endif @@ -455,10 +473,14 @@ void vcpu_put(struct kvm_vcpu *vcpu); #ifdef __KVM_HAVE_IOAPIC void kvm_vcpu_request_scan_ioapic(struct kvm *kvm); +void kvm_arch_irq_routing_update(struct kvm *kvm); #else static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm) { } +static inline void kvm_arch_irq_routing_update(struct kvm *kvm) +{ +} #endif #ifdef CONFIG_HAVE_KVM_IRQFD @@ -625,6 +647,8 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); void kvm_vcpu_block(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); void kvm_vcpu_kick(struct kvm_vcpu *vcpu); int kvm_vcpu_yield_to(struct kvm_vcpu *target); void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu); @@ -803,10 +827,13 @@ int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin); int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status); -int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level); int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, int irq_source_id, int level, bool line_status); +int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, + int level, bool line_status); bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin); +void kvm_notify_acked_gsi(struct kvm *kvm, int gsi); void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); void kvm_register_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); @@ -1002,6 +1029,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq) #endif int kvm_setup_default_irq_routing(struct kvm *kvm); +int kvm_setup_empty_irq_routing(struct kvm *kvm); int kvm_set_irq_routing(struct kvm *kvm, const struct kvm_irq_routing_entry *entries, unsigned nr, @@ -1144,5 +1172,15 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) { } #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ -#endif +#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS +int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, + struct irq_bypass_producer *); +void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *, + struct irq_bypass_producer *); +void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *); +void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *); +int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, + uint32_t guest_irq, bool set); +#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ +#endif diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h new file mode 100644 index 000000000000..0c1de05098c8 --- /dev/null +++ b/include/linux/kvm_irqfd.h @@ -0,0 +1,71 @@ +/* + * This program is 
free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * irqfd: Allows an fd to be used to inject an interrupt to the guest + * Credit goes to Avi Kivity for the original idea. + */ + +#ifndef __LINUX_KVM_IRQFD_H +#define __LINUX_KVM_IRQFD_H + +#include <linux/kvm_host.h> +#include <linux/poll.h> + +/* + * Resampling irqfds are a special variety of irqfds used to emulate + * level triggered interrupts. The interrupt is asserted on eventfd + * trigger. On acknowledgment through the irq ack notifier, the + * interrupt is de-asserted and userspace is notified through the + * resamplefd. All resamplers on the same gsi are de-asserted + * together, so we don't need to track the state of each individual + * user. We can also therefore share the same irq source ID. + */ +struct kvm_kernel_irqfd_resampler { + struct kvm *kvm; + /* + * List of resampling struct _irqfd objects sharing this gsi. + * RCU list modified under kvm->irqfds.resampler_lock + */ + struct list_head list; + struct kvm_irq_ack_notifier notifier; + /* + * Entry in list of kvm->irqfd.resampler_list. Use for sharing + * resamplers among irqfds on the same gsi. + * Accessed and modified under kvm->irqfds.resampler_lock + */ + struct list_head link; +}; + +struct kvm_kernel_irqfd { + /* Used for MSI fast-path */ + struct kvm *kvm; + wait_queue_t wait; + /* Update side is protected by irqfds.lock */ + struct kvm_kernel_irq_routing_entry irq_entry; + seqcount_t irq_entry_sc; + /* Used for level IRQ fast-path */ + int gsi; + struct work_struct inject; + /* The resampler used by this irqfd (resampler-only) */ + struct kvm_kernel_irqfd_resampler *resampler; + /* Eventfd notified on resample (resampler-only) */ + struct eventfd_ctx *resamplefd; + /* Entry in list of irqfds for a resampler (resampler-only) */ + struct list_head resampler_link; + /* Used for setup/shutdown */ + struct eventfd_ctx *eventfd; + struct list_head list; + poll_table pt; + struct work_struct shutdown; + struct irq_bypass_consumer consumer; + struct irq_bypass_producer *producer; +}; + +#endif /* __LINUX_KVM_IRQFD_H */ diff --git a/include/linux/leds.h b/include/linux/leds.h index b122eeafb5dc..fa359c79c825 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -283,6 +283,13 @@ static inline void led_trigger_register_simple(const char *name, static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {} static inline void led_trigger_event(struct led_trigger *trigger, enum led_brightness event) {} +static inline void led_trigger_blink(struct led_trigger *trigger, + unsigned long *delay_on, + unsigned long *delay_off) {} +static inline void led_trigger_blink_oneshot(struct led_trigger *trigger, + unsigned long *delay_on, + unsigned long *delay_off, + int invert) {} static inline void led_trigger_set_default(struct led_classdev *led_cdev) {} static inline void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger) {} diff --git a/include/linux/libata.h b/include/linux/libata.h index c9cfbcdb8d14..83577f8fd15b 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -254,6 +254,7 @@ enum { ATA_PFLAG_PIO32 = (1 << 20), 
/* 32bit PIO */ ATA_PFLAG_PIO32CHANGE = (1 << 21), /* 32bit PIO can be turned on/off */ + ATA_PFLAG_EXTERNAL = (1 << 22), /* eSATA/external port */ /* struct ata_queued_cmd flags */ ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */ diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h new file mode 100644 index 000000000000..5ebd70d12f35 --- /dev/null +++ b/include/linux/lightnvm.h @@ -0,0 +1,522 @@ +#ifndef NVM_H +#define NVM_H + +enum { + NVM_IO_OK = 0, + NVM_IO_REQUEUE = 1, + NVM_IO_DONE = 2, + NVM_IO_ERR = 3, + + NVM_IOTYPE_NONE = 0, + NVM_IOTYPE_GC = 1, +}; + +#ifdef CONFIG_NVM + +#include <linux/blkdev.h> +#include <linux/types.h> +#include <linux/file.h> +#include <linux/dmapool.h> + +enum { + /* HW Responsibilities */ + NVM_RSP_L2P = 1 << 0, + NVM_RSP_ECC = 1 << 1, + + /* Physical Addressing Mode */ + NVM_ADDRMODE_LINEAR = 0, + NVM_ADDRMODE_CHANNEL = 1, + + /* Plane programming mode for LUN */ + NVM_PLANE_SINGLE = 0, + NVM_PLANE_DOUBLE = 1, + NVM_PLANE_QUAD = 2, + + /* Status codes */ + NVM_RSP_SUCCESS = 0x0, + NVM_RSP_NOT_CHANGEABLE = 0x1, + NVM_RSP_ERR_FAILWRITE = 0x40ff, + NVM_RSP_ERR_EMPTYPAGE = 0x42ff, + + /* Device opcodes */ + NVM_OP_HBREAD = 0x02, + NVM_OP_HBWRITE = 0x81, + NVM_OP_PWRITE = 0x91, + NVM_OP_PREAD = 0x92, + NVM_OP_ERASE = 0x90, + + /* PPA Command Flags */ + NVM_IO_SNGL_ACCESS = 0x0, + NVM_IO_DUAL_ACCESS = 0x1, + NVM_IO_QUAD_ACCESS = 0x2, + + NVM_IO_SUSPEND = 0x80, + NVM_IO_SLC_MODE = 0x100, + NVM_IO_SCRAMBLE_DISABLE = 0x200, +}; + +struct nvm_id_group { + u8 mtype; + u8 fmtype; + u16 res16; + u8 num_ch; + u8 num_lun; + u8 num_pln; + u16 num_blk; + u16 num_pg; + u16 fpg_sz; + u16 csecs; + u16 sos; + u32 trdt; + u32 trdm; + u32 tprt; + u32 tprm; + u32 tbet; + u32 tbem; + u32 mpos; + u16 cpar; + u8 res[913]; +} __packed; + +struct nvm_addr_format { + u8 ch_offset; + u8 ch_len; + u8 lun_offset; + u8 lun_len; + u8 pln_offset; + u8 pln_len; + u8 blk_offset; + u8 blk_len; + u8 pg_offset; + u8 pg_len; + u8 sect_offset; + u8 sect_len; + u8 res[4]; +}; + +struct nvm_id { + u8 ver_id; + u8 vmnt; + u8 cgrps; + u8 res[5]; + u32 cap; + u32 dom; + struct nvm_addr_format ppaf; + u8 ppat; + u8 resv[224]; + struct nvm_id_group groups[4]; +} __packed; + +struct nvm_target { + struct list_head list; + struct nvm_tgt_type *type; + struct gendisk *disk; +}; + +struct nvm_tgt_instance { + struct nvm_tgt_type *tt; +}; + +#define ADDR_EMPTY (~0ULL) + +#define NVM_VERSION_MAJOR 1 +#define NVM_VERSION_MINOR 0 +#define NVM_VERSION_PATCH 0 + +#define NVM_SEC_BITS (8) +#define NVM_PL_BITS (6) +#define NVM_PG_BITS (16) +#define NVM_BLK_BITS (16) +#define NVM_LUN_BITS (10) +#define NVM_CH_BITS (8) + +struct ppa_addr { + union { + /* Channel-based PPA format in nand 4x2x2x2x8x10 */ + struct { + u64 ch : 4; + u64 sec : 2; /* 4 sectors per page */ + u64 pl : 2; /* 4 planes per LUN */ + u64 lun : 2; /* 4 LUNs per channel */ + u64 pg : 8; /* 256 pages per block */ + u64 blk : 10;/* 1024 blocks per plane */ + u64 resved : 36; + } chnl; + + /* Generic structure for all addresses */ + struct { + u64 sec : NVM_SEC_BITS; + u64 pl : NVM_PL_BITS; + u64 pg : NVM_PG_BITS; + u64 blk : NVM_BLK_BITS; + u64 lun : NVM_LUN_BITS; + u64 ch : NVM_CH_BITS; + } g; + + u64 ppa; + }; +} __packed; + +struct nvm_rq { + struct nvm_tgt_instance *ins; + struct nvm_dev *dev; + + struct bio *bio; + + union { + struct ppa_addr ppa_addr; + dma_addr_t dma_ppa_list; + }; + + struct ppa_addr *ppa_list; + + void *metadata; + dma_addr_t dma_metadata; + + uint8_t opcode; + uint16_t nr_pages; + uint16_t
flags; +}; + +static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu) +{ + return pdu - sizeof(struct nvm_rq); +} + +static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) +{ + return rqdata + 1; +} + +struct nvm_block; + +typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); +typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *); +typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *); +typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32, + nvm_l2p_update_fn *, void *); +typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int, + nvm_bb_update_fn *, void *); +typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int); +typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *); +typedef int (nvm_erase_blk_fn)(struct request_queue *, struct nvm_rq *); +typedef void *(nvm_create_dma_pool_fn)(struct request_queue *, char *); +typedef void (nvm_destroy_dma_pool_fn)(void *); +typedef void *(nvm_dev_dma_alloc_fn)(struct request_queue *, void *, gfp_t, + dma_addr_t *); +typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t); + +struct nvm_dev_ops { + nvm_id_fn *identity; + nvm_get_l2p_tbl_fn *get_l2p_tbl; + nvm_op_bb_tbl_fn *get_bb_tbl; + nvm_op_set_bb_fn *set_bb; + + nvm_submit_io_fn *submit_io; + nvm_erase_blk_fn *erase_block; + + nvm_create_dma_pool_fn *create_dma_pool; + nvm_destroy_dma_pool_fn *destroy_dma_pool; + nvm_dev_dma_alloc_fn *dev_dma_alloc; + nvm_dev_dma_free_fn *dev_dma_free; + + uint8_t max_phys_sect; +}; + +struct nvm_lun { + int id; + + int lun_id; + int chnl_id; + + unsigned int nr_free_blocks; /* Number of unused blocks */ + struct nvm_block *blocks; + + spinlock_t lock; +}; + +struct nvm_block { + struct list_head list; + struct nvm_lun *lun; + unsigned long id; + + void *priv; + int type; +}; + +struct nvm_dev { + struct nvm_dev_ops *ops; + + struct list_head devices; + struct list_head online_targets; + + /* Media manager */ + struct nvmm_type *mt; + void *mp; + + /* Device information */ + int nr_chnls; + int nr_planes; + int luns_per_chnl; + int sec_per_pg; /* only sectors for a single page */ + int pgs_per_blk; + int blks_per_lun; + int sec_size; + int oob_size; + int addr_mode; + struct nvm_addr_format addr_format; + + /* Calculated/Cached values. These do not reflect the actual usable + * blocks at run-time. 
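+ * + * A worked example with a hypothetical geometry, assuming each cached value is the product of the previous one and the next unit count: sec_per_pg = 4 and nr_planes = 4 give sec_per_pl = 16; with pgs_per_blk = 256 that makes sec_per_blk = 4096, and with blks_per_lun = 1024, sec_per_lun = 4194304.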
+ */ + int max_rq_size; + int plane_mode; /* drive device in single, double or quad mode */ + + int sec_per_pl; /* all sectors across planes */ + int sec_per_blk; + int sec_per_lun; + + unsigned long total_pages; + unsigned long total_blocks; + int nr_luns; + unsigned max_pages_per_blk; + + void *ppalist_pool; + + struct nvm_id identity; + + /* Backend device */ + struct request_queue *q; + char name[DISK_NAME_LEN]; +}; + +/* fallback conversion */ +static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev, + struct ppa_addr r) +{ + struct ppa_addr l; + + l.ppa = r.g.sec + + r.g.pg * dev->sec_per_pg + + r.g.blk * (dev->pgs_per_blk * + dev->sec_per_pg) + + r.g.lun * (dev->blks_per_lun * + dev->pgs_per_blk * + dev->sec_per_pg) + + r.g.ch * (dev->blks_per_lun * + dev->pgs_per_blk * + dev->luns_per_chnl * + dev->sec_per_pg); + + return l; +} + +/* fallback conversion */ +static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev, + struct ppa_addr r) +{ + struct ppa_addr l; + int secs, pgs, blks, luns; + sector_t ppa = r.ppa; + + l.ppa = 0; + + div_u64_rem(ppa, dev->sec_per_pg, &secs); + l.g.sec = secs; + + sector_div(ppa, dev->sec_per_pg); + div_u64_rem(ppa, dev->sec_per_blk, &pgs); + l.g.pg = pgs; + + sector_div(ppa, dev->pgs_per_blk); + div_u64_rem(ppa, dev->blks_per_lun, &blks); + l.g.blk = blks; + + sector_div(ppa, dev->blks_per_lun); + div_u64_rem(ppa, dev->luns_per_chnl, &luns); + l.g.lun = luns; + + sector_div(ppa, dev->luns_per_chnl); + l.g.ch = ppa; + + return l; +} + +static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r) +{ + struct ppa_addr l; + + l.ppa = 0; + + l.chnl.sec = r.g.sec; + l.chnl.pl = r.g.pl; + l.chnl.pg = r.g.pg; + l.chnl.blk = r.g.blk; + l.chnl.lun = r.g.lun; + l.chnl.ch = r.g.ch; + + return l; +} + +static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r) +{ + struct ppa_addr l; + + l.ppa = 0; + + l.g.sec = r.chnl.sec; + l.g.pl = r.chnl.pl; + l.g.pg = r.chnl.pg; + l.g.blk = r.chnl.blk; + l.g.lun = r.chnl.lun; + l.g.ch = r.chnl.ch; + + return l; +} + +static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev, + struct ppa_addr gppa) +{ + switch (dev->addr_mode) { + case NVM_ADDRMODE_LINEAR: + return __linear_to_generic_addr(dev, gppa); + case NVM_ADDRMODE_CHANNEL: + return __chnl_to_generic_addr(gppa); + default: + BUG(); + } + return gppa; +} + +static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev, + struct ppa_addr gppa) +{ + switch (dev->addr_mode) { + case NVM_ADDRMODE_LINEAR: + return __generic_to_linear_addr(dev, gppa); + case NVM_ADDRMODE_CHANNEL: + return __generic_to_chnl_addr(gppa); + default: + BUG(); + } + return gppa; +} + +static inline int ppa_empty(struct ppa_addr ppa_addr) +{ + return (ppa_addr.ppa == ADDR_EMPTY); +} + +static inline void ppa_set_empty(struct ppa_addr *ppa_addr) +{ + ppa_addr->ppa = ADDR_EMPTY; +} + +static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev, + struct nvm_block *blk) +{ + struct ppa_addr ppa; + struct nvm_lun *lun = blk->lun; + + ppa.ppa = 0; + ppa.g.blk = blk->id % dev->blks_per_lun; + ppa.g.lun = lun->lun_id; + ppa.g.ch = lun->chnl_id; + + return ppa; +} + +typedef void (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); +typedef sector_t (nvm_tgt_capacity_fn)(void *); +typedef int (nvm_tgt_end_io_fn)(struct nvm_rq *, int); +typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int); +typedef void (nvm_tgt_exit_fn)(void *);
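To make the linear fallback above concrete, a worked example under a hypothetical geometry of 4 sectors/page, 256 pages/block, 1024 blocks/LUN and 4 LUNs/channel (these numbers are illustrative only):

	/*
	 * linear = sec
	 *        + pg  * sec_per_pg
	 *        + blk * (pgs_per_blk * sec_per_pg)
	 *        + lun * (blks_per_lun * pgs_per_blk * sec_per_pg)
	 *        + ch  * (blks_per_lun * pgs_per_blk * luns_per_chnl * sec_per_pg)
	 *
	 * For ch=1, lun=2, blk=3, pg=4, sec=1:
	 *   1 + 4*4 + 3*1024 + 2*1048576 + 1*4194304 = 6294545
	 */

__linear_to_generic_addr() inverts this by peeling the factors off again: take the remainder at each level (sector, then page, then block, then LUN) and keep the final quotient as the channel.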
+ +struct nvm_tgt_type { + const char *name; + unsigned int version[3]; + + /* target entry points */ + nvm_tgt_make_rq_fn *make_rq; + nvm_tgt_capacity_fn *capacity; + nvm_tgt_end_io_fn *end_io; + + /* module-specific init/teardown */ + nvm_tgt_init_fn *init; + nvm_tgt_exit_fn *exit; + + /* For internal use */ + struct list_head list; +}; + +extern int nvm_register_target(struct nvm_tgt_type *); +extern void nvm_unregister_target(struct nvm_tgt_type *); + +extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *); +extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t); + +typedef int (nvmm_register_fn)(struct nvm_dev *); +typedef void (nvmm_unregister_fn)(struct nvm_dev *); +typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *, + struct nvm_lun *, unsigned long); +typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *); +typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *); +typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *); +typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *); +typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); +typedef int (nvmm_end_io_fn)(struct nvm_rq *, int); +typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, + unsigned long); +typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); +typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *); + +struct nvmm_type { + const char *name; + unsigned int version[3]; + + nvmm_register_fn *register_mgr; + nvmm_unregister_fn *unregister_mgr; + + /* Block administration callbacks */ + nvmm_get_blk_fn *get_blk; + nvmm_put_blk_fn *put_blk; + nvmm_open_blk_fn *open_blk; + nvmm_close_blk_fn *close_blk; + nvmm_flush_blk_fn *flush_blk; + + nvmm_submit_io_fn *submit_io; + nvmm_end_io_fn *end_io; + nvmm_erase_blk_fn *erase_blk; + + /* Configuration management */ + nvmm_get_lun_fn *get_lun; + + /* Statistics */ + nvmm_free_blocks_print_fn *free_blocks_print; + struct list_head list; +}; + +extern int nvm_register_mgr(struct nvmm_type *); +extern void nvm_unregister_mgr(struct nvmm_type *); + +extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *, + unsigned long); +extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *); + +extern int nvm_register(struct request_queue *, char *, + struct nvm_dev_ops *); +extern void nvm_unregister(char *); + +extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *); +extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); +#else /* CONFIG_NVM */ +struct nvm_dev_ops; + +static inline int nvm_register(struct request_queue *q, char *disk_name, + struct nvm_dev_ops *ops) +{ + return -EINVAL; +} +static inline void nvm_unregister(char *disk_name) {} +#endif /* CONFIG_NVM */ +#endif /* LIGHTNVM.H */ diff --git a/include/linux/list.h b/include/linux/list.h index 3e3e64a61002..993395a2e55c 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -87,7 +87,7 @@ static inline void list_add_tail(struct list_head *new, struct list_head *head) static inline void __list_del(struct list_head * prev, struct list_head * next) { next->prev = prev; - prev->next = next; + WRITE_ONCE(prev->next, next); } /** @@ -615,7 +615,8 @@ static inline void __hlist_del(struct hlist_node *n) { struct hlist_node *next = n->next; struct hlist_node **pprev = n->pprev; - *pprev = next; + + WRITE_ONCE(*pprev, next); if (next) next->pprev = pprev; }
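The list.h hunks above (and the list_bl/list_nulls hunks that follow) switch the final unlink store to WRITE_ONCE(). This matters for lockless readers that may traverse the list concurrently: the annotation keeps the compiler from tearing or duplicating the store, so a racing reader observes either the old or the new pointer, never a partial write. A minimal sketch of the pattern, assuming an RCU-style lockless reader:

	/* writer side (unlink), as in __list_del() above */
	WRITE_ONCE(prev->next, next);

	/* reader side, running concurrently without the writer's lock */
	struct list_head *p = READ_ONCE(head->next);
	/* p is either the unlinked node or its successor, never garbage */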
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h index 2eb88556c5c5..8132214e8efd 100644 --- a/include/linux/list_bl.h +++ b/include/linux/list_bl.h @@ -93,9 +93,10 @@ static inline void __hlist_bl_del(struct hlist_bl_node *n) LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); /* pprev may be `first`, so be careful not to lose the lock bit */ - *pprev = (struct hlist_bl_node *) + WRITE_ONCE(*pprev, + (struct hlist_bl_node *) ((unsigned long)next | - ((unsigned long)*pprev & LIST_BL_LOCKMASK)); + ((unsigned long)*pprev & LIST_BL_LOCKMASK))); if (next) next->pprev = pprev; } diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h index f266661d2666..444d2b1313bd 100644 --- a/include/linux/list_nulls.h +++ b/include/linux/list_nulls.h @@ -76,7 +76,8 @@ static inline void __hlist_nulls_del(struct hlist_nulls_node *n) { struct hlist_nulls_node *next = n->next; struct hlist_nulls_node **pprev = n->pprev; - *pprev = next; + + WRITE_ONCE(*pprev, next); if (!is_a_nulls(next)) next->pprev = pprev; } diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index 0962b2ca628a..e746919530f5 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -8,8 +8,8 @@ struct mei_cl_device; struct mei_device; -typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device, - u32 events, void *context); +typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev, + u32 events, void *context); /** * struct mei_cl_device - MEI device handle @@ -45,7 +45,7 @@ struct mei_cl_device { char name[MEI_CL_NAME_SIZE]; struct work_struct event_work; - mei_cl_event_cb_t event_cb; + mei_cldev_event_cb_t event_cb; void *event_context; unsigned long events_mask; unsigned long events; @@ -62,33 +62,37 @@ struct mei_cl_driver { const struct mei_cl_device_id *id_table; - int (*probe)(struct mei_cl_device *dev, + int (*probe)(struct mei_cl_device *cldev, const struct mei_cl_device_id *id); - int (*remove)(struct mei_cl_device *dev); + int (*remove)(struct mei_cl_device *cldev); }; -int __mei_cl_driver_register(struct mei_cl_driver *driver, +int __mei_cldev_driver_register(struct mei_cl_driver *cldrv, struct module *owner); -#define mei_cl_driver_register(driver) \ - __mei_cl_driver_register(driver, THIS_MODULE) +#define mei_cldev_driver_register(cldrv) \ + __mei_cldev_driver_register(cldrv, THIS_MODULE) -void mei_cl_driver_unregister(struct mei_cl_driver *driver); +void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv); -ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length); -ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length); +ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length); +ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); -int mei_cl_register_event_cb(struct mei_cl_device *device, - unsigned long event_mask, - mei_cl_event_cb_t read_cb, void *context); +int mei_cldev_register_event_cb(struct mei_cl_device *cldev, + unsigned long event_mask, + mei_cldev_event_cb_t read_cb, void *context); #define MEI_CL_EVENT_RX 0 #define MEI_CL_EVENT_TX 1 #define MEI_CL_EVENT_NOTIF 2 -void *mei_cl_get_drvdata(const struct mei_cl_device *device); -void mei_cl_set_drvdata(struct mei_cl_device *device, void *data); +const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev); +u8 mei_cldev_ver(const struct mei_cl_device *cldev); +void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev); +void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data); + +int mei_cldev_enable(struct mei_cl_device *cldev); +int mei_cldev_disable(struct
mei_cl_device *cldev); +bool mei_cldev_enabled(struct mei_cl_device *cldev); #endif /* _LINUX_MEI_CL_BUS_H */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index c518eb589260..24daf8fc4d7c 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -89,10 +89,6 @@ int memblock_add_range(struct memblock_type *type, phys_addr_t base, phys_addr_t size, int nid, unsigned long flags); -int memblock_remove_range(struct memblock_type *type, - phys_addr_t base, - phys_addr_t size); - void __next_mem_range(u64 *idx, int nid, ulong flags, struct memblock_type *type_a, struct memblock_type *type_b, phys_addr_t *out_start, diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 6452ff4c463f..cd0e2413c358 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -213,6 +213,9 @@ struct mem_cgroup { /* OOM-Killer disable */ int oom_kill_disable; + /* handle for "memory.events" */ + struct cgroup_file events_file; + /* protect arrays of thresholds */ struct mutex thresholds_lock; @@ -285,6 +288,7 @@ static inline void mem_cgroup_events(struct mem_cgroup *memcg, unsigned int nr) { this_cpu_add(memcg->stat->events[idx], nr); + cgroup_file_notify(&memcg->events_file); } bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg); @@ -297,8 +301,7 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg); void mem_cgroup_uncharge(struct page *page); void mem_cgroup_uncharge_list(struct list_head *page_list); -void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, - bool lrucare); +void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage); struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); @@ -346,9 +349,7 @@ ino_t page_cgroup_ino(struct page *page); static inline bool mem_cgroup_disabled(void) { - if (memory_cgrp_subsys.disabled) - return true; - return false; + return !cgroup_subsys_enabled(memory_cgrp_subsys); } /* @@ -382,7 +383,7 @@ unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) return mz->lru_size[lru]; } -static inline int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) +static inline bool mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) { unsigned long inactive_ratio; unsigned long inactive; @@ -401,24 +402,26 @@ static inline int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) return inactive * inactive_ratio < active; } +void mem_cgroup_handle_over_high(void); + void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p); static inline void mem_cgroup_oom_enable(void) { - WARN_ON(current->memcg_oom.may_oom); - current->memcg_oom.may_oom = 1; + WARN_ON(current->memcg_may_oom); + current->memcg_may_oom = 1; } static inline void mem_cgroup_oom_disable(void) { - WARN_ON(!current->memcg_oom.may_oom); - current->memcg_oom.may_oom = 0; + WARN_ON(!current->memcg_may_oom); + current->memcg_may_oom = 0; } static inline bool task_in_memcg_oom(struct task_struct *p) { - return p->memcg_oom.memcg; + return p->memcg_in_oom; } bool mem_cgroup_oom_synchronize(bool wait); @@ -535,9 +538,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list) { } -static inline void mem_cgroup_migrate(struct page *oldpage, - struct page *newpage, - bool lrucare) +static inline void mem_cgroup_replace_page(struct page *old, struct page *new) { } @@ -583,10 +584,10 @@ static inline bool mem_cgroup_disabled(void) return true; } 
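Returning to the mei_cl bus rename above: a minimal sketch of a client driver written against the new mei_cldev_* names. Everything prefixed my_ is hypothetical, and only fields shown in the hunk above are used:

	static const struct mei_cl_device_id my_id_tbl[] = {
		{ /* UUID of the hypothetical client goes here */ },
		{ }
	};

	static int my_probe(struct mei_cl_device *cldev,
			    const struct mei_cl_device_id *id)
	{
		return mei_cldev_enable(cldev);
	}

	static int my_remove(struct mei_cl_device *cldev)
	{
		return mei_cldev_disable(cldev);
	}

	static struct mei_cl_driver my_driver = {
		.id_table = my_id_tbl,
		.probe = my_probe,
		.remove = my_remove,
	};

	module_driver(my_driver, mei_cldev_driver_register,
		      mei_cldev_driver_unregister);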
-static inline int +static inline bool mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) { - return 1; + return true; } static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec) @@ -620,6 +621,10 @@ static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg) { } +static inline void mem_cgroup_handle_over_high(void) +{ +} + static inline void mem_cgroup_oom_enable(void) { } @@ -676,8 +681,9 @@ enum { struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg); struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); -void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail, - unsigned long *pdirty, unsigned long *pwriteback); +void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, + unsigned long *pheadroom, unsigned long *pdirty, + unsigned long *pwriteback); #else /* CONFIG_CGROUP_WRITEBACK */ @@ -687,7 +693,8 @@ static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) } static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, - unsigned long *pavail, + unsigned long *pfilepages, + unsigned long *pheadroom, unsigned long *pdirty, unsigned long *pwriteback) { @@ -744,11 +751,10 @@ static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg) * conditions, but because they are pretty simple, they are expected to be * fast. */ -bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, - int order); -void __memcg_kmem_commit_charge(struct page *page, - struct mem_cgroup *memcg, int order); -void __memcg_kmem_uncharge_pages(struct page *page, int order); +int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, + struct mem_cgroup *memcg); +int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order); +void __memcg_kmem_uncharge(struct page *page, int order); /* * helper for acessing a memcg's index. It will be used as an index in the @@ -763,77 +769,42 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep); void __memcg_kmem_put_cache(struct kmem_cache *cachep); -struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr); - -int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, - unsigned long nr_pages); -void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages); - -/** - * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed. - * @gfp: the gfp allocation flags. - * @memcg: a pointer to the memcg this was charged against. - * @order: allocation order. - * - * returns true if the memcg where the current task belongs can hold this - * allocation. - * - * We return true automatically if this allocation is not to be accounted to - * any memcg. - */ -static inline bool -memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) +static inline bool __memcg_kmem_bypass(gfp_t gfp) { if (!memcg_kmem_enabled()) return true; - if (gfp & __GFP_NOACCOUNT) return true; - /* - * __GFP_NOFAIL allocations will move on even if charging is not - * possible. Therefore we don't even try, and have this allocation - * unaccounted. We could in theory charge it forcibly, but we hope - * those allocations are rare, and won't be worth the trouble. - */ - if (gfp & __GFP_NOFAIL) - return true; if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) return true; - - /* If the test is dying, just let it go. 
*/ - if (unlikely(fatal_signal_pending(current))) - return true; - - return __memcg_kmem_newpage_charge(gfp, memcg, order); + return false; } /** - * memcg_kmem_uncharge_pages: uncharge pages from memcg - * @page: pointer to struct page being freed - * @order: allocation order. + * memcg_kmem_charge: charge a kmem page + * @page: page to charge + * @gfp: reclaim mode + * @order: allocation order + * + * Returns 0 on success, an error code on failure. */ -static inline void -memcg_kmem_uncharge_pages(struct page *page, int order) +static __always_inline int memcg_kmem_charge(struct page *page, + gfp_t gfp, int order) { - if (memcg_kmem_enabled()) - __memcg_kmem_uncharge_pages(page, order); + if (__memcg_kmem_bypass(gfp)) + return 0; + return __memcg_kmem_charge(page, gfp, order); } /** - * memcg_kmem_commit_charge: embeds correct memcg in a page - * @page: pointer to struct page recently allocated - * @memcg: the memcg structure we charged against - * @order: allocation order. - * - * Needs to be called after memcg_kmem_newpage_charge, regardless of success or - * failure of the allocation. if @page is NULL, this function will revert the - * charges. Otherwise, it will commit @page to @memcg. + * memcg_kmem_uncharge: uncharge a kmem page + * @page: page to uncharge + * @order: allocation order */ -static inline void -memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) +static __always_inline void memcg_kmem_uncharge(struct page *page, int order) { - if (memcg_kmem_enabled() && memcg) - __memcg_kmem_commit_charge(page, memcg, order); + if (memcg_kmem_enabled()) + __memcg_kmem_uncharge(page, order); } /** @@ -846,17 +817,8 @@ memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) static __always_inline struct kmem_cache * memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) { - if (!memcg_kmem_enabled()) - return cachep; - if (gfp & __GFP_NOACCOUNT) - return cachep; - if (gfp & __GFP_NOFAIL) + if (__memcg_kmem_bypass(gfp)) return cachep; - if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) - return cachep; - if (unlikely(fatal_signal_pending(current))) - return cachep; - return __memcg_kmem_get_cache(cachep); } @@ -865,13 +827,6 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep) if (memcg_kmem_enabled()) __memcg_kmem_put_cache(cachep); } - -static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr) -{ - if (!memcg_kmem_enabled()) - return NULL; - return __mem_cgroup_from_kmem(ptr); -} #else #define for_each_memcg_cache_index(_idx) \ for (; NULL; ) @@ -886,18 +841,12 @@ static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg) return false; } -static inline bool -memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) +static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) { - return true; + return 0; } -static inline void memcg_kmem_uncharge_pages(struct page *page, int order) -{ -} - -static inline void -memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) +static inline void memcg_kmem_uncharge(struct page *page, int order) { } @@ -923,11 +872,5 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) { } - -static inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr) -{ - return NULL; -} #endif /* CONFIG_MEMCG_KMEM */ #endif /* _LINUX_MEMCONTROL_H */ - diff --git a/include/linux/memory_hotplug.h 
b/include/linux/memory_hotplug.h index 8f60e899b33c..2ea574ff9714 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -11,6 +11,7 @@ struct zone; struct pglist_data; struct mem_section; struct memory_block; +struct resource; #ifdef CONFIG_MEMORY_HOTPLUG @@ -266,6 +267,7 @@ static inline void remove_memory(int nid, u64 start, u64 size) {} extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, void *arg, int (*func)(struct memory_block *, void *)); extern int add_memory(int nid, u64 start, u64 size); +extern int add_memory_resource(int nid, struct resource *resource); extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default, bool for_device); extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device); diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h index 8fcad63fab55..d409ceb2231e 100644 --- a/include/linux/mfd/88pm80x.h +++ b/include/linux/mfd/88pm80x.h @@ -21,6 +21,7 @@ enum { CHIP_INVALID = 0, CHIP_PM800, CHIP_PM805, + CHIP_PM860, CHIP_MAX, }; diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h index 1dc385850ba2..57b45caaea80 100644 --- a/include/linux/mfd/arizona/pdata.h +++ b/include/linux/mfd/arizona/pdata.h @@ -124,6 +124,9 @@ struct arizona_pdata { /** Channel to use for headphone detection */ unsigned int hpdet_channel; + /** Use software comparison to determine mic presence */ + bool micd_software_compare; + /** Extra debounce timeout used during initial mic detection (ms) */ unsigned int micd_detect_debounce; @@ -181,6 +184,9 @@ struct arizona_pdata { /** GPIO for primary IRQ (used for edge triggered emulation) */ int irq_gpio; + + /** General purpose switch control */ + unsigned int gpsw; }; #endif diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h index fdd70b3c7418..cd7e78eae006 100644 --- a/include/linux/mfd/arizona/registers.h +++ b/include/linux/mfd/arizona/registers.h @@ -242,6 +242,7 @@ #define ARIZONA_HP1_SHORT_CIRCUIT_CTRL 0x4A0 #define ARIZONA_HP2_SHORT_CIRCUIT_CTRL 0x4A1 #define ARIZONA_HP3_SHORT_CIRCUIT_CTRL 0x4A2 +#define ARIZONA_HP_TEST_CTRL_1 0x4A4 #define ARIZONA_SPK_CTRL_2 0x4B5 #define ARIZONA_SPK_CTRL_3 0x4B6 #define ARIZONA_DAC_COMP_1 0x4DC @@ -1064,6 +1065,16 @@ #define ARIZONA_CLOCK_CONTROL 0xF00 #define ARIZONA_ANC_SRC 0xF01 #define ARIZONA_DSP_STATUS 0xF02 +#define ARIZONA_ANC_COEFF_START 0xF08 +#define ARIZONA_ANC_COEFF_END 0xF12 +#define ARIZONA_FCL_FILTER_CONTROL 0xF15 +#define ARIZONA_FCL_ADC_REFORMATTER_CONTROL 0xF17 +#define ARIZONA_FCL_COEFF_START 0xF18 +#define ARIZONA_FCL_COEFF_END 0xF69 +#define ARIZONA_FCR_FILTER_CONTROL 0xF70 +#define ARIZONA_FCR_ADC_REFORMATTER_CONTROL 0xF72 +#define ARIZONA_FCR_COEFF_START 0xF73 +#define ARIZONA_FCR_COEFF_END 0xFC4 #define ARIZONA_DSP1_CONTROL_1 0x1100 #define ARIZONA_DSP1_CLOCKING_1 0x1101 #define ARIZONA_DSP1_STATUS_1 0x1104 @@ -2359,9 +2370,9 @@ #define ARIZONA_ACCDET_SRC_MASK 0x2000 /* ACCDET_SRC */ #define ARIZONA_ACCDET_SRC_SHIFT 13 /* ACCDET_SRC */ #define ARIZONA_ACCDET_SRC_WIDTH 1 /* ACCDET_SRC */ -#define ARIZONA_ACCDET_MODE_MASK 0x0003 /* ACCDET_MODE - [1:0] */ -#define ARIZONA_ACCDET_MODE_SHIFT 0 /* ACCDET_MODE - [1:0] */ -#define ARIZONA_ACCDET_MODE_WIDTH 2 /* ACCDET_MODE - [1:0] */ +#define ARIZONA_ACCDET_MODE_MASK 0x0007 /* ACCDET_MODE - [2:0] */ +#define ARIZONA_ACCDET_MODE_SHIFT 0 /* ACCDET_MODE - [2:0] */ +#define ARIZONA_ACCDET_MODE_WIDTH 3 /* ACCDET_MODE - [2:0] */ /* * R667 (0x29B) - Headphone Detect 1 @@ 
-3702,6 +3713,13 @@ #define ARIZONA_HP3_SC_ENA_WIDTH 1 /* HP3_SC_ENA */ /* + * R1188 (0x4A4) HP Test Ctrl 1 + */ +#define ARIZONA_HP1_TST_CAP_SEL_MASK 0x0003 /* HP1_TST_CAP_SEL - [1:0] */ +#define ARIZONA_HP1_TST_CAP_SEL_SHIFT 0 /* HP1_TST_CAP_SEL - [1:0] */ +#define ARIZONA_HP1_TST_CAP_SEL_WIDTH 2 /* HP1_TST_CAP_SEL - [1:0] */ + +/* * R1244 (0x4DC) - DAC comp 1 */ #define ARIZONA_OUT_COMP_COEFF_MASK 0xFFFF /* OUT_COMP_COEFF - [15:0] */ @@ -8043,6 +8061,66 @@ #define ARIZONA_ISRC3_NOTCH_ENA_WIDTH 1 /* ISRC3_NOTCH_ENA */ /* + * R3840 (0xF00) - Clock Control + */ +#define ARIZONA_EXT_NG_SEL_CLR 0x0080 /* EXT_NG_SEL_CLR */ +#define ARIZONA_EXT_NG_SEL_CLR_MASK 0x0080 /* EXT_NG_SEL_CLR */ +#define ARIZONA_EXT_NG_SEL_CLR_SHIFT 7 /* EXT_NG_SEL_CLR */ +#define ARIZONA_EXT_NG_SEL_CLR_WIDTH 1 /* EXT_NG_SEL_CLR */ +#define ARIZONA_EXT_NG_SEL_SET 0x0040 /* EXT_NG_SEL_SET */ +#define ARIZONA_EXT_NG_SEL_SET_MASK 0x0040 /* EXT_NG_SEL_SET */ +#define ARIZONA_EXT_NG_SEL_SET_SHIFT 6 /* EXT_NG_SEL_SET */ +#define ARIZONA_EXT_NG_SEL_SET_WIDTH 1 /* EXT_NG_SEL_SET */ +#define ARIZONA_CLK_R_ENA_CLR 0x0020 /* CLK_R_ENA_CLR */ +#define ARIZONA_CLK_R_ENA_CLR_MASK 0x0020 /* CLK_R_ENA_CLR */ +#define ARIZONA_CLK_R_ENA_CLR_SHIFT 5 /* CLK_R_ENA_CLR */ +#define ARIZONA_CLK_R_ENA_CLR_WIDTH 1 /* CLK_R_ENA_CLR */ +#define ARIZONA_CLK_R_ENA_SET 0x0010 /* CLK_R_ENA_SET */ +#define ARIZONA_CLK_R_ENA_SET_MASK 0x0010 /* CLK_R_ENA_SET */ +#define ARIZONA_CLK_R_ENA_SET_SHIFT 4 /* CLK_R_ENA_SET */ +#define ARIZONA_CLK_R_ENA_SET_WIDTH 1 /* CLK_R_ENA_SET */ +#define ARIZONA_CLK_NG_ENA_CLR 0x0008 /* CLK_NG_ENA_CLR */ +#define ARIZONA_CLK_NG_ENA_CLR_MASK 0x0008 /* CLK_NG_ENA_CLR */ +#define ARIZONA_CLK_NG_ENA_CLR_SHIFT 3 /* CLK_NG_ENA_CLR */ +#define ARIZONA_CLK_NG_ENA_CLR_WIDTH 1 /* CLK_NG_ENA_CLR */ +#define ARIZONA_CLK_NG_ENA_SET 0x0004 /* CLK_NG_ENA_SET */ +#define ARIZONA_CLK_NG_ENA_SET_MASK 0x0004 /* CLK_NG_ENA_SET */ +#define ARIZONA_CLK_NG_ENA_SET_SHIFT 2 /* CLK_NG_ENA_SET */ +#define ARIZONA_CLK_NG_ENA_SET_WIDTH 1 /* CLK_NG_ENA_SET */ +#define ARIZONA_CLK_L_ENA_CLR 0x0002 /* CLK_L_ENA_CLR */ +#define ARIZONA_CLK_L_ENA_CLR_MASK 0x0002 /* CLK_L_ENA_CLR */ +#define ARIZONA_CLK_L_ENA_CLR_SHIFT 1 /* CLK_L_ENA_CLR */ +#define ARIZONA_CLK_L_ENA_CLR_WIDTH 1 /* CLK_L_ENA_CLR */ +#define ARIZONA_CLK_L_ENA_SET 0x0001 /* CLK_L_ENA_SET */ +#define ARIZONA_CLK_L_ENA_SET_MASK 0x0001 /* CLK_L_ENA_SET */ +#define ARIZONA_CLK_L_ENA_SET_SHIFT 0 /* CLK_L_ENA_SET */ +#define ARIZONA_CLK_L_ENA_SET_WIDTH 1 /* CLK_L_ENA_SET */ + +/* + * R3841 (0xF01) - ANC SRC + */ +#define ARIZONA_IN_RXANCR_SEL_MASK 0x0070 /* IN_RXANCR_SEL - [4:6] */ +#define ARIZONA_IN_RXANCR_SEL_SHIFT 4 /* IN_RXANCR_SEL - [4:6] */ +#define ARIZONA_IN_RXANCR_SEL_WIDTH 3 /* IN_RXANCR_SEL - [4:6] */ +#define ARIZONA_IN_RXANCL_SEL_MASK 0x0007 /* IN_RXANCL_SEL - [0:2] */ +#define ARIZONA_IN_RXANCL_SEL_SHIFT 0 /* IN_RXANCL_SEL - [0:2] */ +#define ARIZONA_IN_RXANCL_SEL_WIDTH 3 /* IN_RXANCL_SEL - [0:2] */ + +/* + * R3863 (0xF17) - FCL ADC Reformatter Control + */ +#define ARIZONA_FCL_MIC_MODE_SEL 0x000C /* FCL_MIC_MODE_SEL - [2:3] */ +#define ARIZONA_FCL_MIC_MODE_SEL_SHIFT 2 /* FCL_MIC_MODE_SEL - [2:3] */ +#define ARIZONA_FCL_MIC_MODE_SEL_WIDTH 2 /* FCL_MIC_MODE_SEL - [2:3] */ + +/* + * R3954 (0xF72) - FCR ADC Reformatter Control + */ +#define ARIZONA_FCR_MIC_MODE_SEL 0x000C /* FCR_MIC_MODE_SEL - [2:3] */ +#define ARIZONA_FCR_MIC_MODE_SEL_SHIFT 2 /* FCR_MIC_MODE_SEL - [2:3] */ +#define ARIZONA_FCR_MIC_MODE_SEL_WIDTH 2 /* FCR_MIC_MODE_SEL - [2:3] */ + +/* * R4352 (0x1100) - DSP1 Control 1 
*/ #define ARIZONA_DSP1_RATE_MASK 0x7800 /* DSP1_RATE - [14:11] */ diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index cc8ad1e1a307..b24c771cebd5 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -11,6 +11,8 @@ #ifndef __LINUX_MFD_AXP20X_H #define __LINUX_MFD_AXP20X_H +#include <linux/regmap.h> + enum { AXP152_ID = 0, AXP202_ID, @@ -438,4 +440,26 @@ struct axp288_extcon_pdata { struct gpio_desc *gpio_mux_cntl; }; +/* generic helper function for reading 9-16 bit wide regs */ +static inline int axp20x_read_variable_width(struct regmap *regmap, + unsigned int reg, unsigned int width) +{ + unsigned int reg_val, result; + int err; + + err = regmap_read(regmap, reg, ®_val); + if (err) + return err; + + result = reg_val << (width - 8); + + err = regmap_read(regmap, reg + 1, ®_val); + if (err) + return err; + + result |= reg_val; + + return result; +} + #endif /* __LINUX_MFD_AXP20X_H */ diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h index c4dd3a8add21..5010f978725c 100644 --- a/include/linux/mfd/da9052/reg.h +++ b/include/linux/mfd/da9052/reg.h @@ -65,6 +65,9 @@ #define DA9052_GPIO_2_3_REG 22 #define DA9052_GPIO_4_5_REG 23 #define DA9052_GPIO_6_7_REG 24 +#define DA9052_GPIO_8_9_REG 25 +#define DA9052_GPIO_10_11_REG 26 +#define DA9052_GPIO_12_13_REG 27 #define DA9052_GPIO_14_15_REG 28 /* POWER SEQUENCER CONTROL REGISTERS */ diff --git a/include/linux/mfd/da9150/core.h b/include/linux/mfd/da9150/core.h index 76e668933a77..1bf50caeb9fa 100644 --- a/include/linux/mfd/da9150/core.h +++ b/include/linux/mfd/da9150/core.h @@ -15,6 +15,7 @@ #define __DA9150_CORE_H #include <linux/device.h> +#include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/regmap.h> @@ -46,23 +47,39 @@ #define DA9150_IRQ_GPADC 19 #define DA9150_IRQ_WKUP 20 +/* I2C sub-device address */ +#define DA9150_QIF_I2C_ADDR_LSB 0x5 + +struct da9150_fg_pdata { + u32 update_interval; /* msecs */ + u8 warn_soc_lvl; /* % value */ + u8 crit_soc_lvl; /* % value */ +}; + struct da9150_pdata { int irq_base; + struct da9150_fg_pdata *fg_pdata; }; struct da9150 { struct device *dev; struct regmap *regmap; + struct i2c_client *core_qif; + struct regmap_irq_chip_data *regmap_irq_data; int irq; int irq_base; }; -/* Device I/O */ +/* Device I/O - Query Interface for FG and standard register access */ +void da9150_read_qif(struct da9150 *da9150, u8 addr, int count, u8 *buf); +void da9150_write_qif(struct da9150 *da9150, u8 addr, int count, const u8 *buf); + u8 da9150_reg_read(struct da9150 *da9150, u16 reg); void da9150_reg_write(struct da9150 *da9150, u16 reg, u8 val); void da9150_set_bits(struct da9150 *da9150, u16 reg, u8 mask, u8 val); void da9150_bulk_read(struct da9150 *da9150, u16 reg, int count, u8 *buf); void da9150_bulk_write(struct da9150 *da9150, u16 reg, int count, const u8 *buf); + #endif /* __DA9150_CORE_H */ diff --git a/include/linux/mfd/intel_bxtwc.h b/include/linux/mfd/intel_bxtwc.h new file mode 100644 index 000000000000..1a0ee9d6efe9 --- /dev/null +++ b/include/linux/mfd/intel_bxtwc.h @@ -0,0 +1,69 @@ +/* + * intel_bxtwc.h - Header file for Intel Broxton Whiskey Cove PMIC + * + * Copyright (C) 2015 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include <linux/mfd/intel_soc_pmic.h> + +#ifndef __INTEL_BXTWC_H__ +#define __INTEL_BXTWC_H__ + +/* BXT WC devices */ +#define BXTWC_DEVICE1_ADDR 0x4E +#define BXTWC_DEVICE2_ADDR 0x4F +#define BXTWC_DEVICE3_ADDR 0x5E + +/* device1 Registers */ +#define BXTWC_CHIPID 0x4E00 +#define BXTWC_CHIPVER 0x4E01 + +#define BXTWC_SCHGRIRQ0_ADDR 0x5E1A +#define BXTWC_CHGRCTRL0_ADDR 0x5E16 +#define BXTWC_CHGRCTRL1_ADDR 0x5E17 +#define BXTWC_CHGRCTRL2_ADDR 0x5E18 +#define BXTWC_CHGRSTATUS_ADDR 0x5E19 +#define BXTWC_THRMBATZONE_ADDR 0x4F22 + +#define BXTWC_USBPATH_ADDR 0x5E19 +#define BXTWC_USBPHYCTRL_ADDR 0x5E07 +#define BXTWC_USBIDCTRL_ADDR 0x5E05 +#define BXTWC_USBIDEN_MASK 0x01 +#define BXTWC_USBIDSTAT_ADDR 0x00FF +#define BXTWC_USBSRCDETSTATUS_ADDR 0x5E29 + +#define BXTWC_DBGUSBBC1_ADDR 0x5FE0 +#define BXTWC_DBGUSBBC2_ADDR 0x5FE1 +#define BXTWC_DBGUSBBCSTAT_ADDR 0x5FE2 + +#define BXTWC_WAKESRC_ADDR 0x4E22 +#define BXTWC_WAKESRC2_ADDR 0x4EE5 +#define BXTWC_CHRTTADDR_ADDR 0x5E22 +#define BXTWC_CHRTTDATA_ADDR 0x5E23 + +#define BXTWC_STHRMIRQ0_ADDR 0x4F19 +#define WC_MTHRMIRQ1_ADDR 0x4E12 +#define WC_STHRMIRQ1_ADDR 0x4F1A +#define WC_STHRMIRQ2_ADDR 0x4F1B + +#define BXTWC_THRMZN0H_ADDR 0x4F44 +#define BXTWC_THRMZN0L_ADDR 0x4F45 +#define BXTWC_THRMZN1H_ADDR 0x4F46 +#define BXTWC_THRMZN1L_ADDR 0x4F47 +#define BXTWC_THRMZN2H_ADDR 0x4F48 +#define BXTWC_THRMZN2L_ADDR 0x4F49 +#define BXTWC_THRMZN3H_ADDR 0x4F4A +#define BXTWC_THRMZN3L_ADDR 0x4F4B +#define BXTWC_THRMZN4H_ADDR 0x4F4C +#define BXTWC_THRMZN4L_ADDR 0x4F4D + +#endif diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h index abcbfcf32d10..cf619dbeace2 100644 --- a/include/linux/mfd/intel_soc_pmic.h +++ b/include/linux/mfd/intel_soc_pmic.h @@ -25,6 +25,8 @@ struct intel_soc_pmic { int irq; struct regmap *regmap; struct regmap_irq_chip_data *irq_chip_data; + struct regmap_irq_chip_data *irq_chip_data_level2; + struct device *dev; }; #endif /* __INTEL_SOC_PMIC_H__ */ diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h index ff843e7ca23d..7eb7cbac0a9a 100644 --- a/include/linux/mfd/rtsx_pci.h +++ b/include/linux/mfd/rtsx_pci.h @@ -589,6 +589,7 @@ #define FORCE_ASPM_NO_ASPM 0x00 #define PM_CLK_FORCE_CTL 0xFE58 #define FUNC_FORCE_CTL 0xFE59 +#define FUNC_FORCE_UPME_XMT_DBG 0x02 #define PERST_GLITCH_WIDTH 0xFE5C #define CHANGE_LINK_STATE 0xFE5B #define RESET_LOAD_REG 0xFE5E @@ -712,6 +713,7 @@ #define PHY_RCR1 0x02 #define PHY_RCR1_ADP_TIME_4 0x0400 #define PHY_RCR1_VCO_COARSE 0x001F +#define PHY_RCR1_INIT_27S 0x0A1F #define PHY_SSCCR2 0x02 #define PHY_SSCCR2_PLL_NCODE 0x0A00 #define PHY_SSCCR2_TIME0 0x001C @@ -724,6 +726,7 @@ #define PHY_RCR2_FREQSEL_12 0x0040 #define PHY_RCR2_CDR_SC_12P 0x0010 #define PHY_RCR2_CALIB_LATE 0x0002 +#define PHY_RCR2_INIT_27S 0xC152 #define PHY_SSCCR3 0x03 #define PHY_SSCCR3_STEP_IN 0x2740 #define PHY_SSCCR3_CHECK_DELAY 0x0008 @@ -800,12 +803,14 @@ #define PHY_ANA1A_RXT_BIST 0x0500 #define PHY_ANA1A_TXR_BIST 0x0040 #define PHY_ANA1A_REV 0x0006 +#define PHY_FLD0_INIT_27S 0x2546 #define PHY_FLD1 0x1B #define PHY_FLD2 0x1C #define PHY_FLD3 0x1D #define PHY_FLD3_TIMER_4 0x0800 #define PHY_FLD3_TIMER_6 0x0020 #define PHY_FLD3_RXDELINK 0x0004 +#define PHY_FLD3_INIT_27S 0x0004 #define PHY_ANA1D 0x1D #define PHY_ANA1D_DEBUG_ADDR 
0x0004 #define _PHY_FLD0 0x1D @@ -824,6 +829,7 @@ #define PHY_FLD4_BER_COUNT 0x00E0 #define PHY_FLD4_BER_TIMER 0x000A #define PHY_FLD4_BER_CHK_EN 0x0001 +#define PHY_FLD4_INIT_27S 0x5C7F #define PHY_DIG1E 0x1E #define PHY_DIG1E_REV 0x4000 #define PHY_DIG1E_D0_X_D1 0x1000 diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index 75115384f3fc..a06098639399 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -132,6 +132,10 @@ struct sec_platform_data { int buck2_init; int buck3_init; int buck4_init; + /* Whether or not to manually set PWRHOLD to low during shutdown. */ + bool manual_poweroff; + /* Disable the WRSTBI (buck voltage warm reset) when probing? */ + bool disable_wrstbi; }; /** diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h index 7981a9d77d3f..b288965e8101 100644 --- a/include/linux/mfd/samsung/s2mps11.h +++ b/include/linux/mfd/samsung/s2mps11.h @@ -179,6 +179,7 @@ enum s2mps11_regulators { #define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1) #define S2MPS11_RAMP_DELAY 25000 /* uV/us */ +#define S2MPS11_CTRL1_PWRHOLD_MASK BIT(4) #define S2MPS11_BUCK2_RAMP_SHIFT 6 #define S2MPS11_BUCK34_RAMP_SHIFT 4 diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h index b1fd675fa36f..239e977ba45d 100644 --- a/include/linux/mfd/samsung/s2mps13.h +++ b/include/linux/mfd/samsung/s2mps13.h @@ -184,5 +184,6 @@ enum s2mps13_regulators { * Let's assume that default value will be set. */ #define S2MPS13_BUCK_RAMP_DELAY 12500 +#define S2MPS13_REG_WRSTBI_MASK BIT(5) #endif /* __LINUX_MFD_S2MPS13_H */ diff --git a/include/linux/mfd/tps6105x.h b/include/linux/mfd/tps6105x.h index 386743dd931c..8bc51180800a 100644 --- a/include/linux/mfd/tps6105x.h +++ b/include/linux/mfd/tps6105x.h @@ -10,6 +10,7 @@ #define MFD_TPS6105X_H #include <linux/i2c.h> +#include <linux/regmap.h> #include <linux/regulator/machine.h> /* @@ -82,20 +83,15 @@ struct tps6105x_platform_data { /** * struct tps6105x - state holder for the TPS6105x drivers - * @mutex: mutex to serialize I2C accesses * @i2c_client: corresponding I2C client * @regulator: regulator device if used in voltage mode + * @regmap: used for i2c communication when accessing registers */ struct tps6105x { struct tps6105x_platform_data *pdata; - struct mutex lock; struct i2c_client *client; struct regulator_dev *regulator; + struct regmap *regmap; }; -extern int tps6105x_set(struct tps6105x *tps6105x, u8 reg, u8 value); -extern int tps6105x_get(struct tps6105x *tps6105x, u8 reg, u8 *buf); -extern int tps6105x_mask_and_set(struct tps6105x *tps6105x, u8 reg, - u8 bitmask, u8 bitvalues); - #endif diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h index d5b5f76d57ef..27d7c95fd0da 100644 --- a/include/linux/mic_bus.h +++ b/include/linux/mic_bus.h @@ -91,7 +91,8 @@ struct mbus_hw_ops { struct mbus_device * mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, - struct mbus_hw_ops *hw_ops, void __iomem *mmio_va); + struct mbus_hw_ops *hw_ops, int index, + void __iomem *mmio_va); void mbus_unregister_device(struct mbus_device *mbdev); int mbus_register_driver(struct mbus_driver *drv); diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 81f6e427ba6b..543037465973 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -49,6 +49,7 @@ #define LOOP_CTRL_MINOR 237 #define VHOST_NET_MINOR 238 #define UHID_MINOR 239 +#define USERIO_MINOR 240 #define MISC_DYNAMIC_MINOR
255 struct device; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index baad4cb8e9b0..7501626ab529 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -214,6 +214,8 @@ enum { MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28, MLX4_DEV_CAP_FLAG2_PHV_EN = 1LL << 29, MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN = 1LL << 30, + MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB = 1ULL << 31, + MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1ULL << 32, }; enum { @@ -833,6 +835,7 @@ struct mlx4_dev { struct mlx4_quotas quotas; struct radix_tree_root qp_table_tree; u8 rev_id; + u8 port_random_macs; char board_id[MLX4_BOARD_ID_LEN]; int numa_node; int oper_log_mgm_entry_size; diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index de45a51b3f04..fe052e234906 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -135,7 +135,10 @@ struct mlx4_rss_context { struct mlx4_qp_path { u8 fl; - u8 vlan_control; + union { + u8 vlan_control; + u8 control; + }; u8 disable_pkey_check; u8 pkey_index; u8 counter_index; @@ -156,9 +159,16 @@ struct mlx4_qp_path { }; enum { /* fl */ - MLX4_FL_CV = 1 << 6, - MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2 + MLX4_FL_CV = 1 << 6, + MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2, + MLX4_FL_ETH_SRC_CHECK_MC_LB = 1 << 1, + MLX4_FL_ETH_SRC_CHECK_UC_LB = 1 << 0, }; + +enum { /* control */ + MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER = 1 << 7, +}; + enum { /* vlan_control */ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED = 1 << 6, MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED = 1 << 5, /* 802.1p priority tag */ @@ -254,6 +264,8 @@ enum { MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE = 14 + 32, MLX4_UPD_QP_PATH_MASK_IF_COUNTER_INDEX = 15 + 32, MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_UC_LB = 18 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB = 19 + 32, }; enum { /* param3 */ @@ -436,11 +448,13 @@ enum mlx4_update_qp_attr { MLX4_UPDATE_QP_VSD = 1 << 1, MLX4_UPDATE_QP_RATE_LIMIT = 1 << 2, MLX4_UPDATE_QP_QOS_VPORT = 1 << 3, - MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 4) - 1 + MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB = 1 << 4, + MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 5) - 1 }; enum mlx4_update_qp_params_flags { - MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 0, + MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB = 1 << 0, + MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 1, }; struct mlx4_update_qp_params { diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 250b1ff8b48d..0b473cbfa7ef 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -429,7 +429,7 @@ struct health_buffer { __be32 rsvd2; u8 irisc_index; u8 synd; - __be16 ext_sync; + __be16 ext_synd; }; struct mlx5_init_seg { @@ -439,7 +439,8 @@ struct mlx5_init_seg { __be32 cmdq_addr_h; __be32 cmdq_addr_l_sz; __be32 cmd_dbell; - __be32 rsvd1[121]; + __be32 rsvd1[120]; + __be32 initializing; struct health_buffer health; __be32 rsvd2[884]; __be32 health_counter; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 8b6d6f2154a4..5c857f2a20d7 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -391,9 +391,11 @@ struct mlx5_core_health { struct health_buffer __iomem *health; __be32 __iomem *health_counter; struct timer_list timer; - struct list_head list; u32 prev; int miss_counter; + bool sick; + struct workqueue_struct *wq; + struct work_struct work; }; struct mlx5_cq_table { @@ -485,8 +487,26 @@ struct mlx5_priv { spinlock_t ctx_lock; }; +enum mlx5_device_state { + MLX5_DEVICE_STATE_UP, + MLX5_DEVICE_STATE_INTERNAL_ERROR, +}; + +enum 
mlx5_interface_state { + MLX5_INTERFACE_STATE_DOWN, + MLX5_INTERFACE_STATE_UP, +}; + +enum mlx5_pci_status { + MLX5_PCI_STATUS_DISABLED, + MLX5_PCI_STATUS_ENABLED, +}; + struct mlx5_core_dev { struct pci_dev *pdev; + /* sync pci state */ + struct mutex pci_status_mutex; + enum mlx5_pci_status pci_status; u8 rev_id; char board_id[MLX5_BOARD_ID_LEN]; struct mlx5_cmd cmd; @@ -495,6 +515,10 @@ struct mlx5_core_dev { u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; phys_addr_t iseg_base; struct mlx5_init_seg __iomem *iseg; + enum mlx5_device_state state; + /* sync interface state */ + struct mutex intf_state_mutex; + enum mlx5_interface_state interface_state; void (*event) (struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); @@ -676,8 +700,8 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); -void mlx5_health_cleanup(void); -void __init mlx5_health_init(void); +void mlx5_health_cleanup(struct mlx5_core_dev *dev); +int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); void mlx5_stop_health_poll(struct mlx5_core_dev *dev); int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, @@ -731,7 +755,7 @@ void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); #endif void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); -void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector); +void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec); void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int nent, u64 mask, const char *name, struct mlx5_uar *uar); @@ -802,6 +826,11 @@ void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *odp_caps); +static inline int fw_initializing(struct mlx5_core_dev *dev) +{ + return ioread32be(&dev->iseg->initializing) >> 31; +} + static inline u32 mlx5_mkey_to_idx(u32 mkey) { return mkey >> 8; @@ -865,4 +894,8 @@ static inline int mlx5_get_gid_table_len(u16 param) return 8 * (1 << param); } +enum { + MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, +}; + #endif /* MLX5_DRIVER_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 80001de019ba..00bad7793788 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -139,6 +139,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */ #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ +#define VM_LOCKONFAULT 0x00080000 /* Lock the pages covered when they are faulted in */ #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ @@ -202,6 +203,9 @@ extern unsigned int kobjsize(const void *objp); /* This mask defines which mm->def_flags a process can inherit its parent */ #define VM_INIT_DEF_MASK VM_NOHUGEPAGE +/* This mask is used to clear all the VMA flags used by mlock */ +#define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT)) + /* * mapping from the currently 
active vm_flags protection bits (the * low four bits) to a page protection mask.. @@ -426,46 +430,6 @@ static inline void compound_unlock_irqrestore(struct page *page, #endif } -static inline struct page *compound_head_by_tail(struct page *tail) -{ - struct page *head = tail->first_page; - - /* - * page->first_page may be a dangling pointer to an old - * compound page, so recheck that it is still a tail - * page before returning. - */ - smp_rmb(); - if (likely(PageTail(tail))) - return head; - return tail; -} - -/* - * Since either compound page could be dismantled asynchronously in THP - * or we access asynchronously arbitrary positioned struct page, there - * would be tail flag race. To handle this race, we should call - * smp_rmb() before checking tail flag. compound_head_by_tail() did it. - */ -static inline struct page *compound_head(struct page *page) -{ - if (unlikely(PageTail(page))) - return compound_head_by_tail(page); - return page; -} - -/* - * If we access compound page synchronously such as access to - * allocated page, there is no need to handle tail flag race, so we can - * check tail flag directly without any synchronization primitive. - */ -static inline struct page *compound_head_fast(struct page *page) -{ - if (unlikely(PageTail(page))) - return page->first_page; - return page; -} - /* * The atomic page->_mapcount, starts from -1: so that transitions * both from it and to it can be tracked, using atomic_inc_and_test @@ -514,7 +478,7 @@ static inline void get_huge_page_tail(struct page *page) VM_BUG_ON_PAGE(!PageTail(page), page); VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page); - if (compound_tail_refcounted(page->first_page)) + if (compound_tail_refcounted(compound_head(page))) atomic_inc(&page->_mapcount); } @@ -537,13 +501,7 @@ static inline struct page *virt_to_head_page(const void *x) { struct page *page = virt_to_page(x); - /* - * We don't need to worry about synchronization of tail flag - * when we call virt_to_head_page() since it is only called for - * already allocated page and this page won't be freed until - * this virt_to_head_page() is finished. So use _fast variant. - */ - return compound_head_fast(page); + return compound_head(page); } /* @@ -564,28 +522,42 @@ int split_free_page(struct page *page); /* * Compound pages have a destructor function. Provide a * prototype for that function and accessor functions. - * These are _only_ valid on the head of a PG_compound page. + * These are _only_ valid on the head of a compound page. 
*/ +typedef void compound_page_dtor(struct page *); + +/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */ +enum compound_dtor_id { + NULL_COMPOUND_DTOR, + COMPOUND_PAGE_DTOR, +#ifdef CONFIG_HUGETLB_PAGE + HUGETLB_PAGE_DTOR, +#endif + NR_COMPOUND_DTORS, +}; +extern compound_page_dtor * const compound_page_dtors[]; static inline void set_compound_page_dtor(struct page *page, - compound_page_dtor *dtor) + enum compound_dtor_id compound_dtor) { - page[1].compound_dtor = dtor; + VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page); + page[1].compound_dtor = compound_dtor; } static inline compound_page_dtor *get_compound_page_dtor(struct page *page) { - return page[1].compound_dtor; + VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page); + return compound_page_dtors[page[1].compound_dtor]; } -static inline int compound_order(struct page *page) +static inline unsigned int compound_order(struct page *page) { if (!PageHead(page)) return 0; return page[1].compound_order; } -static inline void set_compound_order(struct page *page, unsigned long order) +static inline void set_compound_order(struct page *page, unsigned int order) { page[1].compound_order = order; }
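The destructor is now a small index into a fixed table rather than a function pointer stored in the page itself. Per the "keep in sync" comment above, the table indexed by enum compound_dtor_id lives in mm/page_alloc.c; roughly like this sketch, where free_compound_page() and free_huge_page() are the expected implementations rather than code shown in this patch:

	compound_page_dtor * const compound_page_dtors[] = {
		NULL,			/* NULL_COMPOUND_DTOR */
		free_compound_page,	/* COMPOUND_PAGE_DTOR */
	#ifdef CONFIG_HUGETLB_PAGE
		free_huge_page,		/* HUGETLB_PAGE_DTOR */
	#endif
	};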
@@ -1568,8 +1540,7 @@ static inline bool ptlock_init(struct page *page) * with 0. Make sure nobody took it in use in between. * * It can happen if an arch tries to use slab for page table allocation: - * slab code uses page->slab_cache and page->first_page (for tail - * pages), which share storage with page->ptl. + * slab code uses page->slab_cache, which shares storage with page->ptl. */ VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page); if (!ptlock_alloc(page)) @@ -1606,8 +1577,10 @@ static inline void pgtable_init(void) static inline bool pgtable_page_ctor(struct page *page) { + if (!ptlock_init(page)) + return false; inc_zone_page_state(page, NR_PAGETABLE); - return ptlock_init(page); + return true; } static inline void pgtable_page_dtor(struct page *page) @@ -1837,7 +1810,8 @@ extern void si_meminfo(struct sysinfo * val); extern void si_meminfo_node(struct sysinfo *val, int nid); extern __printf(3, 4) -void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...); +void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, + const char *fmt, ...); extern void setup_per_cpu_pageset(void); @@ -2036,8 +2010,6 @@ void page_cache_async_readahead(struct address_space *mapping, pgoff_t offset, unsigned long size); -unsigned long max_sane_readahead(unsigned long nr); - /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); @@ -2137,6 +2109,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma, #define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ #define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ #define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ +#define FOLL_MLOCK 0x1000 /* lock present pages */ typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, void *data); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 3d6baa7d4534..f8d1492a114f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -28,8 +28,6 @@ struct mem_cgroup; IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) #define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) -typedef void compound_page_dtor(struct page *); - /* * Each physical page in the system has a struct page associated with * it to keep track of whatever it is we are using the page for at the @@ -113,7 +111,13 @@ struct page { }; }; - /* Third double word block */ + /* + * Third double word block + * + * WARNING: bit 0 of the first word encodes PageTail(). That means + * the remaining users of the storage space MUST NOT use the bit to + * avoid collisions and false-positive PageTail(). + */ union { struct list_head lru; /* Pageout list, eg. active_list * protected by zone->lru_lock ! @@ -131,18 +135,37 @@ struct page { #endif }; - struct slab *slab_page; /* slab fields */ struct rcu_head rcu_head; /* Used by SLAB * when destroying via RCU */ - /* First tail page of compound page */ + /* Tail pages of compound page */ struct { - compound_page_dtor *compound_dtor; - unsigned long compound_order; + unsigned long compound_head; /* If bit zero is set */ + + /* First tail page only */ +#ifdef CONFIG_64BIT + /* + * On 64-bit systems we have enough space in struct page + * to encode compound_dtor and compound_order with + * unsigned int. It can help the compiler generate better or + * smaller code on some architectures. + */ + unsigned int compound_dtor; + unsigned int compound_order; +#else + unsigned short int compound_dtor; + unsigned short int compound_order; +#endif }; #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS - pgtable_t pmd_huge_pte; /* protected by page->ptl */ + struct { + unsigned long __pad; /* do not overlay pmd_huge_pte + * with compound_head to avoid + * possible bit 0 collision. + */ + pgtable_t pmd_huge_pte; /* protected by page->ptl */ + }; #endif }; @@ -163,7 +186,6 @@ struct page { #endif #endif struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */ - struct page *first_page; /* Compound tail pages */ }; #ifdef CONFIG_MEMCG @@ -486,6 +508,9 @@ struct mm_struct { /* address of the bounds directory */ void __user *bd_addr; #endif +#ifdef CONFIG_HUGETLB_PAGE + atomic_long_t hugetlb_usage; +#endif }; static inline void mm_init_cpumask(struct mm_struct *mm)
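A sketch of the bit-0 encoding that the new comment above describes: a tail page stores its head pointer with bit 0 set, so a single word serves both as the PageTail() flag and as the head link. These helpers are illustrative decodings, not the patch's own code:

	/* tail pages store: compound_head = (unsigned long)head | 1UL */
	static inline int sketch_is_tail(struct page *page)
	{
		return READ_ONCE(page->compound_head) & 1UL;
	}

	static inline struct page *sketch_compound_head(struct page *page)
	{
		unsigned long head = READ_ONCE(page->compound_head);

		if (head & 1UL)
			return (struct page *)(head - 1UL);	/* strip bit 0 */
		return page;	/* head (or non-compound) page */
	}

This is also why the THP case gains the __pad member above: pmd_huge_pte must not overlay the word whose bit 0 readers interpret as the tail flag.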
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index fdd0779ccdfa..eb0151bac50c 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -269,7 +269,6 @@ struct mmc_card { /* for byte mode */ #define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */ /* (missing CIA registers) */ -#define MMC_QUIRK_BROKEN_CLK_GATING (1<<3) /* clock gating the sdio bus will make card fail */ #define MMC_QUIRK_NONSTD_FUNC_IF (1<<4) /* SDIO card has nonstd function interfaces */ #define MMC_QUIRK_DISABLE_CD (1<<5) /* disconnect CD/DAT[3] resistor */ #define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */ diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 258daf914c6d..37967b6da03c 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -152,10 +152,8 @@ extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *); extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, struct mmc_command *, int); extern void mmc_start_bkops(struct mmc_card *card, bool from_exception); -extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool, - bool, bool); extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); -extern int mmc_send_tuning(struct mmc_host *host); +extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error); extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); #define MMC_ERASE_ARG 0x00000000 diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index 134c57422740..f67b2ec18e6d 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h @@ -16,6 +16,7 @@ #include <linux/scatterlist.h> #include <linux/mmc/core.h> +#include <linux/dmaengine.h> #define MAX_MCI_SLOTS 2 @@ -40,6 +41,17 @@ enum { struct mmc_data; +enum { + TRANS_MODE_PIO = 0, + TRANS_MODE_IDMAC, + TRANS_MODE_EDMAC +}; + +struct dw_mci_dma_slave { + struct dma_chan *ch; + enum dma_transfer_direction direction; +}; + /** * struct dw_mci - MMC controller state shared between all slots * @lock: Spinlock protecting the queue and associated data. @@ -154,7 +166,14 @@ struct dw_mci { dma_addr_t sg_dma; void *sg_cpu; const struct dw_mci_dma_ops *dma_ops; + /* For idmac */ unsigned int ring_size; + + /* For edmac */ + struct dw_mci_dma_slave *dms; + /* Registers' physical base address */ + void *phy_regs; + u32 cmd_status; u32 data_status; u32 stop_cmdr; @@ -208,8 +227,8 @@ struct dw_mci { struct dw_mci_dma_ops { /* DMA Ops */ int (*init)(struct dw_mci *host); - void (*start)(struct dw_mci *host, unsigned int sg_len); - void (*complete)(struct dw_mci *host); + int (*start)(struct dw_mci *host, unsigned int sg_len); + void (*complete)(void *host); void (*stop)(struct dw_mci *host); void (*cleanup)(struct dw_mci *host); void (*exit)(struct dw_mci *host); diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 83b81fd865f3..8673ffe3d86e 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -292,18 +292,6 @@ struct mmc_host { mmc_pm_flag_t pm_caps; /* supported pm features */ -#ifdef CONFIG_MMC_CLKGATE - int clk_requests; /* internal reference counter */ - unsigned int clk_delay; /* number of MCI clk hold cycles */ - bool clk_gated; /* clock gated */ - struct delayed_work clk_gate_work; /* delayed clock gate */ - unsigned int clk_old; /* old clock value cache */ - spinlock_t clk_lock; /* lock for clk fields */ - struct mutex clk_gate_mutex; /* mutex for clock gating */ - struct device_attribute clkgate_delay_attr; - unsigned long clkgate_delay; -#endif - /* host specific block data */ unsigned int max_seg_size; /* see blk_queue_max_segment_size */ unsigned short max_segs; /* see blk_queue_max_segments */ @@ -423,6 +411,7 @@ int mmc_regulator_get_ocrmask(struct regulator *supply); int mmc_regulator_set_ocr(struct mmc_host *mmc, struct regulator *supply, unsigned short vdd_bit); +int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios); #else static inline int mmc_regulator_get_ocrmask(struct regulator *supply) { @@ -435,6 +424,12 @@ static inline int mmc_regulator_set_ocr(struct mmc_host *mmc, { return 0; } + +static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + return -EINVAL; +} #endif int mmc_regulator_get_supply(struct mmc_host *mmc); @@ -479,26 +474,6 @@ static inline int mmc_host_packed_wr(struct mmc_host *host) return host->caps2 & MMC_CAP2_PACKED_WR; } -#ifdef CONFIG_MMC_CLKGATE -void mmc_host_clk_hold(struct mmc_host *host); -void mmc_host_clk_release(struct mmc_host *host); -unsigned int mmc_host_clk_rate(struct mmc_host *host); - -#else -static inline void mmc_host_clk_hold(struct mmc_host *host) -{ -} - -static inline void mmc_host_clk_release(struct mmc_host *host) -{ -} - -static inline unsigned int mmc_host_clk_rate(struct mmc_host *host) -{ - return host->ios.clock; -} -#endif - static inline int mmc_card_hs(struct mmc_card *card) { return card->host->ios.timing == MMC_TIMING_SD_HS || diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index d94347737292..e23a9e704536 100644 ---
a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -37,10 +37,10 @@ enum { MIGRATE_UNMOVABLE, - MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, + MIGRATE_RECLAIMABLE, MIGRATE_PCPTYPES, /* the number of types on the pcp lists */ - MIGRATE_RESERVE = MIGRATE_PCPTYPES, + MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES, #ifdef CONFIG_CMA /* * MIGRATE_CMA migration type is designed to mimic the way @@ -334,13 +334,16 @@ struct zone { /* zone watermarks, access with *_wmark_pages(zone) macros */ unsigned long watermark[NR_WMARK]; + unsigned long nr_reserved_highatomic; + /* - * We don't know if the memory that we're going to allocate will be freeable - * or/and it will be released eventually, so to avoid totally wasting several - * GB of ram we must reserve some of the lower zone memory (otherwise we risk - * to run OOM on the lower zones despite there's tons of freeable ram - * on the higher zones). This array is recalculated at runtime if the - * sysctl_lowmem_reserve_ratio sysctl changes. + * We don't know if the memory that we're going to allocate will be + * freeable or/and it will be released eventually, so to avoid totally + * wasting several GB of ram we must reserve some of the lower zone + * memory (otherwise we risk to run OOM on the lower zones despite + * there being tons of freeable ram on the higher zones). This array is + * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl + * changes. */ long lowmem_reserve[MAX_NR_ZONES]; @@ -429,12 +432,6 @@ struct zone { const char *name; - /* - * Number of MIGRATE_RESERVE page block. To maintain for just - * optimization. Protected by zone->lock. - */ - int nr_migrate_reserve_block; - #ifdef CONFIG_MEMORY_ISOLATION /* * Number of isolated pageblock. It is used to solve incorrect @@ -589,75 +586,8 @@ static inline bool zone_is_empty(struct zone *zone) * [1] : No fallback (__GFP_THISNODE) */ #define MAX_ZONELISTS 2 - - -/* - * We cache key information from each zonelist for smaller cache - * footprint when scanning for free pages in get_page_from_freelist(). - * - * 1) The BITMAP fullzones tracks which zones in a zonelist have come - * up short of free memory since the last time (last_fullzone_zap) - * we zero'd fullzones. - * 2) The array z_to_n[] maps each zone in the zonelist to its node - * id, so that we can efficiently evaluate whether that node is - * set in the current tasks mems_allowed. - * - * Both fullzones and z_to_n[] are one-to-one with the zonelist, - * indexed by a zones offset in the zonelist zones[] array. - * - * The get_page_from_freelist() routine does two scans. During the - * first scan, we skip zones whose corresponding bit in 'fullzones' - * is set or whose corresponding node in current->mems_allowed (which - * comes from cpusets) is not set. During the second scan, we bypass - * this zonelist_cache, to ensure we look methodically at each zone. - * - * Once per second, we zero out (zap) fullzones, forcing us to - * reconsider nodes that might have regained more free memory. - * The field last_full_zap is the time we last zapped fullzones. - * - * This mechanism reduces the amount of time we waste repeatedly - * reexaming zones for free memory when they just came up low on - * memory momentarilly ago. - * - * The zonelist_cache struct members logically belong in struct - * zonelist. However, the mempolicy zonelists constructed for - * MPOL_BIND are intentionally variable length (and usually much - * shorter). 
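The reflowed lowmem_reserve comment above is easiest to read next to the check it governs. A simplified sketch of the test zone_watermark_ok() performs, under the assumption that free_pages has already been computed by the caller (an illustration, not the full kernel logic):

	/* Sketch: an allocation whose preferred zone has index
	 * 'classzone_idx' may take pages from zone 'z' only while the
	 * zone stays above its watermark plus the protection it
	 * reserves against exactly this kind of fallback. */
	static bool sketch_watermark_ok(struct zone *z, unsigned long mark,
					int classzone_idx,
					unsigned long free_pages)
	{
		return free_pages > mark + z->lowmem_reserve[classzone_idx];
	}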
A general purpose mechanism for handling structs with - * multiple variable length members is more mechanism than we want - * here. We resort to some special case hackery instead. - * - * The MPOL_BIND zonelists don't need this zonelist_cache (in good - * part because they are shorter), so we put the fixed length stuff - * at the front of the zonelist struct, ending in a variable length - * zones[], as is needed by MPOL_BIND. - * - * Then we put the optional zonelist cache on the end of the zonelist - * struct. This optional stuff is found by a 'zlcache_ptr' pointer in - * the fixed length portion at the front of the struct. This pointer - * both enables us to find the zonelist cache, and in the case of - * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL) - * to know that the zonelist cache is not there. - * - * The end result is that struct zonelists come in two flavors: - * 1) The full, fixed length version, shown below, and - * 2) The custom zonelists for MPOL_BIND. - * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache. - * - * Even though there may be multiple CPU cores on a node modifying - * fullzones or last_full_zap in the same zonelist_cache at the same - * time, we don't lock it. This is just hint data - if it is wrong now - * and then, the allocator will still function, perhaps a bit slower. - */ - - -struct zonelist_cache { - unsigned short z_to_n[MAX_ZONES_PER_ZONELIST]; /* zone->nid */ - DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST); /* zone full? */ - unsigned long last_full_zap; /* when last zap'd (jiffies) */ -}; #else #define MAX_ZONELISTS 1 -struct zonelist_cache; #endif /* @@ -675,9 +605,6 @@ struct zoneref { * allocation, the other zones are fallback zones, in decreasing * priority. * - * If zlcache_ptr is not NULL, then it is just the address of zlcache, - * as explained above. If zlcache_ptr is NULL, there is no zlcache. - * * * To speed the reading of the zonelist, the zonerefs contain the zone index * of the entry being read. Helper functions to access information given * a struct zoneref are @@ -687,11 +614,7 @@ struct zoneref { * zonelist_node_idx() - Return the index of the node for an entry */ struct zonelist { - struct zonelist_cache *zlcache_ptr; // NULL or &zlcache struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; -#ifdef CONFIG_NUMA - struct zonelist_cache zlcache; // optional ... 
-#endif }; #ifndef CONFIG_DISCONTIGMEM @@ -817,14 +740,13 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, int classzone_idx, int alloc_flags); bool zone_watermark_ok_safe(struct zone *z, unsigned int order, - unsigned long mark, int classzone_idx, int alloc_flags); + unsigned long mark, int classzone_idx); enum memmap_context { MEMMAP_EARLY, MEMMAP_HOTPLUG, }; extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, - unsigned long size, - enum memmap_context context); + unsigned long size); extern void lruvec_init(struct lruvec *lruvec); diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 688997a24aad..64f36e09a790 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -219,6 +219,14 @@ struct serio_device_id { __u8 proto; }; +struct hda_device_id { + __u32 vendor_id; + __u32 rev_id; + __u8 api_version; + const char *name; + unsigned long driver_data; +}; + /* * Struct used for matching a device */ @@ -601,15 +609,13 @@ struct ipack_device_id { #define MEI_CL_MODULE_PREFIX "mei:" #define MEI_CL_NAME_SIZE 32 -#define MEI_CL_UUID_FMT "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x" -#define MEI_CL_UUID_ARGS(_u) \ - _u[0], _u[1], _u[2], _u[3], _u[4], _u[5], _u[6], _u[7], \ - _u[8], _u[9], _u[10], _u[11], _u[12], _u[13], _u[14], _u[15] +#define MEI_CL_VERSION_ANY 0xff /** * struct mei_cl_device_id - MEI client device identifier * @name: helper name * @uuid: client uuid + * @version: client protocol version * @driver_info: information used by the driver. * * identifies mei client device by uuid and name @@ -617,6 +623,7 @@ struct ipack_device_id { struct mei_cl_device_id { char name[MEI_CL_NAME_SIZE]; uuid_le uuid; + __u8 version; kernel_ulong_t driver_info; }; diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index c12f2147c350..52666d90ca94 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -386,6 +386,7 @@ extern int param_get_ullong(char *buffer, const struct kernel_param *kp); extern const struct kernel_param_ops param_ops_charp; extern int param_set_charp(const char *val, const struct kernel_param *kp); extern int param_get_charp(char *buffer, const struct kernel_param *kp); +extern void param_free_charp(void *arg); #define param_check_charp(name, p) __param_check(name, p, char *) /* We used to allow int as well as bool. We're taking that away! 
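For the new struct hda_device_id above, a driver's match table would be laid out roughly as follows; the codec name and ID values here are invented for illustration:

	static const struct hda_device_id sketch_hda_ids[] = {
		{ .vendor_id	= 0x10ec0262,	/* vendor + device ID */
		  .rev_id	= 0x100002,
		  .api_version	= 0,
		  .name		= "ALC262",
		  .driver_data	= 0 },
		{ }				/* terminating entry */
	};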
*/ diff --git a/include/linux/mpi.h b/include/linux/mpi.h index 641b7d6fd096..3a5abe95affd 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h @@ -31,12 +31,7 @@ #define G10_MPI_H #include <linux/types.h> - -/* DSI defines */ - -#define SHA1_DIGEST_LENGTH 20 - -/*end of DSI defines */ +#include <linux/scatterlist.h> #define BYTES_PER_MPI_LIMB (BITS_PER_LONG / 8) #define BITS_PER_MPI_LIMB BITS_PER_LONG @@ -78,6 +73,7 @@ void mpi_swap(MPI a, MPI b); MPI do_encode_md(const void *sha_buffer, unsigned nbits); MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes); MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread); +MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len); int mpi_fromstr(MPI val, const char *str); u32 mpi_get_keyid(MPI a, u32 *keyid); void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign); @@ -85,6 +81,8 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, int *sign); void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign); int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign); +int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned *nbytes, + int *sign); #define log_mpidump g10_log_mpidump diff --git a/include/linux/msi.h b/include/linux/msi.h index ad939d0ba816..f71a25e5fd25 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -163,6 +163,8 @@ struct msi_controller { int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, struct msi_desc *desc); + int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev, + int nvec, int type); void (*teardown_irq)(struct msi_controller *chip, unsigned int irq); }; @@ -174,6 +176,7 @@ struct msi_controller { struct irq_domain; struct irq_chip; struct device_node; +struct fwnode_handle; struct msi_domain_info; /** @@ -262,7 +265,7 @@ enum { int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force); -struct irq_domain *msi_create_irq_domain(struct device_node *of_node, +struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, @@ -270,7 +273,7 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); -struct irq_domain *platform_msi_create_irq_domain(struct device_node *np, +struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, @@ -280,19 +283,26 @@ void platform_msi_domain_free_irqs(struct device *dev); #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg); -struct irq_domain *pci_msi_create_irq_domain(struct device_node *node, +struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, int nvec, int type); void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev); -struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node, +struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); irq_hw_number_t 
pci_msi_domain_calc_hwirq(struct pci_dev *dev, struct msi_desc *desc); int pci_msi_domain_check_cap(struct irq_domain *domain, struct msi_domain_info *info, struct device *dev); +u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev); +struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev); +#else +static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) +{ + return NULL; +} #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ #endif /* LINUX_MSI_H */ diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h deleted file mode 100644 index fe722c1fb61d..000000000000 --- a/include/linux/msm_mdp.h +++ /dev/null @@ -1,79 +0,0 @@ -/* include/linux/msm_mdp.h - * - * Copyright (C) 2007 Google Incorporated - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ -#ifndef _MSM_MDP_H_ -#define _MSM_MDP_H_ - -#include <linux/types.h> - -#define MSMFB_IOCTL_MAGIC 'm' -#define MSMFB_GRP_DISP _IOW(MSMFB_IOCTL_MAGIC, 1, unsigned int) -#define MSMFB_BLIT _IOW(MSMFB_IOCTL_MAGIC, 2, unsigned int) - -enum { - MDP_RGB_565, /* RGB 565 planar */ - MDP_XRGB_8888, /* RGB 888 padded */ - MDP_Y_CBCR_H2V2, /* Y and CbCr, pseudo planar w/ Cb is in MSB */ - MDP_ARGB_8888, /* ARGB 888 */ - MDP_RGB_888, /* RGB 888 planar */ - MDP_Y_CRCB_H2V2, /* Y and CrCb, pseudo planar w/ Cr is in MSB */ - MDP_YCRYCB_H2V1, /* YCrYCb interleave */ - MDP_Y_CRCB_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */ - MDP_Y_CBCR_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */ - MDP_RGBA_8888, /* ARGB 888 */ - MDP_BGRA_8888, /* ABGR 888 */ - MDP_RGBX_8888, /* RGBX 888 */ - MDP_IMGTYPE_LIMIT /* Non valid image type after this enum */ -}; - -enum { - PMEM_IMG, - FB_IMG, -}; - -/* flag values */ -#define MDP_ROT_NOP 0 -#define MDP_FLIP_LR 0x1 -#define MDP_FLIP_UD 0x2 -#define MDP_ROT_90 0x4 -#define MDP_ROT_180 (MDP_FLIP_UD|MDP_FLIP_LR) -#define MDP_ROT_270 (MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR) -#define MDP_DITHER 0x8 -#define MDP_BLUR 0x10 - -#define MDP_TRANSP_NOP 0xffffffff -#define MDP_ALPHA_NOP 0xff - -struct mdp_rect { - u32 x, y, w, h; -}; - -struct mdp_img { - u32 width, height, format, offset; - int memory_id; /* the file descriptor */ -}; - -struct mdp_blit_req { - struct mdp_img src; - struct mdp_img dst; - struct mdp_rect src_rect; - struct mdp_rect dst_rect; - u32 alpha, transp_mask, flags; -}; - -struct mdp_blit_req_list { - u32 count; - struct mdp_blit_req req[]; -}; - -#endif /* _MSM_MDP_H_ */ diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 272f42952f34..5a9d1d4c2487 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -504,16 +504,16 @@ struct nand_ecc_ctrl { int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page); int (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int oob_required); + const uint8_t *buf, int oob_required, int page); int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page); int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip, uint32_t offs, uint32_t len, 
uint8_t *buf, int page); int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip, uint32_t offset, uint32_t data_len, - const uint8_t *data_buf, int oob_required); + const uint8_t *data_buf, int oob_required, int page); int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int oob_required); + const uint8_t *buf, int oob_required, int page); int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, int page); int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, @@ -544,7 +544,7 @@ struct nand_buffers { * flash device * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the * flash device. - * @dn: [BOARDSPECIFIC] device node describing this instance + * @flash_node: [BOARDSPECIFIC] device node describing this instance * @read_byte: [REPLACEABLE] read one byte from the chip * @read_word: [REPLACEABLE] read one word from the chip * @write_byte: [REPLACEABLE] write a single byte to the chip on the @@ -556,10 +556,6 @@ struct nand_buffers { * @block_markbad: [REPLACEABLE] mark a block bad * @cmd_ctrl: [BOARDSPECIFIC] hardwarespecific function for controlling * ALE/CLE/nCE. Also used to write command and address - * @init_size: [BOARDSPECIFIC] hardwarespecific function for setting - * mtd->oobsize, mtd->writesize and so on. - * @id_data contains the 8 bytes values of NAND_CMD_READID. - * Return with the bus width. * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing * device ready/busy line. If set to NULL no access to * ready/busy is available and the ready/busy information @@ -647,7 +643,7 @@ struct nand_chip { void __iomem *IO_ADDR_R; void __iomem *IO_ADDR_W; - struct device_node *dn; + struct device_node *flash_node; uint8_t (*read_byte)(struct mtd_info *mtd); u16 (*read_word)(struct mtd_info *mtd); @@ -658,8 +654,6 @@ struct nand_chip { int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip); int (*block_markbad)(struct mtd_info *mtd, loff_t ofs); void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); - int (*init_size)(struct mtd_info *mtd, struct nand_chip *this, - u8 *id_data); int (*dev_ready)(struct mtd_info *mtd); void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column, int page_addr); @@ -1030,4 +1024,9 @@ struct nand_sdr_timings { /* get timing characteristics from ONFI timing mode. */ const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode); + +int nand_check_erased_ecc_chunk(void *data, int datalen, + void *ecc, int ecclen, + void *extraoob, int extraooblen, + int threshold); #endif /* __LINUX_MTD_NAND_H */ diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index e5409524bb0a..c8723b62c4cd 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -10,6 +10,23 @@ #ifndef __LINUX_MTD_SPI_NOR_H #define __LINUX_MTD_SPI_NOR_H +#include <linux/bitops.h> +#include <linux/mtd/cfi.h> + +/* + * Manufacturer IDs + * + * The first byte returned from the flash after sending opcode SPINOR_OP_RDID. + * Sometimes these are the same as CFI IDs, but sometimes they aren't. + */ +#define SNOR_MFR_ATMEL CFI_MFR_ATMEL +#define SNOR_MFR_INTEL CFI_MFR_INTEL +#define SNOR_MFR_MICRON CFI_MFR_ST /* ST Micro <--> Micron */ +#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX +#define SNOR_MFR_SPANSION CFI_MFR_AMD +#define SNOR_MFR_SST CFI_MFR_SST +#define SNOR_MFR_WINBOND 0xef + /* * Note on opcode nomenclature: some opcodes have a format like * SPINOR_OP_FUNCTION{4,}_x_y_z. 
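The nand_check_erased_ecc_chunk() helper declared above belongs in the ECC-failure path of a driver's read_page() implementation: it decides whether an uncorrectable chunk is really corrupted or merely an erased page with a few bitflips. A hedged sketch of the call pattern (chip, mtd and the buffers are the driver's own):

	ret = nand_check_erased_ecc_chunk(databuf, chip->ecc.size,
					  eccbuf, chip->ecc.bytes,
					  NULL, 0,	/* no extra OOB */
					  chip->ecc.strength);
	if (ret < 0)
		mtd->ecc_stats.failed++;	/* genuinely corrupted */
	else
		max_bitflips = max_t(unsigned int, max_bitflips, ret);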
The numbers x, y, and z stand for the number @@ -61,24 +78,24 @@ #define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */ /* Status Register bits. */ -#define SR_WIP 1 /* Write in progress */ -#define SR_WEL 2 /* Write enable latch */ +#define SR_WIP BIT(0) /* Write in progress */ +#define SR_WEL BIT(1) /* Write enable latch */ /* meaning of other SR_* bits may differ between vendors */ -#define SR_BP0 4 /* Block protect 0 */ -#define SR_BP1 8 /* Block protect 1 */ -#define SR_BP2 0x10 /* Block protect 2 */ -#define SR_SRWD 0x80 /* SR write protect */ +#define SR_BP0 BIT(2) /* Block protect 0 */ +#define SR_BP1 BIT(3) /* Block protect 1 */ +#define SR_BP2 BIT(4) /* Block protect 2 */ +#define SR_SRWD BIT(7) /* SR write protect */ -#define SR_QUAD_EN_MX 0x40 /* Macronix Quad I/O */ +#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */ /* Enhanced Volatile Configuration Register bits */ -#define EVCR_QUAD_EN_MICRON 0x80 /* Micron Quad I/O */ +#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */ /* Flag Status Register bits */ -#define FSR_READY 0x80 +#define FSR_READY BIT(7) /* Configuration Register bits. */ -#define CR_QUAD_EN_SPAN 0x2 /* Spansion Quad I/O */ +#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */ enum read_mode { SPI_NOR_NORMAL = 0, @@ -87,33 +104,6 @@ enum read_mode { SPI_NOR_QUAD, }; -/** - * struct spi_nor_xfer_cfg - Structure for defining a Serial Flash transfer - * @wren: command for "Write Enable", or 0x00 for not required - * @cmd: command for operation - * @cmd_pins: number of pins to send @cmd (1, 2, 4) - * @addr: address for operation - * @addr_pins: number of pins to send @addr (1, 2, 4) - * @addr_width: number of address bytes - * (3,4, or 0 for address not required) - * @mode: mode data - * @mode_pins: number of pins to send @mode (1, 2, 4) - * @mode_cycles: number of mode cycles (0 for mode not required) - * @dummy_cycles: number of dummy cycles (0 for dummy not required) - */ -struct spi_nor_xfer_cfg { - u8 wren; - u8 cmd; - u8 cmd_pins; - u32 addr; - u8 addr_pins; - u8 addr_width; - u8 mode; - u8 mode_pins; - u8 mode_cycles; - u8 dummy_cycles; -}; - #define SPI_NOR_MAX_CMD_SIZE 8 enum spi_nor_ops { SPI_NOR_OPS_READ = 0, @@ -127,11 +117,14 @@ enum spi_nor_option_flags { SNOR_F_USE_FSR = BIT(0), }; +struct mtd_info; + /** * struct spi_nor - Structure for defining a the SPI NOR layer * @mtd: point to a mtd_info structure * @lock: the lock for the read/write/erase/lock/unlock operations * @dev: point to a spi device, or a spi nor controller device. + * @flash_node: point to a device node describing this flash instance. 
* @page_size: the page size of the SPI NOR * @addr_width: number of address bytes * @erase_opcode: the opcode for erasing a sector @@ -141,28 +134,28 @@ enum spi_nor_option_flags { * @flash_read: the mode of the read * @sst_write_second: used by the SST write operation * @flags: flag options for the current SPI-NOR (SNOR_F_*) - * @cfg: used by the read_xfer/write_xfer * @cmd_buf: used by the write_reg * @prepare: [OPTIONAL] do some preparations for the * read/write/erase/lock/unlock operations * @unprepare: [OPTIONAL] do some post work after the * read/write/erase/lock/unlock operations - * @read_xfer: [OPTIONAL] the read fundamental primitive - * @write_xfer: [OPTIONAL] the writefundamental primitive * @read_reg: [DRIVER-SPECIFIC] read out the register * @write_reg: [DRIVER-SPECIFIC] write data to the register * @read: [DRIVER-SPECIFIC] read data from the SPI NOR * @write: [DRIVER-SPECIFIC] write data to the SPI NOR * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR * at the offset @offs - * @lock: [FLASH-SPECIFIC] lock a region of the SPI NOR - * @unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR + * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR + * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR + * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is + * completely locked * @priv: the private data */ struct spi_nor { - struct mtd_info *mtd; + struct mtd_info mtd; struct mutex lock; struct device *dev; + struct device_node *flash_node; u32 page_size; u8 addr_width; u8 erase_opcode; @@ -172,18 +165,12 @@ struct spi_nor { enum read_mode flash_read; bool sst_write_second; u32 flags; - struct spi_nor_xfer_cfg cfg; u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops); void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops); - int (*read_xfer)(struct spi_nor *nor, struct spi_nor_xfer_cfg *cfg, - u8 *buf, size_t len); - int (*write_xfer)(struct spi_nor *nor, struct spi_nor_xfer_cfg *cfg, - u8 *buf, size_t len); int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); - int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len, - int write_enable); + int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); int (*read)(struct spi_nor *nor, loff_t from, size_t len, size_t *retlen, u_char *read_buf); @@ -193,6 +180,7 @@ struct spi_nor { int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); void *priv; }; diff --git a/include/linux/n_r3964.h b/include/linux/n_r3964.h index 5d0b2a1dee69..90a803aa42e8 100644 --- a/include/linux/n_r3964.h +++ b/include/linux/n_r3964.h @@ -152,9 +152,6 @@ struct r3964_info { unsigned char *rx_buf; /* ring buffer */ unsigned char *tx_buf; - wait_queue_head_t read_wait; - //struct wait_queue *read_wait; - struct r3964_block_header *rx_first; struct r3964_block_header *rx_last; struct r3964_block_header *tx_first; @@ -164,8 +161,9 @@ struct r3964_info { unsigned char last_rx; unsigned char bcc; unsigned int blocks_in_rx_queue; - - + + struct mutex read_lock; /* serialize r3964_read */ + struct r3964_client_info *firstClient; unsigned int state; unsigned int flags; diff --git a/include/linux/net.h b/include/linux/net.h index 049d4b03c4c4..70ac5e28e6b7 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -24,7 +24,8 @@ #include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */ 
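With struct mtd_info now embedded in struct spi_nor rather than pointed to (see the spi-nor.h hunk above), a controller driver no longer allocates an mtd_info of its own; it registers the member by address. A sketch of the adjusted probe pattern, where priv is a hypothetical driver-private structure and error handling is omitted:

	struct spi_nor *nor = &priv->nor;

	nor->dev = &spi->dev;
	nor->flash_node = spi->dev.of_node;	/* new flash_node field */
	spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
	mtd_device_register(&nor->mtd, NULL, 0);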
#include <linux/kmemcheck.h> #include <linux/rcupdate.h> -#include <linux/jump_label.h> +#include <linux/once.h> + #include <uapi/linux/net.h> struct poll_table_struct; @@ -250,22 +251,8 @@ do { \ } while (0) #endif -bool __net_get_random_once(void *buf, int nbytes, bool *done, - struct static_key *done_key); - -#define net_get_random_once(buf, nbytes) \ - ({ \ - bool ___ret = false; \ - static bool ___done = false; \ - static struct static_key ___once_key = \ - STATIC_KEY_INIT_TRUE; \ - if (static_key_true(&___once_key)) \ - ___ret = __net_get_random_once(buf, \ - nbytes, \ - &___done, \ - &___once_key); \ - ___ret; \ - }) +#define net_get_random_once(buf, nbytes) \ + get_random_once((buf), (nbytes)) int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t len); diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 9672781c593d..f0d87347df19 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -125,6 +125,9 @@ enum { #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) #define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) +#define for_each_netdev_feature(mask_addr, bit) \ + for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) + /* Features valid for ethtool to change */ /* = all defined minus driver/device-class-related */ #define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \ @@ -167,6 +170,12 @@ enum { */ #define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO) +/* + * If upper/master device has these features disabled, they must be disabled + * on all lower/slave devices as well. + */ +#define NETIF_F_UPPER_DISABLES NETIF_F_LRO + /* changeable features with no special hardware requirements */ #define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 2d15e3831440..a9e3bf42d287 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -718,8 +718,8 @@ struct xps_map { u16 queues[0]; }; #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) -#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \ - / sizeof(u16)) +#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \ + - sizeof(struct xps_map)) / sizeof(u16)) /* * This structure holds all XPS maps for device. Maps are indexed by CPU. @@ -881,6 +881,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, * int max_tx_rate); * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); + * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting); * int (*ndo_get_vf_config)(struct net_device *dev, * int vf, struct ifla_vf_info *ivf); * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); @@ -1054,6 +1055,10 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * This function is used to pass protocol port error state information * to the switch driver. The switch driver can react to the proto_down * by doing a phys down on the associated switch port. + * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb); + * This function is used to get egress tunnel information for given skb. + * This is useful for retrieving outer tunnel header parameters while + * sampling packet. 
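The for_each_netdev_feature() iterator above, combined with NETIF_F_UPPER_DISABLES, lets core code walk exactly the features that must be kept in sync between an upper device and its lower devices. A sketch of the loop shape (the body is illustrative):

	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	int feature_bit;

	for_each_netdev_feature(&upper_disables, feature_bit) {
		netdev_features_t feature = __NETIF_F_BIT(feature_bit);

		/* e.g. if 'feature' is off on the upper device,
		 * clear it on each lower device as well */
	}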
* */ struct net_device_ops { @@ -1109,6 +1114,8 @@ struct net_device_ops { int max_tx_rate); int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); + int (*ndo_set_vf_trust)(struct net_device *dev, + int vf, bool setting); int (*ndo_get_vf_config)(struct net_device *dev, int vf, struct ifla_vf_info *ivf); @@ -1227,6 +1234,8 @@ struct net_device_ops { int (*ndo_get_iflink)(const struct net_device *dev); int (*ndo_change_proto_down)(struct net_device *dev, bool proto_down); + int (*ndo_fill_metadata_dst)(struct net_device *dev, + struct sk_buff *skb); }; /** @@ -1258,9 +1267,10 @@ struct net_device_ops { * @IFF_LIVE_ADDR_CHANGE: device supports hardware address * change when it's running * @IFF_MACVLAN: Macvlan device - * @IFF_VRF_MASTER: device is a VRF master + * @IFF_L3MDEV_MASTER: device is an L3 master device * @IFF_NO_QUEUE: device can run without qdisc attached * @IFF_OPENVSWITCH: device is a Open vSwitch master + * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device */ enum netdev_priv_flags { IFF_802_1Q_VLAN = 1<<0, @@ -1283,9 +1293,10 @@ enum netdev_priv_flags { IFF_XMIT_DST_RELEASE_PERM = 1<<17, IFF_IPVLAN_MASTER = 1<<18, IFF_IPVLAN_SLAVE = 1<<19, - IFF_VRF_MASTER = 1<<20, + IFF_L3MDEV_MASTER = 1<<20, IFF_NO_QUEUE = 1<<21, IFF_OPENVSWITCH = 1<<22, + IFF_L3MDEV_SLAVE = 1<<23, }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1308,7 +1319,7 @@ enum netdev_priv_flags { #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM #define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER #define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE -#define IFF_VRF_MASTER IFF_VRF_MASTER +#define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER #define IFF_NO_QUEUE IFF_NO_QUEUE #define IFF_OPENVSWITCH IFF_OPENVSWITCH @@ -1427,7 +1438,6 @@ enum netdev_priv_flags { * @dn_ptr: DECnet specific data * @ip6_ptr: IPv6 specific data * @ax25_ptr: AX.25 specific data - * @vrf_ptr: VRF specific data * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering * * @last_rx: Time of last Rx @@ -1587,6 +1597,9 @@ struct net_device { #ifdef CONFIG_NET_SWITCHDEV const struct switchdev_ops *switchdev_ops; #endif +#ifdef CONFIG_NET_L3_MASTER_DEV + const struct l3mdev_ops *l3mdev_ops; +#endif const struct header_ops *header_ops; @@ -1646,7 +1659,6 @@ struct net_device { struct dn_dev __rcu *dn_ptr; struct inet6_dev __rcu *ip6_ptr; void *ax25_ptr; - struct net_vrf_dev __rcu *vrf_ptr; struct wireless_dev *ieee80211_ptr; struct wpan_dev *ieee802154_ptr; #if IS_ENABLED(CONFIG_MPLS_ROUTING) @@ -2103,6 +2115,7 @@ struct pcpu_sw_netstats { #define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */ #define NETDEV_CHANGEINFODATA 0x0018 #define NETDEV_BONDING_INFO 0x0019 +#define NETDEV_PRECHANGEUPPER 0x001A int register_netdevice_notifier(struct notifier_block *nb); int unregister_netdevice_notifier(struct notifier_block *nb); @@ -2203,6 +2216,7 @@ void dev_add_offload(struct packet_offload *po); void dev_remove_offload(struct packet_offload *po); int dev_get_iflink(const struct net_device *dev); +int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, unsigned short mask); struct net_device *dev_get_by_name(struct net *net, const char *name); @@ -2213,12 +2227,8 @@ int dev_open(struct net_device *dev); int dev_close(struct net_device *dev); int dev_close_many(struct list_head *head, bool unlink); void dev_disable_lro(struct net_device *dev); -int dev_loopback_xmit(struct sock *sk, struct sk_buff *newskb); -int 
dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb); -static inline int dev_queue_xmit(struct sk_buff *skb) -{ - return dev_queue_xmit_sk(skb->sk, skb); -} +int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); +int dev_queue_xmit(struct sk_buff *skb); int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv); int register_netdevice(struct net_device *dev); void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); @@ -2990,11 +3000,7 @@ static inline void dev_consume_skb_any(struct sk_buff *skb) int netif_rx(struct sk_buff *skb); int netif_rx_ni(struct sk_buff *skb); -int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb); -static inline int netif_receive_skb(struct sk_buff *skb) -{ - return netif_receive_skb_sk(skb->sk, skb); -} +int netif_receive_skb(struct sk_buff *skb); gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); void napi_gro_flush(struct napi_struct *napi, bool flush_old); struct sk_buff *napi_get_frags(struct napi_struct *napi); @@ -3832,9 +3838,14 @@ static inline bool netif_supports_nofcs(struct net_device *dev) return dev->priv_flags & IFF_SUPP_NOFCS; } -static inline bool netif_is_vrf(const struct net_device *dev) +static inline bool netif_is_l3_master(const struct net_device *dev) { - return dev->priv_flags & IFF_VRF_MASTER; + return dev->priv_flags & IFF_L3MDEV_MASTER; +} + +static inline bool netif_is_l3_slave(const struct net_device *dev) +{ + return dev->priv_flags & IFF_L3MDEV_SLAVE; } static inline bool netif_is_bridge_master(const struct net_device *dev) @@ -3847,27 +3858,6 @@ static inline bool netif_is_ovs_master(const struct net_device *dev) return dev->priv_flags & IFF_OPENVSWITCH; } -static inline bool netif_index_is_vrf(struct net *net, int ifindex) -{ - bool rc = false; - -#if IS_ENABLED(CONFIG_NET_VRF) - struct net_device *dev; - - if (ifindex == 0) - return false; - - rcu_read_lock(); - - dev = dev_get_by_index_rcu(net, ifindex); - if (dev) - rc = netif_is_vrf(dev); - - rcu_read_unlock(); -#endif - return rc; -} - /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ static inline void netif_keep_dst(struct net_device *dev) { diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 36a652531791..0ad556726181 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -54,8 +54,9 @@ struct nf_hook_state { struct net_device *in; struct net_device *out; struct sock *sk; + struct net *net; struct list_head *hook_list; - int (*okfn)(struct sock *, struct sk_buff *); + int (*okfn)(struct net *, struct sock *, struct sk_buff *); }; static inline void nf_hook_state_init(struct nf_hook_state *p, @@ -65,7 +66,8 @@ static inline void nf_hook_state_init(struct nf_hook_state *p, struct net_device *indev, struct net_device *outdev, struct sock *sk, - int (*okfn)(struct sock *, struct sk_buff *)) + struct net *net, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { p->hook = hook; p->thresh = thresh; @@ -73,11 +75,12 @@ static inline void nf_hook_state_init(struct nf_hook_state *p, p->in = indev; p->out = outdev; p->sk = sk; + p->net = net; p->hook_list = hook_list; p->okfn = okfn; } -typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops, +typedef unsigned int nf_hookfn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state); @@ -87,7 +90,6 @@ struct nf_hook_ops { /* User fills in from here down. 
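Throughout the netfilter changes that follow, every okfn gains a leading struct net argument instead of deriving the namespace from the in/out devices. A sketch of a continuation callback in the new shape, and of a caller threading the namespace through NF_HOOK (names are illustrative):

	static int sketch_output_finish(struct net *net, struct sock *sk,
					struct sk_buff *skb)
	{
		return dev_queue_xmit(skb);	/* plain again, per above */
	}

	static int sketch_local_out(struct net *net, struct sock *sk,
				    struct sk_buff *skb)
	{
		return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
			       skb, NULL, skb_dst(skb)->dev,
			       sketch_output_finish);
	}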
*/ nf_hookfn *hook; struct net_device *dev; - struct module *owner; void *priv; u_int8_t pf; unsigned int hooknum; @@ -167,32 +169,32 @@ int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state); * value indicates the packet has been consumed by the hook. */ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, + struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, - int (*okfn)(struct sock *, struct sk_buff *), + int (*okfn)(struct net *, struct sock *, struct sk_buff *), int thresh) { - struct net *net = dev_net(indev ? indev : outdev); struct list_head *hook_list = &net->nf.hooks[pf][hook]; if (nf_hook_list_active(hook_list, pf, hook)) { struct nf_hook_state state; nf_hook_state_init(&state, hook_list, hook, thresh, - pf, indev, outdev, sk, okfn); + pf, indev, outdev, sk, net, okfn); return nf_hook_slow(skb, &state); } return 1; } -static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk, - struct sk_buff *skb, struct net_device *indev, - struct net_device *outdev, - int (*okfn)(struct sock *, struct sk_buff *)) +static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, + struct sock *sk, struct sk_buff *skb, + struct net_device *indev, struct net_device *outdev, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { - return nf_hook_thresh(pf, hook, sk, skb, indev, outdev, okfn, INT_MIN); + return nf_hook_thresh(pf, hook, net, sk, skb, indev, outdev, okfn, INT_MIN); } /* Activate hook; either okfn or kfree_skb called, unless a hook @@ -213,36 +215,38 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk, */ static inline int -NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sock *sk, +NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, - int (*okfn)(struct sock *, struct sk_buff *), int thresh) + int (*okfn)(struct net *, struct sock *, struct sk_buff *), + int thresh) { - int ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, thresh); + int ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, thresh); if (ret == 1) - ret = okfn(sk, skb); + ret = okfn(net, sk, skb); return ret; } static inline int -NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sock *sk, +NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, - int (*okfn)(struct sock *, struct sk_buff *), bool cond) + int (*okfn)(struct net *, struct sock *, struct sk_buff *), + bool cond) { int ret; if (!cond || - ((ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, INT_MIN)) == 1)) - ret = okfn(sk, skb); + ((ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, INT_MIN)) == 1)) + ret = okfn(net, sk, skb); return ret; } static inline int -NF_HOOK(uint8_t pf, unsigned int hook, struct sock *sk, struct sk_buff *skb, +NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, - int (*okfn)(struct sock *, struct sk_buff *)) + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { - return NF_HOOK_THRESH(pf, hook, sk, skb, in, out, okfn, INT_MIN); + return NF_HOOK_THRESH(pf, hook, net, sk, skb, in, out, okfn, INT_MIN); } /* Call setsockopt() */ @@ -278,7 +282,7 @@ struct nf_afinfo { struct flowi *fl, bool strict); void (*saveroute)(const struct sk_buff *skb, struct nf_queue_entry *entry); - int 
(*reroute)(struct sk_buff *skb, + int (*reroute)(struct net *net, struct sk_buff *skb, const struct nf_queue_entry *entry); int route_key_size; }; @@ -342,21 +346,27 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) } #else /* !CONFIG_NETFILTER */ -#define NF_HOOK(pf, hook, sk, skb, indev, outdev, okfn) (okfn)(sk, skb) -#define NF_HOOK_COND(pf, hook, sk, skb, indev, outdev, okfn, cond) (okfn)(sk, skb) -static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, - struct sock *sk, - struct sk_buff *skb, - struct net_device *indev, - struct net_device *outdev, - int (*okfn)(struct sock *sk, struct sk_buff *), int thresh) +static inline int +NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, + struct sk_buff *skb, struct net_device *in, struct net_device *out, + int (*okfn)(struct net *, struct sock *, struct sk_buff *), + bool cond) +{ + return okfn(net, sk, skb); +} + +static inline int +NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, + struct sk_buff *skb, struct net_device *in, struct net_device *out, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { - return okfn(sk, skb); + return okfn(net, sk, skb); } -static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk, - struct sk_buff *skb, struct net_device *indev, - struct net_device *outdev, - int (*okfn)(struct sock *, struct sk_buff *)) + +static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, + struct sock *sk, struct sk_buff *skb, + struct net_device *indev, struct net_device *outdev, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { return 1; } @@ -373,24 +383,28 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu; void nf_ct_attach(struct sk_buff *, const struct sk_buff *); extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu; +#else +static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} +#endif struct nf_conn; enum ip_conntrack_info; struct nlattr; -struct nfq_ct_hook { +struct nfnl_ct_hook { + struct nf_conn *(*get_ct)(const struct sk_buff *skb, + enum ip_conntrack_info *ctinfo); size_t (*build_size)(const struct nf_conn *ct); - int (*build)(struct sk_buff *skb, struct nf_conn *ct); + int (*build)(struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + u_int16_t ct_attr, u_int16_t ct_info_attr); int (*parse)(const struct nlattr *attr, struct nf_conn *ct); int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct, u32 portid, u32 report); void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, s32 off); }; -extern struct nfq_ct_hook __rcu *nfq_ct_hook; -#else -static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} -#endif +extern struct nfnl_ct_hook __rcu *nfnl_ct_hook; /** * nf_skb_duplicated - TEE target has sent a packet diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index e955d4730625..249d1bb01e03 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -45,11 +45,11 @@ int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, void nfnl_lock(__u8 subsys_id); void nfnl_unlock(__u8 subsys_id); #ifdef CONFIG_PROVE_LOCKING -int lockdep_nfnl_is_held(__u8 subsys_id); +bool lockdep_nfnl_is_held(__u8 subsys_id); #else -static inline int lockdep_nfnl_is_held(__u8 subsys_id) +static 
inline bool lockdep_nfnl_is_held(__u8 subsys_id) { - return 1; + return true; } #endif /* CONFIG_PROVE_LOCKING */ diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index b006b719183f..c5577410c25d 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -13,6 +13,7 @@ * @target: the target extension * @matchinfo: per-match data * @targetinfo: per-target data + * @net network namespace through which the action was invoked * @in: input netdevice * @out: output netdevice * @fragoff: packet is a fragment, this is the data offset @@ -24,7 +25,6 @@ * Fields written to by extensions: * * @hotdrop: drop packet if we had inspection problems - * Network namespace obtainable using dev_net(in/out) */ struct xt_action_param { union { @@ -34,6 +34,7 @@ struct xt_action_param { union { const void *matchinfo, *targinfo; }; + struct net *net; const struct net_device *in, *out; int fragoff; unsigned int thoff; diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index c22a7fb8d0df..6f074db2f23d 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h @@ -53,7 +53,6 @@ extern struct xt_table *arpt_register_table(struct net *net, const struct arpt_replace *repl); extern void arpt_unregister_table(struct xt_table *table); extern unsigned int arpt_do_table(struct sk_buff *skb, - unsigned int hook, const struct nf_hook_state *state, struct xt_table *table); diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index 2437b8a5d7a9..2ed40c402b5e 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -17,7 +17,7 @@ enum nf_br_hook_priorities { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) -int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb); +int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb); static inline void br_drop_fake_rtable(struct sk_buff *skb) { diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index 8ca6d6464ea3..2ea517c7c6b9 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -111,9 +111,9 @@ struct ebt_table { extern struct ebt_table *ebt_register_table(struct net *net, const struct ebt_table *table); extern void ebt_unregister_table(struct net *net, struct ebt_table *table); -extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb, - const struct net_device *in, const struct net_device *out, - struct ebt_table *table); +extern unsigned int ebt_do_table(struct sk_buff *skb, + const struct nf_hook_state *state, + struct ebt_table *table); /* Used in the kernel match() functions */ #define FWINV(bool,invflg) ((bool) ^ !!(info->invflags & invflg)) diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h index cb0727fe2b3d..187feabe557c 100644 --- a/include/linux/netfilter_ingress.h +++ b/include/linux/netfilter_ingress.h @@ -17,7 +17,7 @@ static inline int nf_hook_ingress(struct sk_buff *skb) nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress, NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL, - skb->dev, NULL, NULL); + skb->dev, NULL, dev_net(skb->dev), NULL); return nf_hook_slow(skb, &state); } diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 6e4591bb54d4..98c03b2462b5 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -6,7 +6,7 @@ #include 
<uapi/linux/netfilter_ipv4.h> -int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type); +int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type); __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); #endif /*__LINUX_IP_NETFILTER_H*/ diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index 4073510da485..aa598f942c01 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -64,7 +64,6 @@ struct ipt_error { extern void *ipt_alloc_initial_table(const struct xt_table *); extern unsigned int ipt_do_table(struct sk_buff *skb, - unsigned int hook, const struct nf_hook_state *state, struct xt_table *table); diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 771574677e83..47c6b04c28c0 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -17,12 +17,12 @@ struct nf_ipv6_ops { int (*chk_addr)(struct net *net, const struct in6_addr *addr, const struct net_device *dev, int strict); void (*route_input)(struct sk_buff *skb); - int (*fragment)(struct sock *sk, struct sk_buff *skb, - int (*output)(struct sock *, struct sk_buff *)); + int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, + int (*output)(struct net *, struct sock *, struct sk_buff *)); }; #ifdef CONFIG_NETFILTER -int ip6_route_me_harder(struct sk_buff *skb); +int ip6_route_me_harder(struct net *net, struct sk_buff *skb); __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index b40d2b635778..0f76e5c674f9 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -30,7 +30,6 @@ extern struct xt_table *ip6t_register_table(struct net *net, const struct ip6t_replace *repl); extern void ip6t_unregister_table(struct net *net, struct xt_table *table); extern unsigned int ip6t_do_table(struct sk_buff *skb, - unsigned int hook, const struct nf_hook_state *state, struct xt_table *table); diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 00121f298269..e7e78537aea2 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -130,6 +130,7 @@ enum nfs_opnum4 { OP_READ_PLUS = 68, OP_SEEK = 69, OP_WRITE_SAME = 70, + OP_CLONE = 71, OP_ILLEGAL = 10044, }; @@ -421,6 +422,7 @@ enum lock_type4 { #define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0) #define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) #define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) +#define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13) #define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) /* MDS threshold bitmap bits */ @@ -501,6 +503,7 @@ enum { NFSPROC4_CLNT_ALLOCATE, NFSPROC4_CLNT_DEALLOCATE, NFSPROC4_CLNT_LAYOUTSTATS, + NFSPROC4_CLNT_CLONE, }; /* nfs41 types */ diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 570a7df2775b..2469ab0bb3a1 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -147,6 +147,7 @@ struct nfs_server { unsigned int acdirmax; unsigned int namelen; unsigned int options; /* extra options enabled by mount */ + unsigned int clone_blksize; /* granularity of a CLONE operation */ #define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */ #define NFS_OPTION_MIGRATION 0x00000002 /* - NFSv4 migration enabled */ @@ -243,5 +244,6 @@ struct nfs_server { #define NFS_CAP_ALLOCATE (1U << 20) #define 
NFS_CAP_DEALLOCATE (1U << 21) #define NFS_CAP_LAYOUTSTATS (1U << 22) +#define NFS_CAP_CLONE (1U << 23) #endif diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 52faf7e96c65..570d630f98ae 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -141,6 +141,7 @@ struct nfs_fsinfo { __u32 lease_time; /* in seconds */ __u32 layouttype; /* supported pnfs layout driver */ __u32 blksize; /* preferred pnfs io block size */ + __u32 clone_blksize; /* granularity of a CLONE operation */ }; struct nfs_fsstat { @@ -359,6 +360,25 @@ struct nfs42_layoutstat_data { struct nfs42_layoutstat_res res; }; +struct nfs42_clone_args { + struct nfs4_sequence_args seq_args; + struct nfs_fh *src_fh; + struct nfs_fh *dst_fh; + nfs4_stateid src_stateid; + nfs4_stateid dst_stateid; + __u64 src_offset; + __u64 dst_offset; + __u64 count; + const u32 *dst_bitmask; +}; + +struct nfs42_clone_res { + struct nfs4_sequence_res seq_res; + unsigned int rpc_status; + struct nfs_fattr *dst_fattr; + const struct nfs_server *server; +}; + struct stateowner_id { __u64 create_time; __u32 uniquifier; @@ -528,7 +548,7 @@ struct nfs4_delegreturnargs { struct nfs4_delegreturnres { struct nfs4_sequence_res seq_res; struct nfs_fattr * fattr; - const struct nfs_server *server; + struct nfs_server *server; }; /* @@ -601,7 +621,7 @@ struct nfs_removeargs { struct nfs_removeres { struct nfs4_sequence_res seq_res; - const struct nfs_server *server; + struct nfs_server *server; struct nfs_fattr *dir_attr; struct nfs4_change_info cinfo; }; @@ -619,7 +639,7 @@ struct nfs_renameargs { struct nfs_renameres { struct nfs4_sequence_res seq_res; - const struct nfs_server *server; + struct nfs_server *server; struct nfs4_change_info old_cinfo; struct nfs_fattr *old_fattr; struct nfs4_change_info new_cinfo; @@ -685,7 +705,6 @@ struct nfs_setaclargs { struct nfs4_sequence_args seq_args; struct nfs_fh * fh; size_t acl_len; - unsigned int acl_pgbase; struct page ** acl_pages; }; @@ -697,7 +716,6 @@ struct nfs_getaclargs { struct nfs4_sequence_args seq_args; struct nfs_fh * fh; size_t acl_len; - unsigned int acl_pgbase; struct page ** acl_pages; }; diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 78488e099ce7..7ec5b86735f3 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -73,6 +73,7 @@ extern int watchdog_user_enabled; extern int watchdog_thresh; extern unsigned long *watchdog_cpumask_bits; extern int sysctl_softlockup_all_cpu_backtrace; +extern int sysctl_hardlockup_all_cpu_backtrace; struct ctl_table; extern int proc_watchdog(struct ctl_table *, int , void __user *, size_t *, loff_t *); diff --git a/include/linux/nvme.h b/include/linux/nvme.h index b5812c395351..3af5f454c04a 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -15,10 +15,7 @@ #ifndef _LINUX_NVME_H #define _LINUX_NVME_H -#include <uapi/linux/nvme.h> -#include <linux/pci.h> -#include <linux/kref.h> -#include <linux/blk-mq.h> +#include <linux/types.h> struct nvme_bar { __u64 cap; /* Controller Capabilities */ @@ -76,115 +73,528 @@ enum { NVME_CSTS_SHST_MASK = 3 << 2, }; -extern unsigned char nvme_io_timeout; -#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ) +struct nvme_id_power_state { + __le16 max_power; /* centiwatts */ + __u8 rsvd2; + __u8 flags; + __le32 entry_lat; /* microseconds */ + __le32 exit_lat; /* microseconds */ + __u8 read_tput; + __u8 read_lat; + __u8 write_tput; + __u8 write_lat; + __le16 idle_power; + __u8 idle_scale; + __u8 rsvd19; + __le16 active_power; + __u8 active_work_scale; + __u8 rsvd23[9]; +}; -/* - * 
Represents an NVM Express device. Each nvme_dev is a PCI function. - */ -struct nvme_dev { - struct list_head node; - struct nvme_queue **queues; - struct request_queue *admin_q; - struct blk_mq_tag_set tagset; - struct blk_mq_tag_set admin_tagset; - u32 __iomem *dbs; - struct device *dev; - struct dma_pool *prp_page_pool; - struct dma_pool *prp_small_pool; - int instance; - unsigned queue_count; - unsigned online_queues; - unsigned max_qid; - int q_depth; - u32 db_stride; - u32 ctrl_config; - struct msix_entry *entry; - struct nvme_bar __iomem *bar; - struct list_head namespaces; - struct kref kref; - struct device *device; - work_func_t reset_workfn; - struct work_struct reset_work; - struct work_struct probe_work; - struct work_struct scan_work; - char name[12]; - char serial[20]; - char model[40]; - char firmware_rev[8]; - bool subsystem; - u32 max_hw_sectors; - u32 stripe_size; - u32 page_size; - void __iomem *cmb; - dma_addr_t cmb_dma_addr; - u64 cmb_size; - u32 cmbsz; - u16 oncs; - u16 abort_limit; - u8 event_limit; - u8 vwc; +enum { + NVME_PS_FLAGS_MAX_POWER_SCALE = 1 << 0, + NVME_PS_FLAGS_NON_OP_STATE = 1 << 1, }; -/* - * An NVM Express namespace is equivalent to a SCSI LUN - */ -struct nvme_ns { - struct list_head list; +struct nvme_id_ctrl { + __le16 vid; + __le16 ssvid; + char sn[20]; + char mn[40]; + char fr[8]; + __u8 rab; + __u8 ieee[3]; + __u8 mic; + __u8 mdts; + __le16 cntlid; + __le32 ver; + __u8 rsvd84[172]; + __le16 oacs; + __u8 acl; + __u8 aerl; + __u8 frmw; + __u8 lpa; + __u8 elpe; + __u8 npss; + __u8 avscc; + __u8 apsta; + __le16 wctemp; + __le16 cctemp; + __u8 rsvd270[242]; + __u8 sqes; + __u8 cqes; + __u8 rsvd514[2]; + __le32 nn; + __le16 oncs; + __le16 fuses; + __u8 fna; + __u8 vwc; + __le16 awun; + __le16 awupf; + __u8 nvscc; + __u8 rsvd531; + __le16 acwu; + __u8 rsvd534[2]; + __le32 sgls; + __u8 rsvd540[1508]; + struct nvme_id_power_state psd[32]; + __u8 vs[1024]; +}; - struct nvme_dev *dev; - struct request_queue *queue; - struct gendisk *disk; +enum { + NVME_CTRL_ONCS_COMPARE = 1 << 0, + NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1, + NVME_CTRL_ONCS_DSM = 1 << 2, + NVME_CTRL_VWC_PRESENT = 1 << 0, +}; - unsigned ns_id; - int lba_shift; - u16 ms; - bool ext; - u8 pi_type; - u64 mode_select_num_blocks; - u32 mode_select_block_len; +struct nvme_lbaf { + __le16 ms; + __u8 ds; + __u8 rp; }; -/* - * The nvme_iod describes the data in an I/O, including the list of PRP - * entries. You can't see it in this data structure because C doesn't let - * me express that. Use nvme_alloc_iod to ensure there's enough space - * allocated to store the PRP list. - */ -struct nvme_iod { - unsigned long private; /* For the use of the submitter of the I/O */ - int npages; /* In the PRP list. 
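The mdts field of struct nvme_id_ctrl above encodes the maximum data transfer size as a power of two in units of the controller's minimum memory page size (CAP.MPSMIN). A hedged sketch of the conversion to 512-byte sectors, assuming page_shift holds the log2 of that page size:

	/* Sketch: mdts == 0 means the controller reports no limit. */
	u32 max_hw_sectors = 0;

	if (id->mdts)
		max_hw_sectors = 1 << (id->mdts + page_shift - 9);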
0 means small pool in use */ - int offset; /* Of PRP list */ - int nents; /* Used in scatterlist */ - int length; /* Of data, in bytes */ - dma_addr_t first_dma; - struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */ - struct scatterlist sg[0]; -}; - -static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) -{ - return (sector >> (ns->lba_shift - 9)); -} - -int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, - void *buf, unsigned bufflen); -int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, - void *buffer, void __user *ubuffer, unsigned bufflen, - u32 *result, unsigned timeout); -int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id); -int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid, - struct nvme_id_ns **id); -int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log); -int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, - dma_addr_t dma_addr, u32 *result); -int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11, - dma_addr_t dma_addr, u32 *result); - -struct sg_io_hdr; - -int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr); -int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg); -int nvme_sg_get_version_num(int __user *ip); +struct nvme_id_ns { + __le64 nsze; + __le64 ncap; + __le64 nuse; + __u8 nsfeat; + __u8 nlbaf; + __u8 flbas; + __u8 mc; + __u8 dpc; + __u8 dps; + __u8 nmic; + __u8 rescap; + __u8 fpi; + __u8 rsvd33; + __le16 nawun; + __le16 nawupf; + __le16 nacwu; + __le16 nabsn; + __le16 nabo; + __le16 nabspf; + __u16 rsvd46; + __le64 nvmcap[2]; + __u8 rsvd64[40]; + __u8 nguid[16]; + __u8 eui64[8]; + struct nvme_lbaf lbaf[16]; + __u8 rsvd192[192]; + __u8 vs[3712]; +}; + +enum { + NVME_NS_FEAT_THIN = 1 << 0, + NVME_NS_FLBAS_LBA_MASK = 0xf, + NVME_NS_FLBAS_META_EXT = 0x10, + NVME_LBAF_RP_BEST = 0, + NVME_LBAF_RP_BETTER = 1, + NVME_LBAF_RP_GOOD = 2, + NVME_LBAF_RP_DEGRADED = 3, + NVME_NS_DPC_PI_LAST = 1 << 4, + NVME_NS_DPC_PI_FIRST = 1 << 3, + NVME_NS_DPC_PI_TYPE3 = 1 << 2, + NVME_NS_DPC_PI_TYPE2 = 1 << 1, + NVME_NS_DPC_PI_TYPE1 = 1 << 0, + NVME_NS_DPS_PI_FIRST = 1 << 3, + NVME_NS_DPS_PI_MASK = 0x7, + NVME_NS_DPS_PI_TYPE1 = 1, + NVME_NS_DPS_PI_TYPE2 = 2, + NVME_NS_DPS_PI_TYPE3 = 3, +}; + +struct nvme_smart_log { + __u8 critical_warning; + __u8 temperature[2]; + __u8 avail_spare; + __u8 spare_thresh; + __u8 percent_used; + __u8 rsvd6[26]; + __u8 data_units_read[16]; + __u8 data_units_written[16]; + __u8 host_reads[16]; + __u8 host_writes[16]; + __u8 ctrl_busy_time[16]; + __u8 power_cycles[16]; + __u8 power_on_hours[16]; + __u8 unsafe_shutdowns[16]; + __u8 media_errors[16]; + __u8 num_err_log_entries[16]; + __le32 warning_temp_time; + __le32 critical_comp_time; + __le16 temp_sensor[8]; + __u8 rsvd216[296]; +}; + +enum { + NVME_SMART_CRIT_SPARE = 1 << 0, + NVME_SMART_CRIT_TEMPERATURE = 1 << 1, + NVME_SMART_CRIT_RELIABILITY = 1 << 2, + NVME_SMART_CRIT_MEDIA = 1 << 3, + NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4, +}; + +enum { + NVME_AER_NOTICE_NS_CHANGED = 0x0002, +}; + +struct nvme_lba_range_type { + __u8 type; + __u8 attributes; + __u8 rsvd2[14]; + __u64 slba; + __u64 nlb; + __u8 guid[16]; + __u8 rsvd48[16]; +}; + +enum { + NVME_LBART_TYPE_FS = 0x01, + NVME_LBART_TYPE_RAID = 0x02, + NVME_LBART_TYPE_CACHE = 0x03, + NVME_LBART_TYPE_SWAP = 0x04, + + NVME_LBART_ATTRIB_TEMP = 1 << 0, + NVME_LBART_ATTRIB_HIDE = 1 << 1, +}; + +struct nvme_reservation_status { + __le32 gen; + __u8 rtype; + __u8 regctl[2]; + __u8 
resv5[2]; + __u8 ptpls; + __u8 resv10[13]; + struct { + __le16 cntlid; + __u8 rcsts; + __u8 resv3[5]; + __le64 hostid; + __le64 rkey; + } regctl_ds[]; +}; + +/* I/O commands */ + +enum nvme_opcode { + nvme_cmd_flush = 0x00, + nvme_cmd_write = 0x01, + nvme_cmd_read = 0x02, + nvme_cmd_write_uncor = 0x04, + nvme_cmd_compare = 0x05, + nvme_cmd_write_zeroes = 0x08, + nvme_cmd_dsm = 0x09, + nvme_cmd_resv_register = 0x0d, + nvme_cmd_resv_report = 0x0e, + nvme_cmd_resv_acquire = 0x11, + nvme_cmd_resv_release = 0x15, +}; + +struct nvme_common_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le32 cdw2[2]; + __le64 metadata; + __le64 prp1; + __le64 prp2; + __le32 cdw10[6]; +}; + +struct nvme_rw_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2; + __le64 metadata; + __le64 prp1; + __le64 prp2; + __le64 slba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le32 reftag; + __le16 apptag; + __le16 appmask; +}; + +enum { + NVME_RW_LR = 1 << 15, + NVME_RW_FUA = 1 << 14, + NVME_RW_DSM_FREQ_UNSPEC = 0, + NVME_RW_DSM_FREQ_TYPICAL = 1, + NVME_RW_DSM_FREQ_RARE = 2, + NVME_RW_DSM_FREQ_READS = 3, + NVME_RW_DSM_FREQ_WRITES = 4, + NVME_RW_DSM_FREQ_RW = 5, + NVME_RW_DSM_FREQ_ONCE = 6, + NVME_RW_DSM_FREQ_PREFETCH = 7, + NVME_RW_DSM_FREQ_TEMP = 8, + NVME_RW_DSM_LATENCY_NONE = 0 << 4, + NVME_RW_DSM_LATENCY_IDLE = 1 << 4, + NVME_RW_DSM_LATENCY_NORM = 2 << 4, + NVME_RW_DSM_LATENCY_LOW = 3 << 4, + NVME_RW_DSM_SEQ_REQ = 1 << 6, + NVME_RW_DSM_COMPRESSED = 1 << 7, + NVME_RW_PRINFO_PRCHK_REF = 1 << 10, + NVME_RW_PRINFO_PRCHK_APP = 1 << 11, + NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12, + NVME_RW_PRINFO_PRACT = 1 << 13, +}; + +struct nvme_dsm_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + __le64 prp1; + __le64 prp2; + __le32 nr; + __le32 attributes; + __u32 rsvd12[4]; +}; + +enum { + NVME_DSMGMT_IDR = 1 << 0, + NVME_DSMGMT_IDW = 1 << 1, + NVME_DSMGMT_AD = 1 << 2, +}; + +struct nvme_dsm_range { + __le32 cattr; + __le32 nlb; + __le64 slba; +}; + +/* Admin commands */ + +enum nvme_admin_opcode { + nvme_admin_delete_sq = 0x00, + nvme_admin_create_sq = 0x01, + nvme_admin_get_log_page = 0x02, + nvme_admin_delete_cq = 0x04, + nvme_admin_create_cq = 0x05, + nvme_admin_identify = 0x06, + nvme_admin_abort_cmd = 0x08, + nvme_admin_set_features = 0x09, + nvme_admin_get_features = 0x0a, + nvme_admin_async_event = 0x0c, + nvme_admin_activate_fw = 0x10, + nvme_admin_download_fw = 0x11, + nvme_admin_format_nvm = 0x80, + nvme_admin_security_send = 0x81, + nvme_admin_security_recv = 0x82, +}; + +enum { + NVME_QUEUE_PHYS_CONTIG = (1 << 0), + NVME_CQ_IRQ_ENABLED = (1 << 1), + NVME_SQ_PRIO_URGENT = (0 << 1), + NVME_SQ_PRIO_HIGH = (1 << 1), + NVME_SQ_PRIO_MEDIUM = (2 << 1), + NVME_SQ_PRIO_LOW = (3 << 1), + NVME_FEAT_ARBITRATION = 0x01, + NVME_FEAT_POWER_MGMT = 0x02, + NVME_FEAT_LBA_RANGE = 0x03, + NVME_FEAT_TEMP_THRESH = 0x04, + NVME_FEAT_ERR_RECOVERY = 0x05, + NVME_FEAT_VOLATILE_WC = 0x06, + NVME_FEAT_NUM_QUEUES = 0x07, + NVME_FEAT_IRQ_COALESCE = 0x08, + NVME_FEAT_IRQ_CONFIG = 0x09, + NVME_FEAT_WRITE_ATOMIC = 0x0a, + NVME_FEAT_ASYNC_EVENT = 0x0b, + NVME_FEAT_AUTO_PST = 0x0c, + NVME_FEAT_SW_PROGRESS = 0x80, + NVME_FEAT_HOST_ID = 0x81, + NVME_FEAT_RESV_MASK = 0x82, + NVME_FEAT_RESV_PERSIST = 0x83, + NVME_LOG_ERROR = 0x01, + NVME_LOG_SMART = 0x02, + NVME_LOG_FW_SLOT = 0x03, + NVME_LOG_RESERVATION = 0x80, + NVME_FWACT_REPL = (0 << 3), + NVME_FWACT_REPL_ACTV = (1 << 3), + NVME_FWACT_ACTV = (2 << 3), +}; + +struct nvme_identify { + __u8 opcode; 
+ __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + __le64 prp1; + __le64 prp2; + __le32 cns; + __u32 rsvd11[5]; +}; + +struct nvme_features { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + __le64 prp1; + __le64 prp2; + __le32 fid; + __le32 dword11; + __u32 rsvd12[4]; +}; + +struct nvme_create_cq { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 cqid; + __le16 qsize; + __le16 cq_flags; + __le16 irq_vector; + __u32 rsvd12[4]; +}; + +struct nvme_create_sq { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 sqid; + __le16 qsize; + __le16 sq_flags; + __le16 cqid; + __u32 rsvd12[4]; +}; + +struct nvme_delete_queue { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[9]; + __le16 qid; + __u16 rsvd10; + __u32 rsvd11[5]; +}; + +struct nvme_abort_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[9]; + __le16 sqid; + __u16 cid; + __u32 rsvd11[5]; +}; + +struct nvme_download_firmware { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __le64 prp2; + __le32 numd; + __le32 offset; + __u32 rsvd12[4]; +}; + +struct nvme_format_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[4]; + __le32 cdw10; + __u32 rsvd11[5]; +}; + +struct nvme_command { + union { + struct nvme_common_command common; + struct nvme_rw_command rw; + struct nvme_identify identify; + struct nvme_features features; + struct nvme_create_cq create_cq; + struct nvme_create_sq create_sq; + struct nvme_delete_queue delete_queue; + struct nvme_download_firmware dlfw; + struct nvme_format_cmd format; + struct nvme_dsm_cmd dsm; + struct nvme_abort_cmd abort; + }; +}; + +enum { + NVME_SC_SUCCESS = 0x0, + NVME_SC_INVALID_OPCODE = 0x1, + NVME_SC_INVALID_FIELD = 0x2, + NVME_SC_CMDID_CONFLICT = 0x3, + NVME_SC_DATA_XFER_ERROR = 0x4, + NVME_SC_POWER_LOSS = 0x5, + NVME_SC_INTERNAL = 0x6, + NVME_SC_ABORT_REQ = 0x7, + NVME_SC_ABORT_QUEUE = 0x8, + NVME_SC_FUSED_FAIL = 0x9, + NVME_SC_FUSED_MISSING = 0xa, + NVME_SC_INVALID_NS = 0xb, + NVME_SC_CMD_SEQ_ERROR = 0xc, + NVME_SC_SGL_INVALID_LAST = 0xd, + NVME_SC_SGL_INVALID_COUNT = 0xe, + NVME_SC_SGL_INVALID_DATA = 0xf, + NVME_SC_SGL_INVALID_METADATA = 0x10, + NVME_SC_SGL_INVALID_TYPE = 0x11, + NVME_SC_LBA_RANGE = 0x80, + NVME_SC_CAP_EXCEEDED = 0x81, + NVME_SC_NS_NOT_READY = 0x82, + NVME_SC_RESERVATION_CONFLICT = 0x83, + NVME_SC_CQ_INVALID = 0x100, + NVME_SC_QID_INVALID = 0x101, + NVME_SC_QUEUE_SIZE = 0x102, + NVME_SC_ABORT_LIMIT = 0x103, + NVME_SC_ABORT_MISSING = 0x104, + NVME_SC_ASYNC_LIMIT = 0x105, + NVME_SC_FIRMWARE_SLOT = 0x106, + NVME_SC_FIRMWARE_IMAGE = 0x107, + NVME_SC_INVALID_VECTOR = 0x108, + NVME_SC_INVALID_LOG_PAGE = 0x109, + NVME_SC_INVALID_FORMAT = 0x10a, + NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b, + NVME_SC_INVALID_QUEUE = 0x10c, + NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d, + NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e, + NVME_SC_FEATURE_NOT_PER_NS = 0x10f, + NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110, + NVME_SC_BAD_ATTRIBUTES = 0x180, + NVME_SC_INVALID_PI = 0x181, + NVME_SC_READ_ONLY = 0x182, + NVME_SC_WRITE_FAULT = 0x280, + NVME_SC_READ_ERROR = 0x281, + NVME_SC_GUARD_CHECK = 0x282, + NVME_SC_APPTAG_CHECK = 0x283, + NVME_SC_REFTAG_CHECK = 0x284, + NVME_SC_COMPARE_FAILED = 0x285, + NVME_SC_ACCESS_DENIED = 0x286, + NVME_SC_DNR = 0x4000, +}; + +struct nvme_completion { + __le32 result; /* Used by admin commands to return data */ + __u32 rsvd; + __le16 
sq_head; /* how much of this queue may be reclaimed */ + __le16 sq_id; /* submission queue that generated this entry */ + __u16 command_id; /* of the command which completed */ + __le16 status; /* did the command fail, and if so, why? */ +}; + +#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8)) #endif /* _LINUX_NVME_H */ diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h index 98ba7525929e..36112cdd665a 100644 --- a/include/linux/of_dma.h +++ b/include/linux/of_dma.h @@ -34,7 +34,7 @@ struct of_dma_filter_info { dma_filter_fn filter_fn; }; -#ifdef CONFIG_OF +#ifdef CONFIG_DMA_OF extern int of_dma_controller_register(struct device_node *np, struct dma_chan *(*of_dma_xlate) (struct of_phandle_args *, struct of_dma *), diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index f3191828f037..87d6d1632dd4 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -29,6 +29,7 @@ struct device_node; */ enum of_gpio_flags { OF_GPIO_ACTIVE_LOW = 0x1, + OF_GPIO_SINGLE_ENDED = 0x2, }; #ifdef CONFIG_OF_GPIO diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index 4bcbd586a672..039f2eec49ce 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h @@ -46,6 +46,12 @@ extern int of_irq_get(struct device_node *dev, int index); extern int of_irq_get_byname(struct device_node *dev, const char *name); extern int of_irq_to_resource_table(struct device_node *dev, struct resource *res, int nr_irqs); +extern struct irq_domain *of_msi_get_domain(struct device *dev, + struct device_node *np, + enum irq_domain_bus_token token); +extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, + u32 rid); +extern void of_msi_configure(struct device *dev, struct device_node *np); #else static inline int of_irq_count(struct device_node *dev) { @@ -64,28 +70,42 @@ static inline int of_irq_to_resource_table(struct device_node *dev, { return 0; } +static inline struct irq_domain *of_msi_get_domain(struct device *dev, + struct device_node *np, + enum irq_domain_bus_token token) +{ + return NULL; +} +static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev, + u32 rid) +{ + return NULL; +} +static inline void of_msi_configure(struct device *dev, struct device_node *np) +{ +} #endif -#if defined(CONFIG_OF) +#if defined(CONFIG_OF_IRQ) || defined(CONFIG_SPARC) /* * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC * implements it differently. However, the prototype is the same for all, * so declare it here regardless of the CONFIG_OF_IRQ setting. 
*/ extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); -extern struct device_node *of_irq_find_parent(struct device_node *child); -extern void of_msi_configure(struct device *dev, struct device_node *np); +u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in); -#else /* !CONFIG_OF */ +#else /* !CONFIG_OF && !CONFIG_SPARC */ static inline unsigned int irq_of_parse_and_map(struct device_node *dev, int index) { return 0; } -static inline void *of_irq_find_parent(struct device_node *child) +static inline u32 of_msi_map_rid(struct device *dev, + struct device_node *msi_np, u32 rid_in) { - return NULL; + return rid_in; } #endif /* !CONFIG_OF */ diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index 29fd3fe1c035..38c0533a3359 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h @@ -17,6 +17,7 @@ int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); int of_pci_parse_bus_range(struct device_node *node, struct resource *res); int of_get_pci_domain_nr(struct device_node *node); void of_pci_dma_configure(struct pci_dev *pci_dev); +void of_pci_check_probe_only(void); #else static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) { @@ -53,6 +54,8 @@ of_get_pci_domain_nr(struct device_node *node) } static inline void of_pci_dma_configure(struct pci_dev *pci_dev) { } + +static inline void of_pci_check_probe_only(void) { } #endif #if defined(CONFIG_OF_ADDRESS) diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h index e5a70132a240..88fa8af2b937 100644 --- a/include/linux/omap-dma.h +++ b/include/linux/omap-dma.h @@ -17,7 +17,7 @@ #include <linux/platform_device.h> -#define INT_DMA_LCD 25 +#define INT_DMA_LCD (NR_IRQS_LEGACY + 25) #define OMAP1_DMA_TOUT_IRQ (1 << 0) #define OMAP_DMA_DROP_IRQ (1 << 1) diff --git a/include/linux/once.h b/include/linux/once.h new file mode 100644 index 000000000000..285f12cb40e6 --- /dev/null +++ b/include/linux/once.h @@ -0,0 +1,57 @@ +#ifndef _LINUX_ONCE_H +#define _LINUX_ONCE_H + +#include <linux/types.h> +#include <linux/jump_label.h> + +bool __do_once_start(bool *done, unsigned long *flags); +void __do_once_done(bool *done, struct static_key *once_key, + unsigned long *flags); + +/* Call a function exactly once. The idea of DO_ONCE() is to perform + * a function call such as initialization of random seeds, etc, only + * once, where DO_ONCE() can live in the fast-path. After @func has + * been called with the passed arguments, the static key will patch + * out the condition into a nop. DO_ONCE() guarantees type safety of + * arguments! + * + * Note that the following is not equivalent ... + * + * DO_ONCE(func, arg); + * DO_ONCE(func, arg); + * + * ... to this version: + * + * void foo(void) + * { + * DO_ONCE(func, arg); + * } + * + * foo(); + * foo(); + * + * In case the one-time invocation could be triggered from multiple + * places, then a common helper function must be defined, so that only + * a single static key will be placed there! + */ +#define DO_ONCE(func, ...) 
\ + ({ \ + bool ___ret = false; \ + static bool ___done = false; \ + static struct static_key ___once_key = STATIC_KEY_INIT_TRUE; \ + if (static_key_true(&___once_key)) { \ + unsigned long ___flags; \ + ___ret = __do_once_start(&___done, &___flags); \ + if (unlikely(___ret)) { \ + func(__VA_ARGS__); \ + __do_once_done(&___done, &___once_key, \ + &___flags); \ + } \ + } \ + ___ret; \ + }) + +#define get_random_once(buf, nbytes) \ + DO_ONCE(get_random_bytes, (buf), (nbytes)) + +#endif /* _LINUX_ONCE_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 416509e26d6d..bb53c7b86315 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -86,12 +86,7 @@ enum pageflags { PG_private, /* If pagecache, has fs-private data */ PG_private_2, /* If pagecache, has fs aux data */ PG_writeback, /* Page is under writeback */ -#ifdef CONFIG_PAGEFLAGS_EXTENDED PG_head, /* A head page */ - PG_tail, /* A tail page */ -#else - PG_compound, /* A compound page */ -#endif PG_swapcache, /* Swap page: swp_entry_t in private */ PG_mappedtodisk, /* Has blocks allocated on-disk */ PG_reclaim, /* To be reclaimed asap */ @@ -256,7 +251,7 @@ PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim) * Must use a macro here due to header dependency issues. page_zone() is not * available at this point. */ -#define PageHighMem(__p) is_highmem(page_zone(__p)) +#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p)) #else PAGEFLAG_FALSE(HighMem) #endif @@ -398,85 +393,46 @@ static inline void set_page_writeback_keepwrite(struct page *page) test_set_page_writeback_keepwrite(page); } -#ifdef CONFIG_PAGEFLAGS_EXTENDED -/* - * System with lots of page flags available. This allows separate - * flags for PageHead() and PageTail() checks of compound pages so that bit - * tests can be used in performance sensitive paths. PageCompound is - * generally not used in hot code paths except arch/powerpc/mm/init_64.c - * and arch/powerpc/kvm/book3s_64_vio_hv.c which use it to detect huge pages - * and avoid handling those in real mode. - */ __PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head) -__PAGEFLAG(Tail, tail) -static inline int PageCompound(struct page *page) -{ - return page->flags & ((1L << PG_head) | (1L << PG_tail)); - -} -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -static inline void ClearPageCompound(struct page *page) +static inline int PageTail(struct page *page) { - BUG_ON(!PageHead(page)); - ClearPageHead(page); + return READ_ONCE(page->compound_head) & 1; } -#endif - -#define PG_head_mask ((1L << PG_head)) -#else -/* - * Reduce page flag use as much as possible by overlapping - * compound page flags with the flags used for page cache pages. Possible - * because PageCompound is always set for compound pages and not for - * pages on the LRU and/or pagecache. - */ -TESTPAGEFLAG(Compound, compound) -__SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound) - -/* - * PG_reclaim is used in combination with PG_compound to mark the - * head and tail of a compound page. This saves one page flag - * but makes it impossible to use compound pages for the page cache. - * The PG_reclaim bit would have to be used for reclaim or readahead - * if compound pages enter the page cache. 
- * - * PG_compound & PG_reclaim => Tail page - * PG_compound & ~PG_reclaim => Head page - */ -#define PG_head_mask ((1L << PG_compound)) -#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim)) - -static inline int PageHead(struct page *page) +static inline void set_compound_head(struct page *page, struct page *head) { - return ((page->flags & PG_head_tail_mask) == PG_head_mask); + WRITE_ONCE(page->compound_head, (unsigned long)head + 1); } -static inline int PageTail(struct page *page) +static inline void clear_compound_head(struct page *page) { - return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask); + WRITE_ONCE(page->compound_head, 0); } -static inline void __SetPageTail(struct page *page) +static inline struct page *compound_head(struct page *page) { - page->flags |= PG_head_tail_mask; + unsigned long head = READ_ONCE(page->compound_head); + + if (unlikely(head & 1)) + return (struct page *) (head - 1); + return page; } -static inline void __ClearPageTail(struct page *page) +static inline int PageCompound(struct page *page) { - page->flags &= ~PG_head_tail_mask; -} + return PageHead(page) || PageTail(page); +} #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline void ClearPageCompound(struct page *page) { - BUG_ON((page->flags & PG_head_tail_mask) != (1 << PG_compound)); - clear_bit(PG_compound, &page->flags); + BUG_ON(!PageHead(page)); + ClearPageHead(page); } #endif -#endif /* !PAGEFLAGS_EXTENDED */ +#define PG_head_mask ((1L << PG_head)) #ifdef CONFIG_HUGETLB_PAGE int PageHuge(struct page *page); diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h index 17fa4f8de3a6..7e62920a3a94 100644 --- a/include/linux/page_counter.h +++ b/include/linux/page_counter.h @@ -36,9 +36,9 @@ static inline unsigned long page_counter_read(struct page_counter *counter) void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); void page_counter_charge(struct page_counter *counter, unsigned long nr_pages); -int page_counter_try_charge(struct page_counter *counter, - unsigned long nr_pages, - struct page_counter **fail); +bool page_counter_try_charge(struct page_counter *counter, + unsigned long nr_pages, + struct page_counter **fail); void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); int page_counter_limit(struct page_counter *counter, unsigned long limit); int page_counter_memparse(const char *buf, const char *max, diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index 2baeee12f48e..e942558b3585 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -44,7 +44,7 @@ enum pageblock_bits { #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE /* Huge page sizes are variable */ -extern int pageblock_order; +extern unsigned int pageblock_order; #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index a6c78e00ea96..26eabf5ec718 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -69,6 +69,13 @@ static inline gfp_t mapping_gfp_mask(struct address_space * mapping) return (__force gfp_t)mapping->flags & __GFP_BITS_MASK; } +/* Restricts the given gfp_mask to what the mapping allows. */ +static inline gfp_t mapping_gfp_constraint(struct address_space *mapping, + gfp_t gfp_mask) +{ + return mapping_gfp_mask(mapping) & gfp_mask; +} + /* * This is non-atomic. Only to be used before the mapping is activated. * Probably needs a barrier... 
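The mapping_gfp_constraint() helper added above is a one-line mask: it restricts a caller-chosen gfp_t to whatever the mapping allows. A minimal sketch of intended use follows; the readahead_gfp() wrapper name is illustrative and not part of this patch:

static inline gfp_t readahead_gfp(struct address_space *mapping)
{
        /*
         * Start from GFP_KERNEL but drop any rights the mapping forbids
         * (e.g. __GFP_FS for mappings that must not recurse into the
         * filesystem), then OR in flags that only relax the allocation.
         */
        return mapping_gfp_constraint(mapping, GFP_KERNEL) |
               __GFP_NORETRY | __GFP_NOWARN;
}

Because the helper only ANDs against mapping_gfp_mask(), any flags OR'ed in afterwards must themselves be safe for the mapping; here both added flags merely make the allocation fail fast rather than grant it extra rights.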
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index a965efa52152..89ab0572dbc6 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -52,6 +52,30 @@ static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) return ACPI_HANDLE(dev); } +struct acpi_pci_root; +struct acpi_pci_root_ops; + +struct acpi_pci_root_info { + struct acpi_pci_root *root; + struct acpi_device *bridge; + struct acpi_pci_root_ops *ops; + struct list_head resources; + char name[16]; +}; + +struct acpi_pci_root_ops { + struct pci_ops *pci_ops; + int (*init_info)(struct acpi_pci_root_info *info); + void (*release_info)(struct acpi_pci_root_info *info); + int (*prepare_resources)(struct acpi_pci_root_info *info); +}; + +extern int acpi_pci_probe_root_resources(struct acpi_pci_root_info *info); +extern struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root, + struct acpi_pci_root_ops *ops, + struct acpi_pci_root_info *info, + void *sd); + void acpi_pci_add_bus(struct pci_bus *bus); void acpi_pci_remove_bus(struct pci_bus *bus); diff --git a/include/linux/pci.h b/include/linux/pci.h index e90eb22de628..e828e7b4afec 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -820,6 +820,7 @@ void pci_bus_add_device(struct pci_dev *dev); void pci_read_bridge_bases(struct pci_bus *child); struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res); +struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev); u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin); int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp); @@ -1192,6 +1193,17 @@ void pci_unregister_driver(struct pci_driver *dev); module_driver(__pci_driver, pci_register_driver, \ pci_unregister_driver) +/** + * builtin_pci_driver() - Helper macro for registering a PCI driver + * @__pci_driver: pci_driver struct + * + * Helper macro for PCI drivers which do not do anything special in their + * init code. This eliminates a lot of boilerplate. Each driver may only + * use this macro once, and calling it replaces device_initcall(...) + */ +#define builtin_pci_driver(__pci_driver) \ + builtin_driver(__pci_driver, pci_register_driver) + struct pci_driver *pci_dev_driver(const struct pci_dev *dev); int pci_add_dynid(struct pci_driver *drv, unsigned int vendor, unsigned int device, diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 834c4e52cb2d..c2fa3ecb0dce 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -5,11 +5,12 @@ #include <linux/rwsem.h> #include <linux/percpu.h> #include <linux/wait.h> +#include <linux/rcu_sync.h> #include <linux/lockdep.h> struct percpu_rw_semaphore { + struct rcu_sync rss; unsigned int __percpu *fast_read_ctr; - atomic_t write_ctr; struct rw_semaphore rw_sem; atomic_t slow_read_ctr; wait_queue_head_t write_waitq; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 092a0e8a479a..d841d33bcdc9 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -140,33 +140,67 @@ struct hw_perf_event { }; #endif }; + /* + * If the event is a per task event, this will point to the task in + * question. See the comment in perf_event_alloc(). + */ struct task_struct *target; + +/* + * hw_perf_event::state flags; used to track the PERF_EF_* state. 
+ */ +#define PERF_HES_STOPPED 0x01 /* the counter is stopped */ +#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */ +#define PERF_HES_ARCH 0x04 + int state; + + /* + * The last observed hardware counter value, updated with a + * local64_cmpxchg() such that pmu::read() can be called nested. + */ local64_t prev_count; + + /* + * The period to start the next sample with. + */ u64 sample_period; + + /* + * The period we started this sample with. + */ u64 last_period; + + /* + * However much is left of the current period; note that this is + * a full 64bit value and allows for generation of periods longer + * than hardware might allow. + */ local64_t period_left; + + /* + * State for throttling the event, see __perf_event_overflow() and + * perf_adjust_freq_unthr_context(). + */ u64 interrupts_seq; u64 interrupts; + /* + * State for freq target events, see __perf_event_overflow() and + * perf_adjust_freq_unthr_context(). + */ u64 freq_time_stamp; u64 freq_count_stamp; #endif }; -/* - * hw_perf_event::state flags - */ -#define PERF_HES_STOPPED 0x01 /* the counter is stopped */ -#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */ -#define PERF_HES_ARCH 0x04 - struct perf_event; /* * Common implementation detail of pmu::{start,commit,cancel}_txn */ -#define PERF_EVENT_TXN 0x1 +#define PERF_PMU_TXN_ADD 0x1 /* txn to add/schedule event on PMU */ +#define PERF_PMU_TXN_READ 0x2 /* txn to read event group from PMU */ /** * pmu::capabilities flags @@ -210,7 +244,19 @@ struct pmu { /* * Try and initialize the event for this PMU. - * Should return -ENOENT when the @event doesn't match this PMU. + * + * Returns: + * -ENOENT -- @event is not for this PMU + * + * -ENODEV -- @event is for this PMU but PMU not present + * -EBUSY -- @event is for this PMU but PMU temporarily unavailable + * -EINVAL -- @event is for this PMU but @event is not valid + * -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported + * -EACCES -- @event is for this PMU, @event is valid, but no privileges + * + * 0 -- @event is for this PMU and valid + * + * Other error return values are allowed. */ int (*event_init) (struct perf_event *event); @@ -221,27 +267,61 @@ struct pmu { void (*event_mapped) (struct perf_event *event); /*optional*/ void (*event_unmapped) (struct perf_event *event); /*optional*/ + /* + * Flags for ->add()/->del()/ ->start()/->stop(). There are + * matching hw_perf_event::state flags. + */ #define PERF_EF_START 0x01 /* start the counter when adding */ #define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ #define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ /* - * Adds/Removes a counter to/from the PMU, can be done inside - * a transaction, see the ->*_txn() methods. + * Adds/Removes a counter to/from the PMU, can be done inside a + * transaction, see the ->*_txn() methods. + * + * The add/del callbacks will reserve all hardware resources required + * to service the event, this includes any counter constraint + * scheduling etc. + * + * Called with IRQs disabled and the PMU disabled on the CPU the event + * is on. + * + * ->add() called without PERF_EF_START should result in the same state + * as ->add() followed by ->stop(). + * + * ->del() must always PERF_EF_UPDATE stop an event. If it calls + * ->stop() that must deal with already being stopped without + * PERF_EF_UPDATE. */ int (*add) (struct perf_event *event, int flags); void (*del) (struct perf_event *event, int flags); /* - * Starts/Stops a counter present on the PMU. 
The PMI handler - * should stop the counter when perf_event_overflow() returns - * !0. ->start() will be used to continue. + * Starts/Stops a counter present on the PMU. + * + * The PMI handler should stop the counter when perf_event_overflow() + * returns !0. ->start() will be used to continue. + * + * Also used to change the sample period. + * + * Called with IRQs disabled and the PMU disabled on the CPU the event + * is on -- will be called from NMI context when the PMU generates + * NMIs. + * + * ->stop() with PERF_EF_UPDATE will read the counter and update + * period/count values like ->read() would. + * + * ->start() with PERF_EF_RELOAD will reprogram the counter + * value, must be preceded by a ->stop() with PERF_EF_UPDATE. */ void (*start) (struct perf_event *event, int flags); void (*stop) (struct perf_event *event, int flags); /* * Updates the counter value of the event. + * + * For sampling capable PMUs this will also update the software period + * hw_perf_event::period_left field. */ void (*read) (struct perf_event *event); @@ -252,20 +332,26 @@ struct pmu { * * Start the transaction, after this ->add() doesn't need to * do schedulability tests. + * + * Optional. */ - void (*start_txn) (struct pmu *pmu); /* optional */ + void (*start_txn) (struct pmu *pmu, unsigned int txn_flags); /* * If ->start_txn() disabled the ->add() schedulability test * then ->commit_txn() is required to perform one. On success * the transaction is closed. On error the transaction is kept * open until ->cancel_txn() is called. + * + * Optional. */ - int (*commit_txn) (struct pmu *pmu); /* optional */ + int (*commit_txn) (struct pmu *pmu); /* * Will cancel the transaction, assumes ->del() is called * for each successful ->add() during the transaction. + * + * Optional. 
*/ - void (*cancel_txn) (struct pmu *pmu); /* optional */ + void (*cancel_txn) (struct pmu *pmu); /* * Will return the value for perf_event_mmap_page::index for this event, diff --git a/include/linux/phy.h b/include/linux/phy.h index 4a4e3a092337..05fde31b6dc6 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -213,7 +213,9 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev) void devm_mdiobus_free(struct device *dev, struct mii_bus *bus); struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); +int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum); int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); +int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val); #define PHY_INTERRUPT_DISABLED 0x0 @@ -798,6 +800,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); int phy_start_interrupts(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); void phy_device_free(struct phy_device *phydev); +int phy_set_max_speed(struct phy_device *phydev, u32 max_speed); int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, int (*run)(struct phy_device *)); diff --git a/include/linux/pinctrl/devinfo.h b/include/linux/pinctrl/devinfo.h index 281cb91ddcf5..05082e407c4a 100644 --- a/include/linux/pinctrl/devinfo.h +++ b/include/linux/pinctrl/devinfo.h @@ -24,10 +24,14 @@ * struct dev_pin_info - pin state container for devices * @p: pinctrl handle for the containing device * @default_state: the default state for the handle, if found + * @init_state: the state at probe time, if found + * @sleep_state: the state at suspend time, if found + * @idle_state: the state at idle (runtime suspend) time, if found */ struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; + struct pinctrl_state *init_state; #ifdef CONFIG_PM struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; @@ -35,6 +39,7 @@ struct dev_pin_info { }; extern int pinctrl_bind_pins(struct device *dev); +extern int pinctrl_init_done(struct device *dev); #else @@ -45,5 +50,10 @@ static inline int pinctrl_bind_pins(struct device *dev) return 0; } +static inline int pinctrl_init_done(struct device *dev) +{ + return 0; +} + #endif /* CONFIG_PINCTRL */ #endif /* PINCTRL_DEVINFO_H */ diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index fe65962b264f..d921afd5f109 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -20,6 +20,11 @@ /** * enum pin_config_param - possible pin configuration parameters + * @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it + * weakly drives the last value on a tristate bus, also known as a "bus + * holder", "bus keeper" or "repeater". This allows another device on the + * bus to change the value by driving the bus high or low and switching to + * tristate. The argument is ignored. * @PIN_CONFIG_BIAS_DISABLE: disable any pin bias on the pin, a * transition from say pull-up to pull-down implies that you disable * pull-up in the process, this setting disables all biasing. @@ -29,14 +34,6 @@ * if for example some other pin is going to drive the signal connected * to it for a while. Pins used for input are usually always high * impedance. 
- * @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it - * weakly drives the last value on a tristate bus, also known as a "bus - * holder", "bus keeper" or "repeater". This allows another device on the - * bus to change the value by driving the bus high or low and switching to - * tristate. The argument is ignored. - * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high - * impedance to VDD). If the argument is != 0 pull-up is enabled, - * if it is 0, pull-up is total, i.e. the pin is connected to VDD. * @PIN_CONFIG_BIAS_PULL_DOWN: the pin will be pulled down (usually with high * impedance to GROUND). If the argument is != 0 pull-down is enabled, * if it is 0, pull-down is total, i.e. the pin is connected to GROUND. @@ -48,10 +45,9 @@ * If the argument is != 0 pull up/down is enabled, if it is 0, the * configuration is ignored. The proper way to disable it is to use * @PIN_CONFIG_BIAS_DISABLE. - * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and - * low, this is the most typical case and is typically achieved with two - * active transistors on the output. Setting this config will enable - * push-pull mode, the argument is ignored. + * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high + * impedance to VDD). If the argument is != 0 pull-up is enabled, + * if it is 0, pull-up is total, i.e. the pin is connected to VDD. * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open * collector) which means it is usually wired with other output ports * which are then pulled up with an external resistor. Setting this @@ -59,28 +55,26 @@ * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source * (open emitter). Setting this config will enable open source mode, the * argument is ignored. + * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and + * low, this is the most typical case and is typically achieved with two + * active transistors on the output. Setting this config will enable + * push-pull mode, the argument is ignored. * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current * passed as argument. The argument is in mA. + * @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode, + * which means it will wait for signals to settle when reading inputs. The + * argument gives the debounce time in usecs. Setting the + * argument to zero turns debouncing off. * @PIN_CONFIG_INPUT_ENABLE: enable the pin's input. Note that this does not * affect the pin's ability to drive output. 1 enables input, 0 disables * input. - * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin. - * If the argument != 0, schmitt-trigger mode is enabled. If it's 0, - * schmitt-trigger mode is disabled. * @PIN_CONFIG_INPUT_SCHMITT: this will configure an input pin to run in * schmitt-trigger mode. If the schmitt-trigger has adjustable hysteresis, * the threshold value is given on a custom format as argument when * setting pins to this mode. - * @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode, - * which means it will wait for signals to settle when reading inputs. The - * argument gives the debounce time in usecs. Setting the - * argument to zero turns debouncing off. - * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power - * supplies, the argument to this parameter (on a custom format) tells - * the driver which alternative power source to use. 
- * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to - * this parameter (on a custom format) tells the driver which alternative - * slew rate to use. + * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin. + * If the argument != 0, schmitt-trigger mode is enabled. If it's 0, + * schmitt-trigger mode is disabled. * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power * operation, if several modes of operation are supported these can be * passed in the argument on a custom form, else just use argument 1 @@ -89,29 +83,35 @@ * 1 to indicate high level, argument 0 to indicate low level. (Please * see Documentation/pinctrl.txt, section "GPIO mode pitfalls" for a * discussion around this parameter.) + * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power + * supplies, the argument to this parameter (on a custom format) tells + * the driver which alternative power source to use. + * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to + * this parameter (on a custom format) tells the driver which alternative + * slew rate to use. * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if * you need to pass in custom configurations to the pin controller, use * PIN_CONFIG_END+1 as the base offset. */ enum pin_config_param { + PIN_CONFIG_BIAS_BUS_HOLD, PIN_CONFIG_BIAS_DISABLE, PIN_CONFIG_BIAS_HIGH_IMPEDANCE, - PIN_CONFIG_BIAS_BUS_HOLD, - PIN_CONFIG_BIAS_PULL_UP, PIN_CONFIG_BIAS_PULL_DOWN, PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, - PIN_CONFIG_DRIVE_PUSH_PULL, + PIN_CONFIG_BIAS_PULL_UP, PIN_CONFIG_DRIVE_OPEN_DRAIN, PIN_CONFIG_DRIVE_OPEN_SOURCE, + PIN_CONFIG_DRIVE_PUSH_PULL, PIN_CONFIG_DRIVE_STRENGTH, + PIN_CONFIG_INPUT_DEBOUNCE, PIN_CONFIG_INPUT_ENABLE, - PIN_CONFIG_INPUT_SCHMITT_ENABLE, PIN_CONFIG_INPUT_SCHMITT, - PIN_CONFIG_INPUT_DEBOUNCE, - PIN_CONFIG_POWER_SOURCE, - PIN_CONFIG_SLEW_RATE, + PIN_CONFIG_INPUT_SCHMITT_ENABLE, PIN_CONFIG_LOW_POWER_MODE, PIN_CONFIG_OUTPUT, + PIN_CONFIG_POWER_SOURCE, + PIN_CONFIG_SLEW_RATE, PIN_CONFIG_END = 0x7FFF, }; diff --git a/include/linux/pinctrl/pinctrl-state.h b/include/linux/pinctrl/pinctrl-state.h index b5919f8e6d1a..23073519339f 100644 --- a/include/linux/pinctrl/pinctrl-state.h +++ b/include/linux/pinctrl/pinctrl-state.h @@ -9,6 +9,13 @@ * hogs to configure muxing and pins at boot, and also as a state * to go into when returning from sleep and idle in * .pm_runtime_resume() or ordinary .resume() for example. + * @PINCTRL_STATE_INIT: normally the pinctrl will be set to "default" + * before the driver's probe() function is called. There are some + * drivers where that is not appropriate because doing so would + * glitch the pins. In those cases you can add an "init" pinctrl + * which is the state of the pins before driver probe. After probe + * if the pins are still in "init" state they'll be moved to + * "default". * @PINCTRL_STATE_IDLE: the state the pinctrl handle shall be put into * when the pins are idle. This is a state where the system is relaxed * but not fully sleeping - some power may be on but clocks gated for @@ -20,5 +27,6 @@ * ordinary .suspend() function. 
*/ #define PINCTRL_STATE_DEFAULT "default" +#define PINCTRL_STATE_INIT "init" #define PINCTRL_STATE_IDLE "idle" #define PINCTRL_STATE_SLEEP "sleep" diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h index 527a85c61924..91b16adab0cd 100644 --- a/include/linux/platform_data/atmel.h +++ b/include/linux/platform_data/atmel.h @@ -19,21 +19,6 @@ #include <linux/serial.h> #include <linux/platform_data/macb.h> -/* - * at91: 6 USARTs and one DBGU port (SAM9260) - * avr32: 4 - */ -#define ATMEL_MAX_UART 7 - - /* USB Device */ -struct at91_udc_data { - int vbus_pin; /* high == host powering us */ - u8 vbus_active_low; /* vbus polarity */ - u8 vbus_polled; /* Use polling, not interrupt */ - int pullup_pin; /* active == D+ pulled up */ - u8 pullup_active_low; /* true == pullup_pin is active low */ -}; - /* Compact Flash */ struct at91_cf_data { int irq_pin; /* I/O IRQ */ @@ -74,11 +59,6 @@ struct atmel_uart_data { struct serial_rs485 rs485; /* rs485 settings */ }; -/* CAN */ -struct at91_can_data { - void (*transceiver_switch)(int on); -}; - /* FIXME: this needs a better location, but gets stuff building again */ extern int at91_suspend_entering_slow_clock(void); diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 87ac14c584f2..03b6095d3b18 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -37,6 +37,7 @@ struct dw_dma_slave { * @nr_channels: Number of channels supported by hardware (max 8) * @is_private: The device channels should be marked as private and not for use * by the general purpose DMA channel allocator. + * @is_memcpy: The device channels do support memory-to-memory transfers. * @chan_allocation_order: Allocate channels starting from 0 or 7 * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. 
* @block_size: Maximum block size supported by the controller @@ -47,6 +48,7 @@ struct dw_dma_slave { struct dw_dma_platform_data { unsigned int nr_channels; bool is_private; + bool is_memcpy; #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ unsigned char chan_allocation_order; diff --git a/include/linux/platform_data/dma-hsu.h b/include/linux/platform_data/dma-hsu.h index 8a1f6a4920b2..3453fa655502 100644 --- a/include/linux/platform_data/dma-hsu.h +++ b/include/linux/platform_data/dma-hsu.h @@ -18,8 +18,4 @@ struct hsu_dma_slave { int chan_id; }; -struct hsu_dma_platform_data { - unsigned short nr_channels; -}; - #endif /* _PLATFORM_DATA_DMA_HSU_H */ diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index bdb2710e2aab..e2878baeb90e 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h @@ -41,51 +41,6 @@ #ifndef EDMA_H_ #define EDMA_H_ -/* PaRAM slots are laid out like this */ -struct edmacc_param { - u32 opt; - u32 src; - u32 a_b_cnt; - u32 dst; - u32 src_dst_bidx; - u32 link_bcntrld; - u32 src_dst_cidx; - u32 ccnt; -} __packed; - -/* fields in edmacc_param.opt */ -#define SAM BIT(0) -#define DAM BIT(1) -#define SYNCDIM BIT(2) -#define STATIC BIT(3) -#define EDMA_FWID (0x07 << 8) -#define TCCMODE BIT(11) -#define EDMA_TCC(t) ((t) << 12) -#define TCINTEN BIT(20) -#define ITCINTEN BIT(21) -#define TCCHEN BIT(22) -#define ITCCHEN BIT(23) - -/*ch_status paramater of callback function possible values*/ -#define EDMA_DMA_COMPLETE 1 -#define EDMA_DMA_CC_ERROR 2 -#define EDMA_DMA_TC1_ERROR 3 -#define EDMA_DMA_TC2_ERROR 4 - -enum address_mode { - INCR = 0, - FIFO = 1 -}; - -enum fifo_width { - W8BIT = 0, - W16BIT = 1, - W32BIT = 2, - W64BIT = 3, - W128BIT = 4, - W256BIT = 5 -}; - enum dma_event_q { EVENTQ_0 = 0, EVENTQ_1 = 1, @@ -94,64 +49,10 @@ enum dma_event_q { EVENTQ_DEFAULT = -1 }; -enum sync_dimension { - ASYNC = 0, - ABSYNC = 1 -}; - #define EDMA_CTLR_CHAN(ctlr, chan) (((ctlr) << 16) | (chan)) #define EDMA_CTLR(i) ((i) >> 16) #define EDMA_CHAN_SLOT(i) ((i) & 0xffff) -#define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */ -#define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */ -#define EDMA_CONT_PARAMS_ANY 1001 -#define EDMA_CONT_PARAMS_FIXED_EXACT 1002 -#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003 - -#define EDMA_MAX_CC 2 - -/* alloc/free DMA channels and their dedicated parameter RAM slots */ -int edma_alloc_channel(int channel, - void (*callback)(unsigned channel, u16 ch_status, void *data), - void *data, enum dma_event_q); -void edma_free_channel(unsigned channel); - -/* alloc/free parameter RAM slots */ -int edma_alloc_slot(unsigned ctlr, int slot); -void edma_free_slot(unsigned slot); - -/* alloc/free a set of contiguous parameter RAM slots */ -int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count); -int edma_free_cont_slots(unsigned slot, int count); - -/* calls that operate on part of a parameter RAM slot */ -void edma_set_src(unsigned slot, dma_addr_t src_port, - enum address_mode mode, enum fifo_width); -void edma_set_dest(unsigned slot, dma_addr_t dest_port, - enum address_mode mode, enum fifo_width); -dma_addr_t edma_get_position(unsigned slot, bool dst); -void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx); -void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx); -void edma_set_transfer_params(unsigned slot, u16 acnt, u16 bcnt, u16 ccnt, - u16 bcnt_rld, enum sync_dimension sync_mode); -void 
edma_link(unsigned from, unsigned to); -void edma_unlink(unsigned from); - -/* calls that operate on an entire parameter RAM slot */ -void edma_write_slot(unsigned slot, const struct edmacc_param *params); -void edma_read_slot(unsigned slot, struct edmacc_param *params); - -/* channel control operations */ -int edma_start(unsigned channel); -void edma_stop(unsigned channel); -void edma_clean_channel(unsigned channel); -void edma_clear_event(unsigned channel); -void edma_pause(unsigned channel); -void edma_resume(unsigned channel); - -void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no); - struct edma_rsv_info { const s16 (*rsv_chans)[2]; @@ -170,10 +71,11 @@ struct edma_soc_info { /* Resource reservation for other cores */ struct edma_rsv_info *rsv; + /* List of channels allocated for memcpy, terminated with -1 */ + s16 *memcpy_channels; + s8 (*queue_priority_mapping)[2]; const s16 (*xbar_chans)[2]; }; -int edma_trigger_channel(unsigned); - #endif diff --git a/include/linux/platform_data/leds-kirkwood-netxbig.h b/include/linux/platform_data/leds-kirkwood-netxbig.h index d2be19a51acd..3c85a735c380 100644 --- a/include/linux/platform_data/leds-kirkwood-netxbig.h +++ b/include/linux/platform_data/leds-kirkwood-netxbig.h @@ -40,6 +40,7 @@ struct netxbig_led { int mode_addr; int *mode_val; int bright_addr; + int bright_max; }; struct netxbig_led_platform_data { diff --git a/include/linux/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h index 11f00cdabe3d..11f00cdabe3d 100644 --- a/include/linux/mdio-gpio.h +++ b/include/linux/platform_data/mdio-gpio.h diff --git a/include/linux/platform_data/mtd-nand-pxa3xx.h b/include/linux/platform_data/mtd-nand-pxa3xx.h index ac4ea2e641c7..394d15597dc7 100644 --- a/include/linux/platform_data/mtd-nand-pxa3xx.h +++ b/include/linux/platform_data/mtd-nand-pxa3xx.h @@ -4,30 +4,6 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> -struct pxa3xx_nand_timing { - unsigned int tCH; /* Enable signal hold time */ - unsigned int tCS; /* Enable signal setup time */ - unsigned int tWH; /* ND_nWE high duration */ - unsigned int tWP; /* ND_nWE pulse time */ - unsigned int tRH; /* ND_nRE high duration */ - unsigned int tRP; /* ND_nRE pulse width */ - unsigned int tR; /* ND_nWE high to ND_nRE low for read */ - unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */ - unsigned int tAR; /* ND_ALE low to ND_nRE low delay */ -}; - -struct pxa3xx_nand_flash { - char *name; - uint32_t chip_id; - unsigned int page_per_block; /* Pages per block (PG_PER_BLK) */ - unsigned int page_size; /* Page size in bytes (PAGE_SZ) */ - unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */ - unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */ - unsigned int num_blocks; /* Number of physical blocks in Flash */ - - struct pxa3xx_nand_timing *timing; /* NAND Flash timing */ -}; - /* * Current pxa3xx_nand controller has two chip select which * both be workable. 
@@ -63,9 +39,6 @@ struct pxa3xx_nand_platform_data { const struct mtd_partition *parts[NUM_CHIP_SELECT]; unsigned int nr_parts[NUM_CHIP_SELECT]; - - const struct pxa3xx_nand_flash * flash; - size_t num_flash; }; extern void pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info); diff --git a/include/linux/platform_data/nfcmrvl.h b/include/linux/platform_data/nfcmrvl.h index ac91707dabcb..a6f9d633f5be 100644 --- a/include/linux/platform_data/nfcmrvl.h +++ b/include/linux/platform_data/nfcmrvl.h @@ -35,6 +35,14 @@ struct nfcmrvl_platform_data { unsigned int flow_control; /* Tell if firmware supports break control for power management */ unsigned int break_control; + + + /* + * I2C specific + */ + + unsigned int irq; + unsigned int irq_polarity; }; #endif /* _NFCMRVL_PTF_H_ */ diff --git a/include/linux/platform_data/s3c-hsotg.h b/include/linux/platform_data/s3c-hsotg.h index 3f1cbf95ec3b..3982586ba6df 100644 --- a/include/linux/platform_data/s3c-hsotg.h +++ b/include/linux/platform_data/s3c-hsotg.h @@ -17,19 +17,19 @@ struct platform_device; -enum s3c_hsotg_dmamode { +enum dwc2_hsotg_dmamode { S3C_HSOTG_DMA_NONE, /* do not use DMA at-all */ S3C_HSOTG_DMA_ONLY, /* always use DMA */ S3C_HSOTG_DMA_DRV, /* DMA is chosen by driver */ }; /** - * struct s3c_hsotg_plat - platform data for high-speed otg/udc + * struct dwc2_hsotg_plat - platform data for high-speed otg/udc * @dma: Whether to use DMA or not. * @is_osc: The clock source is an oscillator, not a crystal */ -struct s3c_hsotg_plat { - enum s3c_hsotg_dmamode dma; +struct dwc2_hsotg_plat { + enum dwc2_hsotg_dmamode dma; unsigned int is_osc:1; int phy_type; @@ -37,6 +37,6 @@ struct s3c_hsotg_plat { int (*phy_exit)(struct platform_device *pdev, int type); }; -extern void s3c_hsotg_set_platdata(struct s3c_hsotg_plat *pd); +extern void dwc2_hsotg_set_platdata(struct dwc2_hsotg_plat *pd); #endif /* __LINUX_USB_S3C_HSOTG_H */ diff --git a/include/linux/platform_data/st-nci.h b/include/linux/platform_data/st-nci.h index d9d400a297bd..f6494b347c06 100644 --- a/include/linux/platform_data/st-nci.h +++ b/include/linux/platform_data/st-nci.h @@ -24,6 +24,8 @@ struct st_nci_nfc_platform_data { unsigned int gpio_reset; unsigned int irq_polarity; + bool is_ese_present; + bool is_uicc_present; }; #endif /* _ST_NCI_H_ */ diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index bba08f44cc97..dc777be5f2e1 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -270,6 +270,14 @@ extern struct platform_device *__platform_create_bundle( struct resource *res, unsigned int n_res, const void *data, size_t size, struct module *module); +int __platform_register_drivers(struct platform_driver * const *drivers, + unsigned int count, struct module *owner); +void platform_unregister_drivers(struct platform_driver * const *drivers, + unsigned int count); + +#define platform_register_drivers(drivers, count) \ + __platform_register_drivers(drivers, count, THIS_MODULE) + /* early platform driver interface */ struct early_platform_driver { const char *class_str; diff --git a/include/linux/pm.h b/include/linux/pm.h index 35d599e7250d..528be6787796 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -732,6 +732,7 @@ extern int pm_generic_poweroff_noirq(struct device *dev); extern int pm_generic_poweroff_late(struct device *dev); extern int pm_generic_poweroff(struct device *dev); extern void pm_generic_complete(struct device *dev); +extern void pm_complete_with_resume_check(struct device *dev); #else /* 
!CONFIG_PM_SLEEP */ diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index b1cf7e797892..ba4ced38efae 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -15,7 +15,6 @@ #include <linux/err.h> #include <linux/of.h> #include <linux/notifier.h> -#include <linux/cpuidle.h> /* Defines used for the flags field in the struct generic_pm_domain */ #define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ @@ -38,11 +37,6 @@ struct gpd_dev_ops { bool (*active_wakeup)(struct device *dev); }; -struct gpd_cpuidle_data { - unsigned int saved_exit_latency; - struct cpuidle_state *idle_state; -}; - struct generic_pm_domain { struct dev_pm_domain domain; /* PM domain operations */ struct list_head gpd_list_node; /* Node in the global PM domains list */ @@ -53,7 +47,6 @@ struct generic_pm_domain { struct dev_power_governor *gov; struct work_struct power_off_work; const char *name; - unsigned int in_progress; /* Number of devices being suspended now */ atomic_t sd_count; /* Number of subdomains with power "on" */ enum gpd_status status; /* Current state of the domain */ unsigned int device_count; /* Number of devices */ @@ -68,7 +61,6 @@ struct generic_pm_domain { s64 max_off_time_ns; /* Maximum allowed "suspended" time. */ bool max_off_time_changed; bool cached_power_down_ok; - struct gpd_cpuidle_data *cpuidle_data; int (*attach_dev)(struct generic_pm_domain *domain, struct device *dev); void (*detach_dev)(struct generic_pm_domain *domain, @@ -89,10 +81,8 @@ struct gpd_link { }; struct gpd_timing_data { - s64 stop_latency_ns; - s64 start_latency_ns; - s64 save_state_latency_ns; - s64 restore_state_latency_ns; + s64 suspend_latency_ns; + s64 resume_latency_ns; s64 effective_constraint_ns; bool constraint_changed; bool cached_stop_ok; @@ -125,29 +115,15 @@ extern int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, struct gpd_timing_data *td); -extern int __pm_genpd_name_add_device(const char *domain_name, - struct device *dev, - struct gpd_timing_data *td); - extern int pm_genpd_remove_device(struct generic_pm_domain *genpd, struct device *dev); extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *new_subdomain); -extern int pm_genpd_add_subdomain_names(const char *master_name, - const char *subdomain_name); extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *target); -extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state); -extern int pm_genpd_name_attach_cpuidle(const char *name, int state); -extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd); -extern int pm_genpd_name_detach_cpuidle(const char *name); extern void pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off); -extern int pm_genpd_poweron(struct generic_pm_domain *genpd); -extern int pm_genpd_name_poweron(const char *domain_name); -extern void pm_genpd_poweroff_unused(void); - extern struct dev_power_governor simple_qos_governor; extern struct dev_power_governor pm_domain_always_on_gov; #else @@ -166,12 +142,6 @@ static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd, { return -ENOSYS; } -static inline int __pm_genpd_name_add_device(const char *domain_name, - struct device *dev, - struct gpd_timing_data *td) -{ - return -ENOSYS; -} static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd, struct device *dev) { @@ -182,45 +152,15 @@ static inline int pm_genpd_add_subdomain(struct 
generic_pm_domain *genpd, { return -ENOSYS; } -static inline int pm_genpd_add_subdomain_names(const char *master_name, - const char *subdomain_name) -{ - return -ENOSYS; -} static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *target) { return -ENOSYS; } -static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st) -{ - return -ENOSYS; -} -static inline int pm_genpd_name_attach_cpuidle(const char *name, int state) -{ - return -ENOSYS; -} -static inline int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) -{ - return -ENOSYS; -} -static inline int pm_genpd_name_detach_cpuidle(const char *name) -{ - return -ENOSYS; -} static inline void pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off) { } -static inline int pm_genpd_poweron(struct generic_pm_domain *genpd) -{ - return -ENOSYS; -} -static inline int pm_genpd_name_poweron(const char *domain_name) -{ - return -ENOSYS; -} -static inline void pm_genpd_poweroff_unused(void) {} #endif static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, @@ -229,12 +169,6 @@ static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, return __pm_genpd_add_device(genpd, dev, NULL); } -static inline int pm_genpd_name_add_device(const char *domain_name, - struct device *dev) -{ - return __pm_genpd_name_add_device(domain_name, dev, NULL); -} - #ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP extern void pm_genpd_syscore_poweroff(struct device *dev); extern void pm_genpd_syscore_poweron(struct device *dev); diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index e817722ee3f0..9a2e50337af9 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -132,37 +132,37 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier( #endif /* CONFIG_PM_OPP */ #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) -int of_init_opp_table(struct device *dev); -void of_free_opp_table(struct device *dev); -int of_cpumask_init_opp_table(cpumask_var_t cpumask); -void of_cpumask_free_opp_table(cpumask_var_t cpumask); -int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask); -int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask); +int dev_pm_opp_of_add_table(struct device *dev); +void dev_pm_opp_of_remove_table(struct device *dev); +int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask); +void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask); +int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask); +int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask); #else -static inline int of_init_opp_table(struct device *dev) +static inline int dev_pm_opp_of_add_table(struct device *dev) { return -EINVAL; } -static inline void of_free_opp_table(struct device *dev) +static inline void dev_pm_opp_of_remove_table(struct device *dev) { } -static inline int of_cpumask_init_opp_table(cpumask_var_t cpumask) +static inline int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask) { return -ENOSYS; } -static inline void of_cpumask_free_opp_table(cpumask_var_t cpumask) +static inline void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask) { } -static inline int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask) +static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) { return -ENOSYS; } -static inline int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask) +static 
inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) { return -ENOSYS; } diff --git a/include/linux/power/bq27x00_battery.h b/include/linux/power/bq27x00_battery.h deleted file mode 100644 index a857f719bf40..000000000000 --- a/include/linux/power/bq27x00_battery.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef __LINUX_BQ27X00_BATTERY_H__ -#define __LINUX_BQ27X00_BATTERY_H__ - -/** - * struct bq27000_plaform_data - Platform data for bq27000 devices - * @name: Name of the battery. If NULL the driver will fallback to "bq27000". - * @read: HDQ read callback. - * This function should provide access to the HDQ bus the battery is - * connected to. - * The first parameter is a pointer to the battery device, the second the - * register to be read. The return value should either be the content of - * the passed register or an error value. - */ -struct bq27000_platform_data { - const char *name; - int (*read)(struct device *dev, unsigned int); -}; - -#endif diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h new file mode 100644 index 000000000000..45f6a7b5b3cb --- /dev/null +++ b/include/linux/power/bq27xxx_battery.h @@ -0,0 +1,31 @@ +#ifndef __LINUX_BQ27X00_BATTERY_H__ +#define __LINUX_BQ27X00_BATTERY_H__ + +/** + * struct bq27xxx_platform_data - Platform data for bq27xxx devices + * @name: Name of the battery. + * @chip: Chip class number of this device. + * @read: HDQ read callback. + * This function should provide access to the HDQ bus the battery is + * connected to. + * The first parameter is a pointer to the battery device, the second the + * register to be read. The return value should either be the content of + * the passed register or an error value. + */ +enum bq27xxx_chip { + BQ27000 = 1, /* bq27000, bq27200 */ + BQ27010, /* bq27010, bq27210 */ + BQ27500, /* bq27500, bq27510, bq27520 */ + BQ27530, /* bq27530, bq27531 */ + BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ + BQ27545, /* bq27545 */ + BQ27421, /* bq27421, bq27425, bq27441, bq27621 */ +}; + +struct bq27xxx_platform_data { + const char *name; + enum bq27xxx_chip chip; + int (*read)(struct device *dev, unsigned int); +}; + +#endif diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h index eadf28cb2fc9..c4fa907c8f14 100644 --- a/include/linux/power/charger-manager.h +++ b/include/linux/power/charger-manager.h @@ -65,7 +65,7 @@ struct charger_cable { const char *extcon_name; const char *name; - /* The charger-manager use Exton framework*/ + /* The charger-manager uses the Extcon framework */ struct extcon_specific_cable_nb extcon_dev; struct work_struct wq; struct notifier_block nb; @@ -94,7 +94,7 @@ struct charger_cable { * the charger will be maintained with disabled state. * @cables: * the array of charger cables to enable/disable charger - * and set current limit according to constratint data of + * and set current limit according to constraint data of * struct charger_cable if only charger cable included * in the array of charger cables is attached/detached. * @num_cables: the number of charger cables.
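Stepping back to the new bq27xxx header above: the HDQ read hook survives from the old bq27000 platform data, but registrants must now also name the chip class. A minimal board-file sketch, assuming a hypothetical accessor and the "bq27000-battery" platform binding name (neither is guaranteed by the header alone):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/power/bq27xxx_battery.h>

/* Hypothetical HDQ accessor: a real board would perform an HDQ/1-Wire
 * transaction and return the register contents or a negative errno. */
static int board_hdq_read(struct device *dev, unsigned int reg)
{
	return -ENODEV;		/* placeholder; no bus glue in this sketch */
}

static struct bq27xxx_platform_data board_fuel_gauge_pdata = {
	.name	= "main-battery",
	.chip	= BQ27000,	/* per the enum, also covers the bq27200 */
	.read	= board_hdq_read,
};

static struct platform_device board_fuel_gauge = {
	.name			= "bq27000-battery",	/* assumed binding name */
	.id			= -1,
	.dev.platform_data	= &board_fuel_gauge_pdata,
};

The @chip value lets one driver select the right register layout for the whole family, so the platform hook itself stays a bare register read.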
@@ -148,7 +148,7 @@ struct charger_regulator { * @polling_interval_ms: interval in millisecond at which * charger manager will monitor battery health * @battery_present: - * Specify where information for existance of battery can be obtained + * Specify where information for existence of battery can be obtained * @psy_charger_stat: the names of power-supply for chargers * @num_charger_regulator: the number of entries in charger_regulators * @charger_regulators: array of charger regulators @@ -156,7 +156,7 @@ struct charger_regulator { * @thermal_zone : the name of thermal zone for battery * @temp_min : Minimum battery temperature for charging. * @temp_max : Maximum battery temperature for charging. - * @temp_diff : Temperature diffential to restart charging. + * @temp_diff : Temperature difference to restart charging. * @measure_battery_temp: * true: measure battery temperature * false: measure ambient temperature diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h index 1d2cd21242e8..54bf1484d41f 100644 --- a/include/linux/pps_kernel.h +++ b/include/linux/pps_kernel.h @@ -48,9 +48,9 @@ struct pps_source_info { struct pps_event_time { #ifdef CONFIG_NTP_PPS - struct timespec ts_raw; + struct timespec64 ts_raw; #endif /* CONFIG_NTP_PPS */ - struct timespec ts_real; + struct timespec64 ts_real; }; /* The main struct */ @@ -105,7 +105,7 @@ extern void pps_event(struct pps_device *pps, struct pps_device *pps_lookup_dev(void const *cookie); static inline void timespec_to_pps_ktime(struct pps_ktime *kt, - struct timespec ts) + struct timespec64 ts) { kt->sec = ts.tv_sec; kt->nsec = ts.tv_nsec; @@ -115,24 +115,24 @@ static inline void timespec_to_pps_ktime(struct pps_ktime *kt, static inline void pps_get_ts(struct pps_event_time *ts) { - getnstime_raw_and_real(&ts->ts_raw, &ts->ts_real); + ktime_get_raw_and_real_ts64(&ts->ts_raw, &ts->ts_real); } #else /* CONFIG_NTP_PPS */ static inline void pps_get_ts(struct pps_event_time *ts) { - getnstimeofday(&ts->ts_real); + ktime_get_real_ts64(&ts->ts_real); } #endif /* CONFIG_NTP_PPS */ /* Subtract known time delay from PPS event time(s) */ -static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec delta) +static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta) { - ts->ts_real = timespec_sub(ts->ts_real, delta); + ts->ts_real = timespec64_sub(ts->ts_real, delta); #ifdef CONFIG_NTP_PPS - ts->ts_raw = timespec_sub(ts->ts_raw, delta); + ts->ts_raw = timespec64_sub(ts->ts_raw, delta); #endif } diff --git a/include/linux/pr.h b/include/linux/pr.h new file mode 100644 index 000000000000..65c01c10b335 --- /dev/null +++ b/include/linux/pr.h @@ -0,0 +1,18 @@ +#ifndef LINUX_PR_H +#define LINUX_PR_H + +#include <uapi/linux/pr.h> + +struct pr_ops { + int (*pr_register)(struct block_device *bdev, u64 old_key, u64 new_key, + u32 flags); + int (*pr_reserve)(struct block_device *bdev, u64 key, + enum pr_type type, u32 flags); + int (*pr_release)(struct block_device *bdev, u64 key, + enum pr_type type); + int (*pr_preempt)(struct block_device *bdev, u64 old_key, u64 new_key, + enum pr_type type, bool abort); + int (*pr_clear)(struct block_device *bdev, u64 key); +}; + +#endif /* LINUX_PR_H */ diff --git a/include/linux/preempt.h b/include/linux/preempt.h index bea8dd8ff5e0..75e4e30677f1 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -26,7 +26,6 @@ * SOFTIRQ_MASK: 0x0000ff00 * HARDIRQ_MASK: 0x000f0000 * NMI_MASK: 0x00100000 - * PREEMPT_ACTIVE: 0x00200000 * PREEMPT_NEED_RESCHED: 0x80000000 */ 
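Returning to the new <linux/pr.h> above: struct pr_ops is a plain vtable that a block driver fills in with whichever persistent-reservation primitives its transport supports; unimplemented hooks stay NULL. A sketch of the driver side, assuming the matching pr_ops pointer that struct block_device_operations gained in the same series (the demo_* names are illustrative):

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/pr.h>

/* Illustrative no-op handlers; a real driver would translate each call
 * into a transport command (e.g. a SCSI PERSISTENT RESERVE OUT). */
static int demo_pr_register(struct block_device *bdev, u64 old_key,
			    u64 new_key, u32 flags)
{
	return 0;
}

static int demo_pr_reserve(struct block_device *bdev, u64 key,
			   enum pr_type type, u32 flags)
{
	return 0;
}

static const struct pr_ops demo_pr_ops = {
	.pr_register	= demo_pr_register,
	.pr_reserve	= demo_pr_reserve,
	/* .pr_release, .pr_preempt and .pr_clear left NULL here */
};

static const struct block_device_operations demo_bdev_ops = {
	.owner	= THIS_MODULE,
	.pr_ops	= &demo_pr_ops,
};

The block layer then routes the PR ioctls from the uapi header this file includes to whichever of these hooks are populated.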
#define PREEMPT_BITS 8 @@ -53,10 +52,6 @@ #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) -#define PREEMPT_ACTIVE_BITS 1 -#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) -#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT) - /* We use the MSB mostly because it's available */ #define PREEMPT_NEED_RESCHED 0x80000000 @@ -126,8 +121,7 @@ * Check whether we were atomic before we did preempt_disable(): * (used by the scheduler) */ -#define in_atomic_preempt_off() \ - ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET) +#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET) #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) extern void preempt_count_add(int val); @@ -146,18 +140,6 @@ extern void preempt_count_sub(int val); #define preempt_count_inc() preempt_count_add(1) #define preempt_count_dec() preempt_count_sub(1) -#define preempt_active_enter() \ -do { \ - preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \ - barrier(); \ -} while (0) - -#define preempt_active_exit() \ -do { \ - barrier(); \ - preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \ -} while (0) - #ifdef CONFIG_PREEMPT_COUNT #define preempt_disable() \ diff --git a/include/linux/property.h b/include/linux/property.h index a59c6ee566c2..463de52fe891 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -40,6 +40,8 @@ int device_property_read_string_array(struct device *dev, const char *propname, const char **val, size_t nval); int device_property_read_string(struct device *dev, const char *propname, const char **val); +int device_property_match_string(struct device *dev, + const char *propname, const char *string); bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname); int fwnode_property_read_u8_array(struct fwnode_handle *fwnode, @@ -59,6 +61,8 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode, size_t nval); int fwnode_property_read_string(struct fwnode_handle *fwnode, const char *propname, const char **val); +int fwnode_property_match_string(struct fwnode_handle *fwnode, + const char *propname, const char *string); struct fwnode_handle *device_get_next_child_node(struct device *dev, struct fwnode_handle *child); diff --git a/include/linux/pstore.h b/include/linux/pstore.h index 8e7a25b068b0..831479f8df8f 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -75,20 +75,8 @@ struct pstore_info { #define PSTORE_FLAGS_FRAGILE 1 -#ifdef CONFIG_PSTORE extern int pstore_register(struct pstore_info *); +extern void pstore_unregister(struct pstore_info *); extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); -#else -static inline int -pstore_register(struct pstore_info *psi) -{ - return -ENODEV; -} -static inline bool -pstore_cannot_block_path(enum kmsg_dump_reason reason) -{ - return false; -} -#endif #endif /*_LINUX_PSTORE_H*/ diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h index 159c987b1853..a079656b614c 100644 --- a/include/linux/ptp_classify.h +++ b/include/linux/ptp_classify.h @@ -32,9 +32,9 @@ #define PTP_CLASS_VMASK 0x0f /* max protocol version is 15 */ #define PTP_CLASS_IPV4 0x10 /* event in an IPV4 UDP packet */ #define PTP_CLASS_IPV6 0x20 /* event in an IPV6 UDP packet */ -#define PTP_CLASS_L2 0x30 /* event in a L2 packet */ -#define PTP_CLASS_PMASK 0x30 /* mask for the packet type field */ -#define PTP_CLASS_VLAN 0x40 /* event in a VLAN tagged packet */ +#define PTP_CLASS_L2 0x40 /* event in an L2
packet */ +#define PTP_CLASS_PMASK 0x70 /* mask for the packet type field */ +#define PTP_CLASS_VLAN 0x80 /* event in a VLAN tagged packet */ #define PTP_CLASS_V1_IPV4 (PTP_CLASS_V1 | PTP_CLASS_IPV4) #define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /* probably DNE */ @@ -42,6 +42,7 @@ #define PTP_CLASS_V2_IPV6 (PTP_CLASS_V2 | PTP_CLASS_IPV6) #define PTP_CLASS_V2_L2 (PTP_CLASS_V2 | PTP_CLASS_L2) #define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN) +#define PTP_CLASS_L4 (PTP_CLASS_IPV4 | PTP_CLASS_IPV6) #define PTP_EV_PORT 319 #define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */ diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index 92273776bce6..c2f2574ff61c 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h @@ -198,6 +198,7 @@ enum pxa_ssp_type { LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */ LPSS_BYT_SSP, LPSS_SPT_SSP, + LPSS_BXT_SSP, }; struct ssp_device { diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h new file mode 100644 index 000000000000..6a4347639c03 --- /dev/null +++ b/include/linux/qed/common_hsi.h @@ -0,0 +1,607 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef __COMMON_HSI__ +#define __COMMON_HSI__ + +#define FW_MAJOR_VERSION 8 +#define FW_MINOR_VERSION 4 +#define FW_REVISION_VERSION 2 +#define FW_ENGINEERING_VERSION 0 + +/***********************/ +/* COMMON HW CONSTANTS */ +/***********************/ + +/* PCI functions */ +#define MAX_NUM_PORTS_K2 (4) +#define MAX_NUM_PORTS_BB (2) +#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2) + +#define MAX_NUM_PFS_K2 (16) +#define MAX_NUM_PFS_BB (8) +#define MAX_NUM_PFS (MAX_NUM_PFS_K2) +#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */ + +#define MAX_NUM_VFS_K2 (192) +#define MAX_NUM_VFS_BB (120) +#define MAX_NUM_VFS (MAX_NUM_VFS_K2) + +#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB) +#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS) + +#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB) +#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS) + +#define MAX_NUM_VPORTS_K2 (208) +#define MAX_NUM_VPORTS_BB (160) +#define MAX_NUM_VPORTS (MAX_NUM_VPORTS_K2) + +#define MAX_NUM_L2_QUEUES_K2 (320) +#define MAX_NUM_L2_QUEUES_BB (256) +#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_K2) + +/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */ +#define NUM_PHYS_TCS_4PORT_K2 (4) +#define NUM_OF_PHYS_TCS (8) + +#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1) +#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1) + +#define LB_TC (NUM_OF_PHYS_TCS) + +/* Num of possible traffic priority values */ +#define NUM_OF_PRIO (8) + +#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2) +#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB) +#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2) +#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB) + +/* CIDs */ +#define NUM_OF_CONNECTION_TYPES (8) +#define NUM_OF_LCIDS (320) +#define NUM_OF_LTIDS (320) + +/*****************/ +/* CDU CONSTANTS */ +/*****************/ + +#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) +#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) + +/*****************/ +/* DQ CONSTANTS */ +/*****************/ + +/* DEMS */ +#define DQ_DEMS_LEGACY 0 + +/* XCM agg val selection */ +#define 
DQ_XCM_AGG_VAL_SEL_WORD2 0 +#define DQ_XCM_AGG_VAL_SEL_WORD3 1 +#define DQ_XCM_AGG_VAL_SEL_WORD4 2 +#define DQ_XCM_AGG_VAL_SEL_WORD5 3 +#define DQ_XCM_AGG_VAL_SEL_REG3 4 +#define DQ_XCM_AGG_VAL_SEL_REG4 5 +#define DQ_XCM_AGG_VAL_SEL_REG5 6 +#define DQ_XCM_AGG_VAL_SEL_REG6 7 + +/* XCM agg val selection */ +#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD2 +#define DQ_XCM_ETH_TX_BD_CONS_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_CORE_TX_BD_CONS_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_ETH_TX_BD_PROD_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_CORE_TX_BD_PROD_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_CORE_SPQ_PROD_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 + +/* XCM agg counter flag selection */ +#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 +#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1 +#define DQ_XCM_AGG_FLG_SHIFT_CF12 2 +#define DQ_XCM_AGG_FLG_SHIFT_CF13 3 +#define DQ_XCM_AGG_FLG_SHIFT_CF18 4 +#define DQ_XCM_AGG_FLG_SHIFT_CF19 5 +#define DQ_XCM_AGG_FLG_SHIFT_CF22 6 +#define DQ_XCM_AGG_FLG_SHIFT_CF23 7 + +/* XCM agg counter flag selection */ +#define DQ_XCM_ETH_DQ_CF_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_CORE_DQ_CF_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_ETH_TERMINATE_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_CORE_TERMINATE_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_TPH_EN_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF23) + +/*****************/ +/* QM CONSTANTS */ +/*****************/ + +/* number of TX queues in the QM */ +#define MAX_QM_TX_QUEUES_K2 512 +#define MAX_QM_TX_QUEUES_BB 448 +#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2 + +/* number of Other queues in the QM */ +#define MAX_QM_OTHER_QUEUES_BB 64 +#define MAX_QM_OTHER_QUEUES_K2 128 +#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2 + +/* number of queues in a PF queue group */ +#define QM_PF_QUEUE_GROUP_SIZE 8 + +/* base number of Tx PQs in the CM PQ representation. 
+ * should be used when storing PQ IDs in CM PQ registers and context + */ +#define CM_TX_PQ_BASE 0x200 + +/* QM registers data */ +#define QM_LINE_CRD_REG_WIDTH 16 +#define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1)) +#define QM_BYTE_CRD_REG_WIDTH 24 +#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1)) +#define QM_WFQ_CRD_REG_WIDTH 32 +#define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1)) +#define QM_RL_CRD_REG_WIDTH 32 +#define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1)) + +/*****************/ +/* CAU CONSTANTS */ +/*****************/ + +#define CAU_FSM_ETH_RX 0 +#define CAU_FSM_ETH_TX 1 + +/* Number of Protocol Indices per Status Block */ +#define PIS_PER_SB 12 + +#define CAU_HC_STOPPED_STATE 3 +#define CAU_HC_DISABLE_STATE 4 +#define CAU_HC_ENABLE_STATE 0 + +/*****************/ +/* IGU CONSTANTS */ +/*****************/ + +#define MAX_SB_PER_PATH_K2 (368) +#define MAX_SB_PER_PATH_BB (288) +#define MAX_TOT_SB_PER_PATH \ + MAX_SB_PER_PATH_K2 + +#define MAX_SB_PER_PF_MIMD 129 +#define MAX_SB_PER_PF_SIMD 64 +#define MAX_SB_PER_VF 64 + +/* Memory addresses on the BAR for the IGU Sub Block */ +#define IGU_MEM_BASE 0x0000 + +#define IGU_MEM_MSIX_BASE 0x0000 +#define IGU_MEM_MSIX_UPPER 0x0101 +#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff + +#define IGU_MEM_PBA_MSIX_BASE 0x0200 +#define IGU_MEM_PBA_MSIX_UPPER 0x0202 +#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff + +#define IGU_CMD_INT_ACK_BASE 0x0400 +#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \ + MAX_TOT_SB_PER_PATH - \ + 1) +#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff + +#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0 +#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05f1 +#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05f2 + +#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05f3 +#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05f4 +#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05f5 +#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6 + +#define IGU_CMD_PROD_UPD_BASE 0x0600 +#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\ + MAX_TOT_SB_PER_PATH - \ + 1) +#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff + +/*****************/ +/* PXP CONSTANTS */ +/*****************/ + +/* PTT and GTT */ +#define PXP_NUM_PF_WINDOWS 12 +#define PXP_PER_PF_ENTRY_SIZE 8 +#define PXP_NUM_GLOBAL_WINDOWS 243 +#define PXP_GLOBAL_ENTRY_SIZE 4 +#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH 4 +#define PXP_PF_WINDOW_ADMIN_START 0 +#define PXP_PF_WINDOW_ADMIN_LENGTH 0x1000 +#define PXP_PF_WINDOW_ADMIN_END (PXP_PF_WINDOW_ADMIN_START + \ + PXP_PF_WINDOW_ADMIN_LENGTH - 1) +#define PXP_PF_WINDOW_ADMIN_PER_PF_START 0 +#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH (PXP_NUM_PF_WINDOWS * \ + PXP_PER_PF_ENTRY_SIZE) +#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \ + PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1) +#define PXP_PF_WINDOW_ADMIN_GLOBAL_START 0x200 +#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH (PXP_NUM_GLOBAL_WINDOWS * \ + PXP_GLOBAL_ENTRY_SIZE) +#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \ + (PXP_PF_WINDOW_ADMIN_GLOBAL_START + \ + PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1) +#define PXP_PF_GLOBAL_PRETEND_ADDR 0x1f0 +#define PXP_PF_ME_OPAQUE_MASK_ADDR 0xf4 +#define PXP_PF_ME_OPAQUE_ADDR 0x1f8 +#define PXP_PF_ME_CONCRETE_ADDR 0x1fc + +#define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000 +#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS +#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000 +#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \ + (PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \ + PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) +#define 
PXP_EXTERNAL_BAR_PF_WINDOW_END \ + (PXP_EXTERNAL_BAR_PF_WINDOW_START + \ + PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1) + +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \ + (PXP_EXTERNAL_BAR_PF_WINDOW_END + 1) +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM PXP_NUM_GLOBAL_WINDOWS +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE 0x1000 +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \ + (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \ + PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE) +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \ + (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \ + PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1) + +#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 +#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 + +/* ILT Records */ +#define PXP_NUM_ILT_RECORDS_BB 7600 +#define PXP_NUM_ILT_RECORDS_K2 11000 +#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) + +/******************/ +/* PBF CONSTANTS */ +/******************/ + +/* Number of PBF command queue lines. Each line is 32B. */ +#define PBF_MAX_CMD_LINES 3328 + +/* Number of BTB blocks. Each block is 256B. */ +#define BTB_MAX_BLOCKS 1440 + +/*****************/ +/* PRS CONSTANTS */ +/*****************/ + +/* Async data KCQ CQE */ +struct async_data { + __le32 cid; + __le16 itid; + u8 error_code; + u8 fw_debug_param; +}; + +struct regpair { + __le32 lo; + __le32 hi; +}; + +/* Event Data Union */ +union event_ring_data { + u8 bytes[8]; + struct async_data async_info; +}; + +/* Event Ring Entry */ +struct event_ring_entry { + u8 protocol_id; + u8 opcode; + __le16 reserved0; + __le16 echo; + u8 fw_return_code; + u8 flags; +#define EVENT_RING_ENTRY_ASYNC_MASK 0x1 +#define EVENT_RING_ENTRY_ASYNC_SHIFT 0 +#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F +#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1 + union event_ring_data data; +}; + +/* Multi function mode */ +enum mf_mode { + SF, + MF_OVLAN, + MF_NPAR, + MAX_MF_MODE +}; + +/* Per-protocol connection types */ +enum protocol_type { + PROTOCOLID_RESERVED1, + PROTOCOLID_RESERVED2, + PROTOCOLID_RESERVED3, + PROTOCOLID_CORE, + PROTOCOLID_ETH, + PROTOCOLID_RESERVED4, + PROTOCOLID_RESERVED5, + PROTOCOLID_PREROCE, + PROTOCOLID_COMMON, + PROTOCOLID_RESERVED6, + MAX_PROTOCOL_TYPE +}; + +/* status block structure */ +struct cau_pi_entry { + u32 prod; +#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF +#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0 +#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F +#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16 +#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1 +#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23 +#define CAU_PI_ENTRY_RESERVED_MASK 0xFF +#define CAU_PI_ENTRY_RESERVED_SHIFT 24 +}; + +/* status block structure */ +struct cau_sb_entry { + u32 data; +#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF +#define CAU_SB_ENTRY_SB_PROD_SHIFT 0 +#define CAU_SB_ENTRY_STATE0_MASK 0xF +#define CAU_SB_ENTRY_STATE0_SHIFT 24 +#define CAU_SB_ENTRY_STATE1_MASK 0xF +#define CAU_SB_ENTRY_STATE1_SHIFT 28 + u32 params; +#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F +#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0 +#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F +#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7 +#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14 +#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16 +#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF +#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18 +#define CAU_SB_ENTRY_VF_VALID_MASK 0x1 +#define CAU_SB_ENTRY_VF_VALID_SHIFT 26 +#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF +#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27 +#define CAU_SB_ENTRY_TPH_MASK 0x1 +#define 
CAU_SB_ENTRY_TPH_SHIFT 31 +}; + +/* core doorbell data */ +struct core_db_data { + u8 params; +#define CORE_DB_DATA_DEST_MASK 0x3 +#define CORE_DB_DATA_DEST_SHIFT 0 +#define CORE_DB_DATA_AGG_CMD_MASK 0x3 +#define CORE_DB_DATA_AGG_CMD_SHIFT 2 +#define CORE_DB_DATA_BYPASS_EN_MASK 0x1 +#define CORE_DB_DATA_BYPASS_EN_SHIFT 4 +#define CORE_DB_DATA_RESERVED_MASK 0x1 +#define CORE_DB_DATA_RESERVED_SHIFT 5 +#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 spq_prod; +}; + +/* Enum of doorbell aggregative command selection */ +enum db_agg_cmd_sel { + DB_AGG_CMD_NOP, + DB_AGG_CMD_SET, + DB_AGG_CMD_ADD, + DB_AGG_CMD_MAX, + MAX_DB_AGG_CMD_SEL +}; + +/* Enum of doorbell destination */ +enum db_dest { + DB_DEST_XCM, + DB_DEST_UCM, + DB_DEST_TCM, + DB_NUM_DESTINATIONS, + MAX_DB_DEST +}; + +/* Structure for doorbell address, in legacy mode */ +struct db_legacy_addr { + __le32 addr; +#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3 +#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0 +#define DB_LEGACY_ADDR_DEMS_MASK 0x7 +#define DB_LEGACY_ADDR_DEMS_SHIFT 2 +#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF +#define DB_LEGACY_ADDR_ICID_SHIFT 5 +}; + +/* Igu interrupt command */ +enum igu_int_cmd { + IGU_INT_ENABLE = 0, + IGU_INT_DISABLE = 1, + IGU_INT_NOP = 2, + IGU_INT_NOP2 = 3, + MAX_IGU_INT_CMD +}; + +/* IGU producer or consumer update command */ +struct igu_prod_cons_update { + u32 sb_id_and_flags; +#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF +#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24 +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25 +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27 +#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28 +#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29 +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31 + u32 reserved1; +}; + +/* Igu segments access for default status block only */ +enum igu_seg_access { + IGU_SEG_ACCESS_REG = 0, + IGU_SEG_ACCESS_ATTN = 1, + MAX_IGU_SEG_ACCESS +}; + +struct parsing_and_err_flags { + __le16 flags; +#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0 +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2 +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4 +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5 +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6 +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7 +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8 +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9 +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10 +#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11 +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12 
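+/* The tunnel flags below mirror bits 6, 9 and 10 above for the inner headers of encapsulated packets. */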
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 +}; + +/* Concrete Function ID. */ +struct pxp_concrete_fid { + __le16 fid; +#define PXP_CONCRETE_FID_PFID_MASK 0xF +#define PXP_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_CONCRETE_FID_PORT_MASK 0x3 +#define PXP_CONCRETE_FID_PORT_SHIFT 4 +#define PXP_CONCRETE_FID_PATH_MASK 0x1 +#define PXP_CONCRETE_FID_PATH_SHIFT 6 +#define PXP_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_CONCRETE_FID_VFID_SHIFT 8 +}; + +struct pxp_pretend_concrete_fid { + __le16 fid; +#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF +#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8 +}; + +union pxp_pretend_fid { + struct pxp_pretend_concrete_fid concrete_fid; + __le16 opaque_fid; +}; + +/* Pxp Pretend Command Register. */ +struct pxp_pretend_cmd { + union pxp_pretend_fid fid; + __le16 control; +#define PXP_PRETEND_CMD_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PATH_SHIFT 0 +#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1 +#define PXP_PRETEND_CMD_PORT_MASK 0x3 +#define PXP_PRETEND_CMD_PORT_SHIFT 2 +#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4 +#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8 +#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12 +#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14 +#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1 +#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15 +}; + +/* PTT Record in PXP Admin Window. 
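+ * Each entry supplies the internal address bits ('offset') for one PF BAR window, plus an optional 'pretend' identity override.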
*/ +struct pxp_ptt_entry { + __le32 offset; +#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF +#define PXP_PTT_ENTRY_OFFSET_SHIFT 0 +#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF +#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23 + struct pxp_pretend_cmd pretend; +}; + +/* RSS hash type */ +enum rss_hash_type { + RSS_HASH_TYPE_DEFAULT = 0, + RSS_HASH_TYPE_IPV4 = 1, + RSS_HASH_TYPE_TCP_IPV4 = 2, + RSS_HASH_TYPE_IPV6 = 3, + RSS_HASH_TYPE_TCP_IPV6 = 4, + RSS_HASH_TYPE_UDP_IPV4 = 5, + RSS_HASH_TYPE_UDP_IPV6 = 6, + MAX_RSS_HASH_TYPE +}; + +/* status block structure */ +struct status_block { + __le16 pi_array[PIS_PER_SB]; + __le32 sb_num; +#define STATUS_BLOCK_SB_NUM_MASK 0x1FF +#define STATUS_BLOCK_SB_NUM_SHIFT 0 +#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F +#define STATUS_BLOCK_ZERO_PAD_SHIFT 9 +#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF +#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16 + __le32 prod_index; +#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF +#define STATUS_BLOCK_PROD_INDEX_SHIFT 0 +#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF +#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 +}; + +#endif /* __COMMON_HSI__ */ diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h new file mode 100644 index 000000000000..320b3373ac1d --- /dev/null +++ b/include/linux/qed/eth_common.h @@ -0,0 +1,279 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef __ETH_COMMON__ +#define __ETH_COMMON__ + +/********************/ +/* ETH FW CONSTANTS */ +/********************/ +#define ETH_CACHE_LINE_SIZE 64 + +#define ETH_MAX_RAMROD_PER_CON 8 +#define ETH_TX_BD_PAGE_SIZE_BYTES 4096 +#define ETH_RX_BD_PAGE_SIZE_BYTES 4096 +#define ETH_RX_SGE_PAGE_SIZE_BYTES 4096 +#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 +#define ETH_RX_NUM_NEXT_PAGE_BDS 2 +#define ETH_RX_NUM_NEXT_PAGE_SGES 2 + +#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 +#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 +#define ETH_TX_MAX_LSO_HDR_NBD 4 +#define ETH_TX_MIN_BDS_PER_LSO_PKT 3 +#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 +#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 +#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 +#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 12 + 8)) +#define ETH_TX_MAX_LSO_HDR_BYTES 510 + +#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS + +#define ETH_REG_CQE_PBL_SIZE 3 + +/* num of MAC/VLAN filters */ +#define ETH_NUM_MAC_FILTERS 512 +#define ETH_NUM_VLAN_FILTERS 512 + +/* approx. 
multicast constants */ +#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0 +#define ETH_MULTICAST_MAC_BINS 256 +#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32) + +/* ethernet vport update constants */ +#define ETH_FILTER_RULES_COUNT 10 +#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 +#define ETH_RSS_KEY_SIZE_REGS 10 +#define ETH_RSS_ENGINE_NUM_K2 207 +#define ETH_RSS_ENGINE_NUM_BB 127 + +/* TPA constants */ +#define ETH_TPA_MAX_AGGS_NUM 64 +#define ETH_TPA_CQE_START_SGL_SIZE 3 +#define ETH_TPA_CQE_CONT_SGL_SIZE 6 +#define ETH_TPA_CQE_END_SGL_SIZE 4 + +/* Queue Zone sizes */ +#define TSTORM_QZONE_SIZE 0 +#define MSTORM_QZONE_SIZE sizeof(struct mstorm_eth_queue_zone) +#define USTORM_QZONE_SIZE sizeof(struct ustorm_eth_queue_zone) +#define XSTORM_QZONE_SIZE 0 +#define YSTORM_QZONE_SIZE sizeof(struct ystorm_eth_queue_zone) +#define PSTORM_QZONE_SIZE 0 + +/* Interrupt coalescing TimeSet */ +struct coalescing_timeset { + u8 timeset; + u8 valid; +}; + +struct eth_tx_1st_bd_flags { + u8 bitfields; +#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 1 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 2 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 3 +#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 4 +#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 5 +#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 +#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7 +}; + +/* The parsing information data for the first tx bd of a given packet. */ +struct eth_tx_data_1st_bd { + __le16 vlan; + u8 nbds; + struct eth_tx_1st_bd_flags bd_flags; + __le16 fw_use_only; +}; + +/* The parsing information data for the second tx bd of a given packet. */ +struct eth_tx_data_2nd_bd { + __le16 tunn_ip_size; + __le16 bitfields; +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 +#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 + __le16 bitfields2; +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 +#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 8 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 10 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 11 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 12 +#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 13 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 14 +#define ETH_TX_DATA_2ND_BD_RESERVED1_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_RESERVED1_SHIFT 15 +}; + +/* Regular ETH Rx FP CQE.
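+ * One such CQE is posted per packet received on the regular (non-TPA) path, carrying the length, parsing flags, VLAN tag and RSS hash.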
*/ +struct eth_fast_path_rx_reg_cqe { + u8 type; + u8 bitfields; +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF +#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 + __le16 pkt_len; + struct parsing_and_err_flags pars_flags; + __le16 vlan_tag; + __le32 rss_hash; + __le16 len_on_bd; + u8 placement_offset; + u8 reserved; + __le16 pbl[ETH_REG_CQE_PBL_SIZE]; + u8 reserved1[10]; +}; + +/* The L4 pseudo checksum mode for Ethernet */ +enum eth_l4_pseudo_checksum_mode { + ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH, + ETH_L4_PSEUDO_CSUM_ZERO_LENGTH, + MAX_ETH_L4_PSEUDO_CHECKSUM_MODE +}; + +struct eth_rx_bd { + struct regpair addr; +}; + +/* regular ETH Rx SP CQE */ +struct eth_slow_path_rx_cqe { + u8 type; + u8 ramrod_cmd_id; + u8 error_flag; + u8 reserved[27]; + __le16 echo; +}; + +/* union for all ETH Rx CQE types */ +union eth_rx_cqe { + struct eth_fast_path_rx_reg_cqe fast_path_regular; + struct eth_slow_path_rx_cqe slow_path; +}; + +/* ETH Rx CQE type */ +enum eth_rx_cqe_type { + ETH_RX_CQE_TYPE_UNUSED, + ETH_RX_CQE_TYPE_REGULAR, + ETH_RX_CQE_TYPE_SLOW_PATH, + MAX_ETH_RX_CQE_TYPE +}; + +/* ETH Rx producers data */ +struct eth_rx_prod_data { + __le16 bd_prod; + __le16 sge_prod; + __le16 cqe_prod; + __le16 reserved; +}; + +/* The first tx bd of a given packet */ +struct eth_tx_1st_bd { + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_1st_bd data; +}; + +/* The second tx bd of a given packet */ +struct eth_tx_2nd_bd { + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_2nd_bd data; +}; + +/* The parsing information data for the third tx bd of a given packet. 
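+ * It carries the LSO MSS, the TCP header length in dwords and the number of header BDs.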
*/ +struct eth_tx_data_3rd_bd { + __le16 lso_mss; + u8 bitfields; +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 +#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF +#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 + u8 resereved0[3]; +}; + +/* The third tx bd of a given packet */ +struct eth_tx_3rd_bd { + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_3rd_bd data; +}; + +/* The common non-special TX BD ring element */ +struct eth_tx_bd { + struct regpair addr; + __le16 nbytes; + __le16 reserved0; + __le32 reserved1; +}; + +union eth_tx_bd_types { + struct eth_tx_1st_bd first_bd; + struct eth_tx_2nd_bd second_bd; + struct eth_tx_3rd_bd third_bd; + struct eth_tx_bd reg_bd; +}; + +/* Mstorm Queue Zone */ +struct mstorm_eth_queue_zone { + struct eth_rx_prod_data rx_producers; + __le32 reserved[2]; +}; + +/* Ustorm Queue Zone */ +struct ustorm_eth_queue_zone { + struct coalescing_timeset int_coalescing_timeset; + __le16 reserved[3]; +}; + +/* Ystorm Queue Zone */ +struct ystorm_eth_queue_zone { + struct coalescing_timeset int_coalescing_timeset; + __le16 reserved[3]; +}; + +/* ETH doorbell data */ +struct eth_db_data { + u8 params; +#define ETH_DB_DATA_DEST_MASK 0x3 +#define ETH_DB_DATA_DEST_SHIFT 0 +#define ETH_DB_DATA_AGG_CMD_MASK 0x3 +#define ETH_DB_DATA_AGG_CMD_SHIFT 2 +#define ETH_DB_DATA_BYPASS_EN_MASK 0x1 +#define ETH_DB_DATA_BYPASS_EN_SHIFT 4 +#define ETH_DB_DATA_RESERVED_MASK 0x1 +#define ETH_DB_DATA_RESERVED_SHIFT 5 +#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 bd_prod; +}; + +#endif /* __ETH_COMMON__ */ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h new file mode 100644 index 000000000000..b920c3605c46 --- /dev/null +++ b/include/linux/qed/qed_chain.h @@ -0,0 +1,539 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#ifndef _QED_CHAIN_H +#define _QED_CHAIN_H + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/qed/common_hsi.h> + +/* dma_addr_t manip */ +#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) +#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) + +#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) +#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t) +#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64) +#define HILO_DMA_REGPAIR(regpair) (HILO_DMA(regpair.hi, regpair.lo)) +#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo)) + +enum qed_chain_mode { + /* Each Page contains a next pointer at its end */ + QED_CHAIN_MODE_NEXT_PTR, + + /* Chain is a single page (next ptr is not required) */ + QED_CHAIN_MODE_SINGLE, + + /* Page pointers are located in a side list */ + QED_CHAIN_MODE_PBL, +}; + +enum qed_chain_use_mode { + QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */ + QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */ + QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */ +}; + +struct qed_chain_next { + struct regpair next_phys; + void *next_virt; +}; + +struct qed_chain_pbl { + dma_addr_t p_phys_table; + void *p_virt_table; + u16 prod_page_idx; + u16 cons_page_idx; +}; + +struct qed_chain { + void *p_virt_addr; + dma_addr_t p_phys_addr; + void *p_prod_elem; + void *p_cons_elem; + u16 page_cnt; + enum qed_chain_mode mode; + enum qed_chain_use_mode intended_use; /* used to produce/consume */ + u16 capacity; /*< number of _usable_ elements */ + u16 size; /* number of elements */ + u16 prod_idx; + u16 cons_idx; + u16 elem_per_page; + u16 elem_per_page_mask; + u16 elem_unusable; + u16 usable_per_page; + u16 elem_size; + u16 next_page_mask; + struct qed_chain_pbl pbl; +}; + +#define QED_CHAIN_PBL_ENTRY_SIZE (8) +#define QED_CHAIN_PAGE_SIZE (0x1000) +#define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size)) + +#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ + ((mode == QED_CHAIN_MODE_NEXT_PTR) ?
\ + (1 + ((sizeof(struct qed_chain_next) - 1) / \ + (elem_size))) : 0) + +#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \ + ((u32)(ELEMS_PER_PAGE(elem_size) - \ + UNUSABLE_ELEMS_PER_PAGE(elem_size, mode))) + +#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \ + DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode)) + +/* Accessors */ +static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain) +{ + return p_chain->prod_idx; +} + +static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain) +{ + return p_chain->cons_idx; +} + +static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) +{ + u16 used; + + /* we don't need to truncate upon assignment, as we assign u32->u16 */ + used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) - + (u32)p_chain->cons_idx; + if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) + used -= (used / p_chain->elem_per_page); + + return p_chain->capacity - used; +} + +static inline u8 qed_chain_is_full(struct qed_chain *p_chain) +{ + return qed_chain_get_elem_left(p_chain) == p_chain->capacity; +} + +static inline u8 qed_chain_is_empty(struct qed_chain *p_chain) +{ + return qed_chain_get_elem_left(p_chain) == 0; +} + +static inline u16 qed_chain_get_elem_per_page( + struct qed_chain *p_chain) +{ + return p_chain->elem_per_page; +} + +static inline u16 qed_chain_get_usable_per_page( + struct qed_chain *p_chain) +{ + return p_chain->usable_per_page; +} + +static inline u16 qed_chain_get_unusable_per_page( + struct qed_chain *p_chain) +{ + return p_chain->elem_unusable; +} + +static inline u16 qed_chain_get_size(struct qed_chain *p_chain) +{ + return p_chain->size; +} + +static inline dma_addr_t +qed_chain_get_pbl_phys(struct qed_chain *p_chain) +{ + return p_chain->pbl.p_phys_table; +} + +/** + * @brief qed_chain_advance_page - + * + * Advance the next element across pages for a linked chain + * + * @param p_chain + * @param p_next_elem + * @param idx_to_inc + * @param page_to_inc + */ +static inline void +qed_chain_advance_page(struct qed_chain *p_chain, + void **p_next_elem, + u16 *idx_to_inc, + u16 *page_to_inc) + +{ + switch (p_chain->mode) { + case QED_CHAIN_MODE_NEXT_PTR: + { + struct qed_chain_next *p_next = *p_next_elem; + *p_next_elem = p_next->next_virt; + *idx_to_inc += p_chain->elem_unusable; + break; + } + case QED_CHAIN_MODE_SINGLE: + *p_next_elem = p_chain->p_virt_addr; + break; + + case QED_CHAIN_MODE_PBL: + /* It is assumed pages are sequential, next element needs + * to change only when wrapping back to first from last. + */ + if (++(*page_to_inc) == p_chain->page_cnt) { + *page_to_inc = 0; + *p_next_elem = p_chain->p_virt_addr; + } + } +} + +#define is_unusable_idx(p, idx) \ + (((p)->idx & (p)->elem_per_page_mask) == (p)->usable_per_page) + +#define is_unusable_next_idx(p, idx) \ + ((((p)->idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page) + +#define test_ans_skip(p, idx) \ + do { \ + if (is_unusable_idx(p, idx)) { \ + (p)->idx += (p)->elem_unusable; \ + } \ + } while (0) + +/** + * @brief qed_chain_return_multi_produced - + * + * A chain in which the driver "Produces" elements should use this API + * to indicate previous produced elements are now consumed.
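+ * Typically called once per completion batch (e.g. at the end of a NAPI poll) rather than per element.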
+ * + * @param p_chain + * @param num + */ +static inline void +qed_chain_return_multi_produced(struct qed_chain *p_chain, + u16 num) +{ + p_chain->cons_idx += num; + test_ans_skip(p_chain, cons_idx); +} + +/** + * @brief qed_chain_return_produced - + * + * A chain in which the driver "Produces" elements should use this API + * to indicate previous produced elements are now consumed. + * + * @param p_chain + */ +static inline void qed_chain_return_produced(struct qed_chain *p_chain) +{ + p_chain->cons_idx++; + test_ans_skip(p_chain, cons_idx); +} + +/** + * @brief qed_chain_produce - + * + * A chain in which the driver "Produces" elements should use this to get + * a pointer to the next element which can be "Produced". It is the driver's + * responsibility to validate that the chain has room for a new element. + * + * @param p_chain + * + * @return void*, a pointer to next element + */ +static inline void *qed_chain_produce(struct qed_chain *p_chain) +{ + void *ret = NULL; + + if ((p_chain->prod_idx & p_chain->elem_per_page_mask) == + p_chain->next_page_mask) { + qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, + &p_chain->prod_idx, + &p_chain->pbl.prod_page_idx); + } + + ret = p_chain->p_prod_elem; + p_chain->prod_idx++; + p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) + + p_chain->elem_size); + + return ret; +} + +/** + * @brief qed_chain_get_capacity - + * + * Get the maximum number of BDs in chain + * + * @param p_chain + * + * @return u16, number of usable BDs + */ +static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain) +{ + return p_chain->capacity; +} + +/** + * @brief qed_chain_recycle_consumed - + * + * Returns an element which was previously consumed; + * Increments producers so they could be written to FW. + * + * @param p_chain + */ +static inline void +qed_chain_recycle_consumed(struct qed_chain *p_chain) +{ + test_ans_skip(p_chain, prod_idx); + p_chain->prod_idx++; +} + +/** + * @brief qed_chain_consume - + * + * A Chain in which the driver utilizes data written by a different source + * (i.e., FW) should use this to access passed buffers.
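+ * As with qed_chain_produce(), the caller is responsible for knowing an element is available; the chain itself performs no bounds checking.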
+ * + * @param p_chain + * + * @return void*, a pointer to the next buffer written + */ +static inline void *qed_chain_consume(struct qed_chain *p_chain) +{ + void *ret = NULL; + + if ((p_chain->cons_idx & p_chain->elem_per_page_mask) == + p_chain->next_page_mask) { + qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, + &p_chain->cons_idx, + &p_chain->pbl.cons_page_idx); + } + + ret = p_chain->p_cons_elem; + p_chain->cons_idx++; + p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) + + p_chain->elem_size); + + return ret; +} + +/** + * @brief qed_chain_reset - Resets the chain to its start state + * + * @param p_chain pointer to a previously allocated chain + */ +static inline void qed_chain_reset(struct qed_chain *p_chain) +{ + int i; + + p_chain->prod_idx = 0; + p_chain->cons_idx = 0; + p_chain->p_cons_elem = p_chain->p_virt_addr; + p_chain->p_prod_elem = p_chain->p_virt_addr; + + if (p_chain->mode == QED_CHAIN_MODE_PBL) { + p_chain->pbl.prod_page_idx = p_chain->page_cnt - 1; + p_chain->pbl.cons_page_idx = p_chain->page_cnt - 1; + } + + switch (p_chain->intended_use) { + case QED_CHAIN_USE_TO_CONSUME_PRODUCE: + case QED_CHAIN_USE_TO_PRODUCE: + /* Do nothing */ + break; + + case QED_CHAIN_USE_TO_CONSUME: + /* produce empty elements */ + for (i = 0; i < p_chain->capacity; i++) + qed_chain_recycle_consumed(p_chain); + break; + } +} + +/** + * @brief qed_chain_init - Initializes a basic chain struct + * + * @param p_chain + * @param p_virt_addr + * @param p_phys_addr physical address of allocated buffer's beginning + * @param page_cnt number of pages in the allocated buffer + * @param elem_size size of each element in the chain + * @param intended_use + * @param mode + */ +static inline void qed_chain_init(struct qed_chain *p_chain, + void *p_virt_addr, + dma_addr_t p_phys_addr, + u16 page_cnt, + u8 elem_size, + enum qed_chain_use_mode intended_use, + enum qed_chain_mode mode) +{ + /* chain fixed parameters */ + p_chain->p_virt_addr = p_virt_addr; + p_chain->p_phys_addr = p_phys_addr; + p_chain->elem_size = elem_size; + p_chain->page_cnt = page_cnt; + p_chain->mode = mode; + + p_chain->intended_use = intended_use; + p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size); + p_chain->usable_per_page = + USABLE_ELEMS_PER_PAGE(elem_size, mode); + p_chain->capacity = p_chain->usable_per_page * page_cnt; + p_chain->size = p_chain->elem_per_page * page_cnt; + p_chain->elem_per_page_mask = p_chain->elem_per_page - 1; + + p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); + + p_chain->next_page_mask = (p_chain->usable_per_page & + p_chain->elem_per_page_mask); + + if (mode == QED_CHAIN_MODE_NEXT_PTR) { + struct qed_chain_next *p_next; + u16 i; + + for (i = 0; i < page_cnt - 1; i++) { + /* Increment mem_phy to the next page. */ + p_phys_addr += QED_CHAIN_PAGE_SIZE; + + /* Initialize the physical address of the next page. */ + p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + + elem_size * + p_chain-> + usable_per_page); + + p_next->next_phys.lo = DMA_LO_LE(p_phys_addr); + p_next->next_phys.hi = DMA_HI_LE(p_phys_addr); + + /* Initialize the virtual address of the next page. */ + p_next->next_virt = (void *)((u8 *)p_virt_addr + + QED_CHAIN_PAGE_SIZE); + + /* Move to the next page.
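+ * Following the next_virt pointer just written walks the loop through each page in turn.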
*/ + p_virt_addr = p_next->next_virt; + } + + /* Last page's next should point to beginning of the chain */ + p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + + elem_size * + p_chain->usable_per_page); + + p_next->next_phys.lo = DMA_LO_LE(p_chain->p_phys_addr); + p_next->next_phys.hi = DMA_HI_LE(p_chain->p_phys_addr); + p_next->next_virt = p_chain->p_virt_addr; + } + qed_chain_reset(p_chain); +} + +/** + * @brief qed_chain_pbl_init - Initializes a basic pbl chain + * struct + * @param p_chain + * @param p_virt_addr virtual address of allocated buffer's beginning + * @param p_phys_addr physical address of allocated buffer's beginning + * @param page_cnt number of pages in the allocated buffer + * @param elem_size size of each element in the chain + * @param use_mode + * @param p_phys_pbl pointer to a pre-allocated side table + * which will hold physical page addresses. + * @param p_virt_pbl pointer to a pre-allocated side table + * which will hold virtual page addresses. + */ +static inline void +qed_chain_pbl_init(struct qed_chain *p_chain, + void *p_virt_addr, + dma_addr_t p_phys_addr, + u16 page_cnt, + u8 elem_size, + enum qed_chain_use_mode use_mode, + dma_addr_t p_phys_pbl, + dma_addr_t *p_virt_pbl) +{ + dma_addr_t *p_pbl_dma = p_virt_pbl; + int i; + + qed_chain_init(p_chain, p_virt_addr, p_phys_addr, page_cnt, + elem_size, use_mode, QED_CHAIN_MODE_PBL); + + p_chain->pbl.p_phys_table = p_phys_pbl; + p_chain->pbl.p_virt_table = p_virt_pbl; + + /* Fill the PBL with physical addresses */ + for (i = 0; i < page_cnt; i++) { + *p_pbl_dma = p_phys_addr; + p_phys_addr += QED_CHAIN_PAGE_SIZE; + p_pbl_dma++; + } +} + +/** + * @brief qed_chain_set_prod - sets the prod to the given + * value + * + * @param prod_idx + * @param p_prod_elem + */ +static inline void qed_chain_set_prod(struct qed_chain *p_chain, + u16 prod_idx, + void *p_prod_elem) +{ + p_chain->prod_idx = prod_idx; + p_chain->p_prod_elem = p_prod_elem; +} + +/** + * @brief qed_chain_sge_get_elem - + * + * get a pointer to an element represented by absolute idx + * + * @param p_chain + * @assumption p_chain->size is a power of 2 + * + * @return void*, a pointer to next element + */ +static inline void *qed_chain_sge_get_elem(struct qed_chain *p_chain, + u16 idx) +{ + void *ret = NULL; + + if (idx >= p_chain->size) + return NULL; + + ret = (u8 *)p_chain->p_virt_addr + p_chain->elem_size * idx; + + return ret; +} + +/** + * @brief qed_chain_sge_inc_cons_prod + * + * For sge chains, the producer isn't increased serially; the ring + * is expected to be full at all times. Once elements are + * consumed, they are immediately produced. + * + * @param p_chain + * @param cnt + * + * @return inline void + */ +static inline void +qed_chain_sge_inc_cons_prod(struct qed_chain *p_chain, + u16 cnt) +{ + p_chain->prod_idx += cnt; + p_chain->cons_idx += cnt; +} + +#endif diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h new file mode 100644 index 000000000000..81ab178e31c1 --- /dev/null +++ b/include/linux/qed/qed_eth_if.h @@ -0,0 +1,165 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree.
+ */ + +#ifndef _QED_ETH_IF_H +#define _QED_ETH_IF_H + +#include <linux/list.h> +#include <linux/if_link.h> +#include <linux/qed/eth_common.h> +#include <linux/qed/qed_if.h> + +struct qed_dev_eth_info { + struct qed_dev_info common; + + u8 num_queues; + u8 num_tc; + + u8 port_mac[ETH_ALEN]; + u8 num_vlan_filters; +}; + +struct qed_update_vport_rss_params { + u16 rss_ind_table[128]; + u32 rss_key[10]; +}; + +struct qed_update_vport_params { + u8 vport_id; + u8 update_vport_active_flg; + u8 vport_active_flg; + u8 update_rss_flg; + struct qed_update_vport_rss_params rss_params; +}; + +struct qed_stop_rxq_params { + u8 rss_id; + u8 rx_queue_id; + u8 vport_id; + bool eq_completion_only; +}; + +struct qed_stop_txq_params { + u8 rss_id; + u8 tx_queue_id; +}; + +enum qed_filter_rx_mode_type { + QED_FILTER_RX_MODE_TYPE_REGULAR, + QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC, + QED_FILTER_RX_MODE_TYPE_PROMISC, +}; + +enum qed_filter_xcast_params_type { + QED_FILTER_XCAST_TYPE_ADD, + QED_FILTER_XCAST_TYPE_DEL, + QED_FILTER_XCAST_TYPE_REPLACE, +}; + +struct qed_filter_ucast_params { + enum qed_filter_xcast_params_type type; + u8 vlan_valid; + u16 vlan; + u8 mac_valid; + unsigned char mac[ETH_ALEN]; +}; + +struct qed_filter_mcast_params { + enum qed_filter_xcast_params_type type; + u8 num; + unsigned char mac[64][ETH_ALEN]; +}; + +union qed_filter_type_params { + enum qed_filter_rx_mode_type accept_flags; + struct qed_filter_ucast_params ucast; + struct qed_filter_mcast_params mcast; +}; + +enum qed_filter_type { + QED_FILTER_TYPE_UCAST, + QED_FILTER_TYPE_MCAST, + QED_FILTER_TYPE_RX_MODE, + QED_MAX_FILTER_TYPES, +}; + +struct qed_filter_params { + enum qed_filter_type type; + union qed_filter_type_params filter; +}; + +struct qed_queue_start_common_params { + u8 rss_id; + u8 queue_id; + u8 vport_id; + u16 sb; + u16 sb_idx; +}; + +struct qed_eth_cb_ops { + struct qed_common_cb_ops common; +}; + +struct qed_eth_ops { + const struct qed_common_ops *common; + + int (*fill_dev_info)(struct qed_dev *cdev, + struct qed_dev_eth_info *info); + + void (*register_ops)(struct qed_dev *cdev, + struct qed_eth_cb_ops *ops, + void *cookie); + + int (*vport_start)(struct qed_dev *cdev, + u8 vport_id, u16 mtu, + u8 drop_ttl0_flg, + u8 inner_vlan_removal_en_flg); + + int (*vport_stop)(struct qed_dev *cdev, + u8 vport_id); + + int (*vport_update)(struct qed_dev *cdev, + struct qed_update_vport_params *params); + + int (*q_rx_start)(struct qed_dev *cdev, + struct qed_queue_start_common_params *params, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + void __iomem **pp_prod); + + int (*q_rx_stop)(struct qed_dev *cdev, + struct qed_stop_rxq_params *params); + + int (*q_tx_start)(struct qed_dev *cdev, + struct qed_queue_start_common_params *params, + dma_addr_t pbl_addr, + u16 pbl_size, + void __iomem **pp_doorbell); + + int (*q_tx_stop)(struct qed_dev *cdev, + struct qed_stop_txq_params *params); + + int (*filter_config)(struct qed_dev *cdev, + struct qed_filter_params *params); + + int (*fastpath_stop)(struct qed_dev *cdev); + + int (*eth_cqe_completion)(struct qed_dev *cdev, + u8 rss_id, + struct eth_slow_path_rx_cqe *cqe); + + void (*get_vport_stats)(struct qed_dev *cdev, + struct qed_eth_stats *stats); +}; + +const struct qed_eth_ops *qed_get_eth_ops(u32 version); +void qed_put_eth_ops(void); + +#endif diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h new file mode 100644 index 000000000000..dc9a1353f971 --- /dev/null +++ b/include/linux/qed/qed_if.h @@ -0,0 
+1,498 @@ +/* QLogic qed NIC Driver + * + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef _QED_IF_H +#define _QED_IF_H + +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/skbuff.h> +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/io.h> +#include <linux/compiler.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/qed/common_hsi.h> +#include <linux/qed/qed_chain.h> + +#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \ + (void __iomem *)(reg_addr)) + +#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr)) + +#define QED_COALESCE_MAX 0xFF + +/* forward */ +struct qed_dev; + +struct qed_eth_pf_params { + /* The following parameters are used during HW-init + * and these parameters need to be passed as arguments + * to update_pf_params routine invoked before slowpath start + */ + u16 num_cons; +}; + +struct qed_pf_params { + struct qed_eth_pf_params eth_pf_params; +}; + +enum qed_int_mode { + QED_INT_MODE_INTA, + QED_INT_MODE_MSIX, + QED_INT_MODE_MSI, + QED_INT_MODE_POLL, +}; + +struct qed_sb_info { + struct status_block *sb_virt; + dma_addr_t sb_phys; + u32 sb_ack; /* Last given ack */ + u16 igu_sb_id; + void __iomem *igu_addr; + u8 flags; +#define QED_SB_INFO_INIT 0x1 +#define QED_SB_INFO_SETUP 0x2 + + struct qed_dev *cdev; +}; + +struct qed_dev_info { + unsigned long pci_mem_start; + unsigned long pci_mem_end; + unsigned int pci_irq; + u8 num_hwfns; + + u8 hw_mac[ETH_ALEN]; + bool is_mf; + + /* FW version */ + u16 fw_major; + u16 fw_minor; + u16 fw_rev; + u16 fw_eng; + + /* MFW version */ + u32 mfw_rev; + + u32 flash_size; + u8 mf_mode; +}; + +enum qed_sb_type { + QED_SB_TYPE_L2_QUEUE, +}; + +enum qed_protocol { + QED_PROTOCOL_ETH, +}; + +struct qed_link_params { + bool link_up; + +#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0) +#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1) +#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2) +#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3) + u32 override_flags; + bool autoneg; + u32 adv_speeds; + u32 forced_speed; +#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0) +#define QED_LINK_PAUSE_RX_ENABLE BIT(1) +#define QED_LINK_PAUSE_TX_ENABLE BIT(2) + u32 pause_config; +}; + +struct qed_link_output { + bool link_up; + + u32 supported_caps; /* In SUPPORTED defs */ + u32 advertised_caps; /* In ADVERTISED defs */ + u32 lp_caps; /* In ADVERTISED defs */ + u32 speed; /* In Mb/s */ + u8 duplex; /* In DUPLEX defs */ + u8 port; /* In PORT defs */ + bool autoneg; + u32 pause_config; +}; + +#define QED_DRV_VER_STR_SIZE 12 +struct qed_slowpath_params { + u32 int_mode; + u8 drv_major; + u8 drv_minor; + u8 drv_rev; + u8 drv_eng; + u8 name[QED_DRV_VER_STR_SIZE]; +}; + +#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */ + +struct qed_int_info { + struct msix_entry *msix; + u8 msix_cnt; + + /* This should be updated by the protocol driver */ + u8 used_cnt; +}; + +struct qed_common_cb_ops { + void (*link_update)(void *dev, + struct qed_link_output *link); +}; + +struct qed_common_ops { + struct qed_dev* (*probe)(struct pci_dev *dev, + enum qed_protocol protocol, + u32 dp_module, u8 dp_level); + + void (*remove)(struct qed_dev *cdev); + + int (*set_power_state)(struct qed_dev *cdev, + pci_power_t state); + + void (*set_id)(struct qed_dev *cdev, 
+ char name[], + char ver_str[]); + + /* Client drivers need to make this call before slowpath_start. + * The PF params required for the call before slowpath_start are + * documented within the qed_pf_params structure definition. + */ + void (*update_pf_params)(struct qed_dev *cdev, + struct qed_pf_params *params); + int (*slowpath_start)(struct qed_dev *cdev, + struct qed_slowpath_params *params); + + int (*slowpath_stop)(struct qed_dev *cdev); + + /* Requests to use `cnt' interrupts for fastpath. + * Upon success, returns the number of interrupts allocated for fastpath. + */ + int (*set_fp_int)(struct qed_dev *cdev, + u16 cnt); + + /* Fills `info' with pointers required for utilizing interrupts */ + int (*get_fp_int)(struct qed_dev *cdev, + struct qed_int_info *info); + + u32 (*sb_init)(struct qed_dev *cdev, + struct qed_sb_info *sb_info, + void *sb_virt_addr, + dma_addr_t sb_phy_addr, + u16 sb_id, + enum qed_sb_type type); + + u32 (*sb_release)(struct qed_dev *cdev, + struct qed_sb_info *sb_info, + u16 sb_id); + + void (*simd_handler_config)(struct qed_dev *cdev, + void *token, + int index, + void (*handler)(void *)); + + void (*simd_handler_clean)(struct qed_dev *cdev, + int index); +/** + * @brief set_link - set link according to params + * + * @param cdev + * @param params - values used to override the default link configuration + * + * @return 0 on success, error otherwise. + */ + int (*set_link)(struct qed_dev *cdev, + struct qed_link_params *params); + +/** + * @brief get_link - returns the current link state. + * + * @param cdev + * @param if_link - structure to be filled with current link configuration. + */ + void (*get_link)(struct qed_dev *cdev, + struct qed_link_output *if_link); + +/** + * @brief - drains chip in case Tx completions fail to arrive due to pause. + * + * @param cdev + */ + int (*drain)(struct qed_dev *cdev); + +/** + * @brief update_msglvl - update module debug level + * + * @param cdev + * @param dp_module + * @param dp_level + */ + void (*update_msglvl)(struct qed_dev *cdev, + u32 dp_module, + u8 dp_level); + + int (*chain_alloc)(struct qed_dev *cdev, + enum qed_chain_use_mode intended_use, + enum qed_chain_mode mode, + u16 num_elems, + size_t elem_size, + struct qed_chain *p_chain); + + void (*chain_free)(struct qed_dev *cdev, + struct qed_chain *p_chain); +}; + +/** + * @brief qed_get_protocol_version + * + * @param protocol + * + * @return version supported by qed for given protocol driver + */ +u32 qed_get_protocol_version(enum qed_protocol protocol); + +#define MASK_FIELD(_name, _value) \ + ((_value) &= (_name ## _MASK)) + +#define FIELD_VALUE(_name, _value) \ + ((_value & _name ## _MASK) << _name ## _SHIFT) + +#define SET_FIELD(value, name, flag) \ + do { \ + (value) &= ~(name ## _MASK << name ## _SHIFT); \ + (value) |= (((u64)flag) << (name ## _SHIFT)); \ + } while (0) + +#define GET_FIELD(value, name) \ + (((value) >> (name ## _SHIFT)) & name ## _MASK) + +/* Debug print definitions */ +#define DP_ERR(cdev, fmt, ...) \ + pr_err("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? DP_NAME(cdev) : "", \ + ## __VA_ARGS__) + +#define DP_NOTICE(cdev, fmt, ...) \ + do { \ + if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \ + pr_notice("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? DP_NAME(cdev) : "", \ + ## __VA_ARGS__); \ + } \ + } while (0) + +#define DP_INFO(cdev, fmt, ...) \ + do { \ + if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) { \ + pr_notice("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ?
DP_NAME(cdev) : "", \ + ## __VA_ARGS__); \ + } \ + } while (0) + +#define DP_VERBOSE(cdev, module, fmt, ...) \ + do { \ + if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) && \ + ((cdev)->dp_module & module))) { \ + pr_notice("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? DP_NAME(cdev) : "", \ + ## __VA_ARGS__); \ + } \ + } while (0) + +enum DP_LEVEL { + QED_LEVEL_VERBOSE = 0x0, + QED_LEVEL_INFO = 0x1, + QED_LEVEL_NOTICE = 0x2, + QED_LEVEL_ERR = 0x3, +}; + +#define QED_LOG_LEVEL_SHIFT (30) +#define QED_LOG_VERBOSE_MASK (0x3fffffff) +#define QED_LOG_INFO_MASK (0x40000000) +#define QED_LOG_NOTICE_MASK (0x80000000) + +enum DP_MODULE { + QED_MSG_SPQ = 0x10000, + QED_MSG_STATS = 0x20000, + QED_MSG_DCB = 0x40000, + QED_MSG_IOV = 0x80000, + QED_MSG_SP = 0x100000, + QED_MSG_STORAGE = 0x200000, + QED_MSG_CXT = 0x800000, + QED_MSG_ILT = 0x2000000, + QED_MSG_ROCE = 0x4000000, + QED_MSG_DEBUG = 0x8000000, + /* to be added...up to 0x8000000 */ +}; + +struct qed_eth_stats { + u64 no_buff_discards; + u64 packet_too_big_discard; + u64 ttl0_discard; + u64 rx_ucast_bytes; + u64 rx_mcast_bytes; + u64 rx_bcast_bytes; + u64 rx_ucast_pkts; + u64 rx_mcast_pkts; + u64 rx_bcast_pkts; + u64 mftag_filter_discards; + u64 mac_filter_discards; + u64 tx_ucast_bytes; + u64 tx_mcast_bytes; + u64 tx_bcast_bytes; + u64 tx_ucast_pkts; + u64 tx_mcast_pkts; + u64 tx_bcast_pkts; + u64 tx_err_drop_pkts; + u64 tpa_coalesced_pkts; + u64 tpa_coalesced_events; + u64 tpa_aborts_num; + u64 tpa_not_coalesced_pkts; + u64 tpa_coalesced_bytes; + + /* port */ + u64 rx_64_byte_packets; + u64 rx_127_byte_packets; + u64 rx_255_byte_packets; + u64 rx_511_byte_packets; + u64 rx_1023_byte_packets; + u64 rx_1518_byte_packets; + u64 rx_1522_byte_packets; + u64 rx_2047_byte_packets; + u64 rx_4095_byte_packets; + u64 rx_9216_byte_packets; + u64 rx_16383_byte_packets; + u64 rx_crc_errors; + u64 rx_mac_crtl_frames; + u64 rx_pause_frames; + u64 rx_pfc_frames; + u64 rx_align_errors; + u64 rx_carrier_errors; + u64 rx_oversize_packets; + u64 rx_jabbers; + u64 rx_undersize_packets; + u64 rx_fragments; + u64 tx_64_byte_packets; + u64 tx_65_to_127_byte_packets; + u64 tx_128_to_255_byte_packets; + u64 tx_256_to_511_byte_packets; + u64 tx_512_to_1023_byte_packets; + u64 tx_1024_to_1518_byte_packets; + u64 tx_1519_to_2047_byte_packets; + u64 tx_2048_to_4095_byte_packets; + u64 tx_4096_to_9216_byte_packets; + u64 tx_9217_to_16383_byte_packets; + u64 tx_pause_frames; + u64 tx_pfc_frames; + u64 tx_lpi_entry_count; + u64 tx_total_collisions; + u64 brb_truncates; + u64 brb_discards; + u64 rx_mac_bytes; + u64 rx_mac_uc_packets; + u64 rx_mac_mc_packets; + u64 rx_mac_bc_packets; + u64 rx_mac_frames_ok; + u64 tx_mac_bytes; + u64 tx_mac_uc_packets; + u64 tx_mac_mc_packets; + u64 tx_mac_bc_packets; + u64 tx_mac_ctrl_frames; +}; + +#define QED_SB_IDX 0x0002 + +#define RX_PI 0 +#define TX_PI(tc) (RX_PI + 1 + tc) + +static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) +{ + u32 prod = 0; + u16 rc = 0; + + prod = le32_to_cpu(sb_info->sb_virt->prod_index) & + STATUS_BLOCK_PROD_INDEX_MASK; + if (sb_info->sb_ack != prod) { + sb_info->sb_ack = prod; + rc |= QED_SB_IDX; + } + + /* Let SB update */ + mmiowb(); + return rc; +} + +/** + * + * @brief This function creates an update command for interrupts that is + * written to the IGU. + * + * @param sb_info - This is the structure allocated and + * initialized per status block. 
Assumption is + * that it was initialized using qed_sb_init + * @param int_cmd - Enable/Disable/Nop + * @param upd_flg - whether igu consumer should be + * updated. + * + * @return inline void + */ +static inline void qed_sb_ack(struct qed_sb_info *sb_info, + enum igu_int_cmd int_cmd, + u8 upd_flg) +{ + struct igu_prod_cons_update igu_ack = { 0 }; + + igu_ack.sb_id_and_flags = + ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | + (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | + (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | + (IGU_SEG_ACCESS_REG << + IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); + + DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags); + + /* Both segments (interrupts & acks) are written to the same address; + * we need to guarantee that all commands are received (in-order) by the HW. + */ + mmiowb(); + barrier(); +} + +static inline void __internal_ram_wr(void *p_hwfn, + void __iomem *addr, + int size, + u32 *data) +{ + unsigned int i; + + for (i = 0; i < size / sizeof(*data); i++) + DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]); +} + +static inline void internal_ram_wr(void __iomem *addr, + int size, + u32 *data) +{ + __internal_ram_wr(NULL, addr, size, data); +} + +#endif diff --git a/include/linux/random.h b/include/linux/random.h index e651874df2c9..a75840c1aa71 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -7,6 +7,8 @@ #define _LINUX_RANDOM_H #include <linux/list.h> +#include <linux/once.h> + #include <uapi/linux/random.h> struct random_ready_callback { @@ -45,6 +47,10 @@ struct rnd_state { u32 prandom_u32_state(struct rnd_state *state); void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); +void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); + +#define prandom_init_once(pcpu_state) \ + DO_ONCE(prandom_seed_full_state, (pcpu_state)) /** * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index 830c4992088d..a5aa7ae671f4 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -101,13 +101,21 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent }) /** - * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of - * given type safe against removal of rb_node entry + * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of + * given type allowing the backing memory of @pos to be invalidated * * @pos: the 'type *' to use as a loop cursor. * @n: another 'type *' to use as temporary storage * @root: 'rb_root *' of the rbtree. * @field: the name of the rb_node field within 'type'. + * + * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as + * list_for_each_entry_safe() and allows the iteration to continue independent + * of changes to @pos by the body of the loop. + * + * Note, however, that it cannot handle other modifications that re-order the + * rbtree it is iterating over. This includes calling rb_erase() on @pos, as + * rb_erase() may rebalance the tree, causing us to miss some nodes.
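+ *
+ * An illustrative teardown sketch (the "struct foo" with an rb_node member
+ * "node" is invented for this example, not part of the original hunk);
+ * freeing every entry is safe because the loop never revisits @pos:
+ *
+ *	struct foo *pos, *n;
+ *
+ *	rbtree_postorder_for_each_entry_safe(pos, n, &my_root, node)
+ *		kfree(pos);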
*/ #define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \ for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \ diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h new file mode 100644 index 000000000000..a63a33e6196e --- /dev/null +++ b/include/linux/rcu_sync.h @@ -0,0 +1,86 @@ +/* + * RCU-based infrastructure for lightweight reader-writer locking + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + * Copyright (c) 2015, Red Hat, Inc. + * + * Author: Oleg Nesterov <oleg@redhat.com> + */ + +#ifndef _LINUX_RCU_SYNC_H_ +#define _LINUX_RCU_SYNC_H_ + +#include <linux/wait.h> +#include <linux/rcupdate.h> + +enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC }; + +/* Structure to mediate between updaters and fastpath-using readers. */ +struct rcu_sync { + int gp_state; + int gp_count; + wait_queue_head_t gp_wait; + + int cb_state; + struct rcu_head cb_head; + + enum rcu_sync_type gp_type; +}; + +extern void rcu_sync_lockdep_assert(struct rcu_sync *); + +/** + * rcu_sync_is_idle() - Are readers permitted to use their fastpaths? + * @rsp: Pointer to rcu_sync structure to use for synchronization + * + * Returns true if readers are permitted to use their fastpaths. + * Must be invoked within an RCU read-side critical section whose + * flavor matches that of the rcu_sync structure. + */ +static inline bool rcu_sync_is_idle(struct rcu_sync *rsp) +{ +#ifdef CONFIG_PROVE_RCU + rcu_sync_lockdep_assert(rsp); +#endif + return !rsp->gp_state; /* GP_IDLE */ +} + +extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type); +extern void rcu_sync_enter(struct rcu_sync *); +extern void rcu_sync_exit(struct rcu_sync *); +extern void rcu_sync_dtor(struct rcu_sync *); + +#define __RCU_SYNC_INITIALIZER(name, type) { \ + .gp_state = 0, \ + .gp_count = 0, \ + .gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait), \ + .cb_state = 0, \ + .gp_type = type, \ + } + +#define __DEFINE_RCU_SYNC(name, type) \ + struct rcu_sync name = __RCU_SYNC_INITIALIZER(name, type) + +#define DEFINE_RCU_SYNC(name) \ + __DEFINE_RCU_SYNC(name, RCU_SYNC) + +#define DEFINE_RCU_SCHED_SYNC(name) \ + __DEFINE_RCU_SYNC(name, RCU_SCHED_SYNC) + +#define DEFINE_RCU_BH_SYNC(name) \ + __DEFINE_RCU_SYNC(name, RCU_BH_SYNC) + +#endif /* _LINUX_RCU_SYNC_H_ */ diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 17c6b1f84a77..5ed540986019 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -247,10 +247,7 @@ static inline void list_splice_init_rcu(struct list_head *list, * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
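 *
 * A brief usage sketch (the "struct foo" with a list_head member "list" is
 * invented for this example): fetch the first entry of a list head under
 * rcu_read_lock():
 *
 *	struct foo *first;
 *
 *	rcu_read_lock();
 *	first = list_entry_rcu(head->next, struct foo, list);
 *	/* ... use first ... */
 *	rcu_read_unlock();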
*/ #define list_entry_rcu(ptr, type, member) \ -({ \ - typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \ - container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \ -}) + container_of(lockless_dereference(ptr), type, member) /** * Where are list_empty_rcu() and list_first_entry_rcu()? diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 581abf848566..a0189ba67fde 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -160,7 +160,7 @@ void do_trace_rcu_torture_read(const char *rcutorturename, * more than one CPU). */ void call_rcu(struct rcu_head *head, - void (*func)(struct rcu_head *head)); + rcu_callback_t func); #else /* #ifdef CONFIG_PREEMPT_RCU */ @@ -191,7 +191,7 @@ void call_rcu(struct rcu_head *head, * memory ordering guarantees. */ void call_rcu_bh(struct rcu_head *head, - void (*func)(struct rcu_head *head)); + rcu_callback_t func); /** * call_rcu_sched() - Queue an RCU for invocation after sched grace period. @@ -213,7 +213,7 @@ void call_rcu_bh(struct rcu_head *head, * memory ordering guarantees. */ void call_rcu_sched(struct rcu_head *head, - void (*func)(struct rcu_head *rcu)); + rcu_callback_t func); void synchronize_sched(void); @@ -274,7 +274,7 @@ do { \ * See the description of call_rcu() for more detailed information on * memory ordering guarantees. */ -void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head)); +void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); void synchronize_rcu_tasks(void); void rcu_barrier_tasks(void); @@ -297,12 +297,14 @@ void synchronize_rcu(void); static inline void __rcu_read_lock(void) { - preempt_disable(); + if (IS_ENABLED(CONFIG_PREEMPT_COUNT)) + preempt_disable(); } static inline void __rcu_read_unlock(void) { - preempt_enable(); + if (IS_ENABLED(CONFIG_PREEMPT_COUNT)) + preempt_enable(); } static inline void synchronize_rcu(void) @@ -535,29 +537,9 @@ static inline int rcu_read_lock_sched_held(void) #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -/* Deprecate rcu_lockdep_assert(): Use RCU_LOCKDEP_WARN() instead. */ -static inline void __attribute((deprecated)) deprecate_rcu_lockdep_assert(void) -{ -} - #ifdef CONFIG_PROVE_RCU /** - * rcu_lockdep_assert - emit lockdep splat if specified condition not met - * @c: condition to check - * @s: informative message - */ -#define rcu_lockdep_assert(c, s) \ - do { \ - static bool __section(.data.unlikely) __warned; \ - deprecate_rcu_lockdep_assert(); \ - if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ - __warned = true; \ - lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ - } \ - } while (0) - -/** * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met * @c: condition to check * @s: informative message @@ -594,7 +576,6 @@ static inline void rcu_preempt_sleep_check(void) #else /* #ifdef CONFIG_PROVE_RCU */ -#define rcu_lockdep_assert(c, s) deprecate_rcu_lockdep_assert() #define RCU_LOCKDEP_WARN(c, s) do { } while (0) #define rcu_sleep_check() do { } while (0) @@ -811,6 +792,28 @@ static inline void rcu_preempt_sleep_check(void) #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) /** + * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism + * @p: The pointer to hand off + * + * This is simply an identity function, but it documents where a pointer + * is handed off from RCU to some other synchronization mechanism, for + * example, reference counting or locking. In C11, it would map to + * kill_dependency(). 
It could be used as follows: + * + * rcu_read_lock(); + * p = rcu_dereference(gp); + * long_lived = is_long_lived(p); + * if (long_lived) { + * if (!atomic_inc_not_zero(p->refcnt)) + * long_lived = false; + * else + * p = rcu_pointer_handoff(p); + * } + * rcu_read_unlock(); + */ +#define rcu_pointer_handoff(p) (p) + +/** * rcu_read_lock() - mark the beginning of an RCU read-side critical section * * When synchronize_rcu() is invoked on one CPU while other CPUs @@ -1065,7 +1068,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) #define __kfree_rcu(head, offset) \ do { \ BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \ - kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \ + kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \ } while (0) /** diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index ff968b7af3a4..4c1aaf9cce7b 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -83,7 +83,7 @@ static inline void synchronize_sched_expedited(void) } static inline void kfree_call_rcu(struct rcu_head *head, - void (*func)(struct rcu_head *rcu)) + rcu_callback_t func) { call_rcu(head, func); } @@ -216,6 +216,7 @@ static inline bool rcu_is_watching(void) static inline void rcu_all_qs(void) { + barrier(); /* Avoid RCU read-side critical sections leaking across. */ } #endif /* __LINUX_RCUTINY_H */ diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 5abec82f325e..60d15a080d7c 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -48,7 +48,7 @@ void synchronize_rcu_bh(void); void synchronize_sched_expedited(void); void synchronize_rcu_expedited(void); -void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); +void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func); /** * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 8fc0bfd8edc4..d68bb402120e 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -296,6 +296,8 @@ typedef int (*regmap_hw_reg_read)(void *context, unsigned int reg, unsigned int *val); typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg, unsigned int val); +typedef int (*regmap_hw_reg_update_bits)(void *context, unsigned int reg, + unsigned int mask, unsigned int val); typedef struct regmap_async *(*regmap_hw_async_alloc)(void); typedef void (*regmap_hw_free_context)(void *context); @@ -335,6 +337,7 @@ struct regmap_bus { regmap_hw_gather_write gather_write; regmap_hw_async_write async_write; regmap_hw_reg_write reg_write; + regmap_hw_reg_update_bits reg_update_bits; regmap_hw_read read; regmap_hw_reg_read reg_read; regmap_hw_free_context free_context; @@ -791,6 +794,9 @@ struct regmap_irq { unsigned int mask; }; +#define REGMAP_IRQ_REG(_irq, _off, _mask) \ + [_irq] = { .reg_offset = (_off), .mask = (_mask) } + /** * Description of a generic regmap irq_chip. This is not intended to * handle every possible interrupt controller, but it should handle a @@ -800,6 +806,8 @@ struct regmap_irq { * * @status_base: Base status register address. * @mask_base: Base mask register address. + * @unmask_base: Base unmask register address, for chips that have + * separate mask and unmask registers * @ack_base: Base ack address. If zero then the chip is clear on read. * Using zero value is possible with @use_ack bit. * @wake_base: Base address for wake enables. If zero unsupported.
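A hedged illustration of the REGMAP_IRQ_REG() initializer added above (the chip name, enum values and register layout are invented for the example, not taken from the patch): it shortens static tables of struct regmap_irq, one entry per interrupt source:

enum { MYCHIP_IRQ_OVERTEMP = 0, MYCHIP_IRQ_UNDERVOLT };

static const struct regmap_irq mychip_irqs[] = {
	/* both sources are assumed to live in status/mask register 0 */
	REGMAP_IRQ_REG(MYCHIP_IRQ_OVERTEMP, 0, BIT(0)),
	REGMAP_IRQ_REG(MYCHIP_IRQ_UNDERVOLT, 0, BIT(1)),
};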
@@ -807,6 +815,7 @@ struct regmap_irq { * @init_ack_masked: Ack all masked interrupts once during initialization. * @mask_invert: Inverted mask register: cleared bits are masked out. * @use_ack: Use @ack register even if it is zero. + * @ack_invert: Inverted ack register: cleared bits for ack. * @wake_invert: Inverted wake register: cleared bits are wake enabled. * @runtime_pm: Hold a runtime PM lock on the device when accessing it. * @@ -820,12 +829,14 @@ struct regmap_irq_chip { unsigned int status_base; unsigned int mask_base; + unsigned int unmask_base; unsigned int ack_base; unsigned int wake_base; unsigned int irq_reg_stride; bool init_ack_masked:1; bool mask_invert:1; bool use_ack:1; + bool ack_invert:1; bool wake_invert:1; bool runtime_pm:1; diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 45932228cbf5..9c2903e58adb 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -245,6 +245,7 @@ enum regulator_type { * @linear_min_sel: Minimal selector for starting linear mapping * @fixed_uV: Fixed voltage of rails. * @ramp_delay: Time to settle down after voltage change (unit: uV/us) + * @min_dropout_uV: The minimum dropout voltage this regulator can handle * @linear_ranges: A constant table of possible voltage ranges. * @n_linear_ranges: Number of entries in the @linear_ranges table. * @volt_table: Voltage mapping table (if table based mapping) @@ -292,6 +293,7 @@ struct regulator_desc { unsigned int linear_min_sel; int fixed_uV; unsigned int ramp_delay; + int min_dropout_uV; const struct regulator_linear_range *linear_ranges; int n_linear_ranges; diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index e2c13cd863bd..4acc552e9279 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -154,8 +154,8 @@ ring_buffer_swap_cpu(struct ring_buffer *buffer_a, } #endif -int ring_buffer_empty(struct ring_buffer *buffer); -int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); +bool ring_buffer_empty(struct ring_buffer *buffer); +bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); void ring_buffer_record_disable(struct ring_buffer *buffer); void ring_buffer_record_enable(struct ring_buffer *buffer); diff --git a/include/linux/rotary_encoder.h b/include/linux/rotary_encoder.h index 3f594dce5716..fe3dc64e5aeb 100644 --- a/include/linux/rotary_encoder.h +++ b/include/linux/rotary_encoder.h @@ -8,9 +8,10 @@ struct rotary_encoder_platform_data { unsigned int gpio_b; unsigned int inverted_a; unsigned int inverted_b; + unsigned int steps_per_period; bool relative_axis; bool rollover; - bool half_period; + bool wakeup_source; }; #endif /* __ROTARY_ENCODER_H__ */ diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 39adaa9529eb..4be5048b1fbe 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -33,11 +33,11 @@ extern wait_queue_head_t netdev_unregistering_wq; extern struct mutex net_mutex; #ifdef CONFIG_PROVE_LOCKING -extern int lockdep_rtnl_is_held(void); +extern bool lockdep_rtnl_is_held(void); #else -static inline int lockdep_rtnl_is_held(void) +static inline bool lockdep_rtnl_is_held(void) { - return 1; + return true; } #endif /* #ifdef CONFIG_PROVE_LOCKING */ diff --git a/include/linux/sched.h b/include/linux/sched.h index b7b9501b41af..4069febaa34a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -384,6 +384,7 @@ extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, void __user *buffer, size_t
*lenp, loff_t *ppos); extern unsigned int softlockup_panic; +extern unsigned int hardlockup_panic; void lockup_detector_init(void); #else static inline void touch_softlockup_watchdog(void) @@ -599,33 +600,42 @@ struct task_cputime_atomic { .sum_exec_runtime = ATOMIC64_INIT(0), \ } -#ifdef CONFIG_PREEMPT_COUNT -#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED) -#else -#define PREEMPT_DISABLED PREEMPT_ENABLED -#endif +#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) /* - * Disable preemption until the scheduler is running. - * Reset by start_kernel()->sched_init()->init_idle(). + * Disable preemption until the scheduler is running -- use an unconditional + * value so that it also works on !PREEMPT_COUNT kernels. * - * We include PREEMPT_ACTIVE to avoid cond_resched() from working - * before the scheduler is active -- see should_resched(). + * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count(). */ -#define INIT_PREEMPT_COUNT (PREEMPT_DISABLED + PREEMPT_ACTIVE) +#define INIT_PREEMPT_COUNT PREEMPT_OFFSET + +/* + * Initial preempt_count value; reflects the preempt_count schedule invariant + * which states that during context switches: + * + * preempt_count() == 2*PREEMPT_DISABLE_OFFSET + * + * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels. + * Note: See finish_task_switch(). + */ +#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) /** * struct thread_group_cputimer - thread group interval timer counts * @cputime_atomic: atomic thread group interval timers. - * @running: non-zero when there are timers running and - * @cputime receives updates. + * @running: true when there are timers running and + * @cputime_atomic receives updates. + * @checking_timer: true when a thread in the group is in the + * process of checking for thread group timers. * * This structure contains the version of task_cputime, above, that is * used for thread group CPU timer calculations. */ struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; - int running; + bool running; + bool checking_timer; }; #include <linux/rwsem.h> @@ -762,18 +772,6 @@ struct signal_struct { unsigned audit_tty_log_passwd; struct tty_audit_buf *tty_audit_buf; #endif -#ifdef CONFIG_CGROUPS - /* - * group_rwsem prevents new tasks from entering the threadgroup and - * member tasks from exiting,a more specifically, setting of - * PF_EXITING. fork and exit paths are protected with this rwsem - * using threadgroup_change_begin/end(). Users which require - * threadgroup to remain stable should use threadgroup_[un]lock() - * which also takes care of exec path. Currently, cgroup is the - * only user. - */ - struct rw_semaphore group_rwsem; -#endif oom_flags_t oom_flags; short oom_score_adj; /* OOM kill score adjustment */ @@ -840,7 +838,7 @@ struct user_struct { struct hlist_node uidhash_node; kuid_t uid; -#ifdef CONFIG_PERF_EVENTS +#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) atomic_long_t locked_vm; #endif }; @@ -1139,8 +1137,6 @@ struct sched_domain_topology_level { #endif }; -extern struct sched_domain_topology_level *sched_domain_topology; - extern void set_sched_topology(struct sched_domain_topology_level *tl); extern void wake_up_if_idle(int cpu); @@ -1189,10 +1185,10 @@ struct load_weight { /* * The load_avg/util_avg accumulates an infinite geometric series. - * 1) load_avg factors the amount of time that a sched_entity is - * runnable on a rq into its weight. 
For cfs_rq, it is the aggregated - * such weights of all runnable and blocked sched_entities. - * 2) util_avg factors frequency scaling into the amount of time + * 1) load_avg factors frequency scaling into the amount of time that a + * sched_entity is runnable on a rq into its weight. For cfs_rq, it is the + * aggregated such weights of all runnable and blocked sched_entities. + * 2) util_avg factors frequency and cpu scaling into the amount of time * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE]. * For cfs_rq, it is the aggregated such times of all runnable and * blocked sched_entities. @@ -1342,10 +1338,12 @@ struct sched_dl_entity { union rcu_special { struct { - bool blocked; - bool need_qs; - } b; - short s; + u8 blocked; + u8 need_qs; + u8 exp_need_qs; + u8 pad; /* Otherwise the compiler can store garbage here. */ + } b; /* Bits. */ + u32 s; /* Set of bits. */ }; struct rcu_node; @@ -1463,7 +1461,9 @@ struct task_struct { unsigned sched_reset_on_fork:1; unsigned sched_contributes_to_load:1; unsigned sched_migrated:1; - +#ifdef CONFIG_MEMCG + unsigned memcg_may_oom:1; +#endif #ifdef CONFIG_MEMCG_KMEM unsigned memcg_kmem_skip_account:1; #endif @@ -1570,9 +1570,7 @@ struct task_struct { unsigned long sas_ss_sp; size_t sas_ss_size; - int (*notifier)(void *priv); - void *notifier_data; - sigset_t *notifier_mask; + struct callback_head *task_works; struct audit_context *audit_context; @@ -1794,12 +1792,12 @@ struct task_struct { unsigned long trace_recursion; #endif /* CONFIG_TRACING */ #ifdef CONFIG_MEMCG - struct memcg_oom_info { - struct mem_cgroup *memcg; - gfp_t gfp_mask; - int order; - unsigned int may_oom:1; - } memcg_oom; + struct mem_cgroup *memcg_in_oom; + gfp_t memcg_oom_gfp_mask; + int memcg_oom_order; + + /* number of pages to reclaim on returning to userland */ + unsigned int memcg_nr_pages_over_high; #endif #ifdef CONFIG_UPROBES struct uprobe_task *utask; @@ -2464,21 +2462,29 @@ extern void ignore_signals(struct task_struct *); extern void flush_signal_handlers(struct task_struct *, int force_default); extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); -static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) +static inline int kernel_dequeue_signal(siginfo_t *info) { - unsigned long flags; + struct task_struct *tsk = current; + siginfo_t __info; int ret; - spin_lock_irqsave(&tsk->sighand->siglock, flags); - ret = dequeue_signal(tsk, mask, info); - spin_unlock_irqrestore(&tsk->sighand->siglock, flags); + spin_lock_irq(&tsk->sighand->siglock); + ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info); + spin_unlock_irq(&tsk->sighand->siglock); return ret; } -extern void block_all_signals(int (*notifier)(void *priv), void *priv, - sigset_t *mask); -extern void unblock_all_signals(void); +static inline void kernel_signal_stop(void) +{ + spin_lock_irq(&current->sighand->siglock); + if (current->jobctl & JOBCTL_STOP_DEQUEUED) + __set_current_state(TASK_STOPPED); + spin_unlock_irq(&current->sighand->siglock); + + schedule(); +} + extern void release_task(struct task_struct * p); extern int send_sig_info(int, struct siginfo *, struct task_struct *); extern int force_sigsegv(int, struct task_struct *); diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index 9d303b8847df..9089a2ae913d 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -21,4 +21,9 @@ static inline int dl_task(struct task_struct *p) return dl_prio(p->prio); } +static
inline bool dl_time_before(u64 a, u64 b) +{ + return (s64)(a - b) < 0; +} + #endif /* _SCHED_DEADLINE_H */ diff --git a/include/linux/scif.h b/include/linux/scif.h index 44f4f3898bbe..49a35d6edc94 100644 --- a/include/linux/scif.h +++ b/include/linux/scif.h @@ -55,6 +55,7 @@ #include <linux/types.h> #include <linux/poll.h> +#include <linux/device.h> #include <linux/scif_ioctl.h> #define SCIF_ACCEPT_SYNC 1 @@ -92,6 +93,70 @@ enum { #define SCIF_PORT_RSVD 1088 typedef struct scif_endpt *scif_epd_t; +typedef struct scif_pinned_pages *scif_pinned_pages_t; + +/** + * struct scif_range - SCIF registered range used in kernel mode + * @cookie: cookie used internally by SCIF + * @nr_pages: number of pages of PAGE_SIZE + * @prot_flags: R/W protection + * @phys_addr: Array of bus addresses + * @va: Array of kernel virtual addresses backed by the pages in the phys_addr + * array. The va is populated only when called on the host for a remote + * SCIF connection on MIC. This is required to support the use case of DMA + * between MIC and another device which is not a SCIF node e.g., an IB or + * ethernet NIC. + */ +struct scif_range { + void *cookie; + int nr_pages; + int prot_flags; + dma_addr_t *phys_addr; + void __iomem **va; +}; + +/** + * struct scif_pollepd - SCIF endpoint to be monitored via scif_poll + * @epd: SCIF endpoint + * @events: requested events + * @revents: returned events + */ +struct scif_pollepd { + scif_epd_t epd; + short events; + short revents; +}; + +/** + * scif_peer_dev - representation of a peer SCIF device + * + * Peer devices show up as PCIe devices for the mgmt node but not the cards. + * The mgmt node discovers all the cards on the PCIe bus and informs the other + * cards about their peers. Upon notification of a peer a node adds a peer + * device to the peer bus to maintain symmetry in the way devices are + * discovered across all nodes in the SCIF network. + * + * @dev: underlying device + * @dnode - The destination node which this device will communicate with. 
+ */ +struct scif_peer_dev { + struct device dev; + u8 dnode; +}; + +/** + * scif_client - representation of a SCIF client + * @name: client name + * @probe - client method called when a peer device is registered + * @remove - client method called when a peer device is unregistered + * @si - subsys_interface used internally for implementing SCIF clients + */ +struct scif_client { + const char *name; + void (*probe)(struct scif_peer_dev *spdev); + void (*remove)(struct scif_peer_dev *spdev); + struct subsys_interface si; +}; #define SCIF_OPEN_FAILED ((scif_epd_t)-1) #define SCIF_REGISTER_FAILED ((off_t)-1) @@ -345,7 +410,6 @@ int scif_close(scif_epd_t epd); * Errors: * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer - * EFAULT - An invalid address was specified for a parameter * EINVAL - flags is invalid, or len is negative * ENODEV - The remote node is lost or exited, but is not currently in the * network since it may have crashed @@ -398,7 +462,6 @@ int scif_send(scif_epd_t epd, void *msg, int len, int flags); * EAGAIN - The destination node is returning from a low power state * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer - * EFAULT - An invalid address was specified for a parameter * EINVAL - flags is invalid, or len is negative * ENODEV - The remote node is lost or exited, but is not currently in the * network since it may have crashed @@ -461,9 +524,6 @@ int scif_recv(scif_epd_t epd, void *msg, int len, int flags); * SCIF_PROT_READ - allow read operations from the window * SCIF_PROT_WRITE - allow write operations to the window * - * The map_flags argument can be set to SCIF_MAP_FIXED which interprets a - * fixed offset. - * * Return: * Upon successful completion, scif_register() returns the offset at which the * mapping was placed (po); otherwise in user mode SCIF_REGISTER_FAILED (that @@ -476,7 +536,6 @@ int scif_recv(scif_epd_t epd, void *msg, int len, int flags); * EAGAIN - The mapping could not be performed due to lack of resources * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer - * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid * EINVAL - map_flags is invalid, or prot_flags is invalid, or SCIF_MAP_FIXED is * set in flags, and offset is not a multiple of the page size, or addr is not a * multiple of the page size, or len is not a multiple of the page size, or is @@ -759,7 +818,6 @@ int scif_writeto(scif_epd_t epd, off_t loffset, size_t len, off_t * EACCESS - Attempt to write to a read-only range * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer - * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid * EINVAL - rma_flags is invalid * ENODEV - The remote node is lost or exited, but is not currently in the * network since it may have crashed @@ -840,7 +898,6 @@ int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, off_t roffset, * EACCESS - Attempt to write to a read-only range * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer - * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid * EINVAL - rma_flags is invalid * ENODEV - The remote node is lost or exited, but is not currently in the * network since it may have crashed @@ -984,10 +1041,299 @@ int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval, off_t roff, * online nodes in the SCIF network including 'self'; otherwise in user mode
* -1 is returned and errno is set to indicate the error; in kernel mode no * errors are returned. + */ +int scif_get_node_ids(u16 *nodes, int len, u16 *self); + +/** + * scif_pin_pages() - Pin a set of pages + * @addr: Virtual address of range to pin + * @len: Length of range to pin + * @prot_flags: Page protection flags + * @map_flags: Page classification flags + * @pinned_pages: Handle to pinned pages + * + * scif_pin_pages() pins (locks in physical memory) the physical pages which + * back the range of virtual address pages starting at addr and continuing for + * len bytes. addr and len are constrained to be multiples of the page size. A + * successful scif_pin_pages() call returns a handle to pinned_pages which may + * be used in subsequent calls to scif_register_pinned_pages(). + * + * The pages will remain pinned as long as there is a reference against the + * scif_pinned_pages_t value returned by scif_pin_pages() and until + * scif_unpin_pages() is called, passing the scif_pinned_pages_t value. A + * reference is added to a scif_pinned_pages_t value each time a window is + * created by calling scif_register_pinned_pages() and passing the + * scif_pinned_pages_t value. A reference is removed from a + * scif_pinned_pages_t value each time such a window is deleted. + * + * Subsequent operations which change the memory pages to which virtual + * addresses are mapped (such as mmap(), munmap()) have no effect on the + * scif_pinned_pages_t value or windows created against it. + * + * If the process will fork(), it is recommended that the registered + * virtual address range be marked with MADV_DONTFORK. Doing so will prevent + * problems due to copy-on-write semantics. + * + * The prot_flags argument is formed by OR'ing together one or more of the + * following values. + * SCIF_PROT_READ - allow read operations against the pages + * SCIF_PROT_WRITE - allow write operations against the pages + * The map_flags argument can be set as SCIF_MAP_KERNEL to interpret addr as a + * kernel space address. By default, addr is interpreted as a user space + * address. + * + * Return: + * Upon successful completion, scif_pin_pages() returns 0; otherwise the + * negative of one of the following errors is returned. * * Errors: - * EFAULT - Bad address + * EINVAL - prot_flags is invalid, map_flags is invalid, or offset is negative + * ENOMEM - Not enough space */ -int scif_get_node_ids(u16 *nodes, int len, u16 *self); +int scif_pin_pages(void *addr, size_t len, int prot_flags, int map_flags, + scif_pinned_pages_t *pinned_pages); + +/** + * scif_unpin_pages() - Unpin a set of pages + * @pinned_pages: Handle to pinned pages to be unpinned + * + * scif_unpin_pages() prevents scif_register_pinned_pages() from registering new + * windows against pinned_pages. The physical pages represented by pinned_pages + * will remain pinned until all windows previously registered against + * pinned_pages are deleted (the window is scif_unregister()'d and all + * references to the window are removed (see scif_unregister()). + * + * pinned_pages must have been obtained from a previous call to scif_pin_pages(). + * After calling scif_unpin_pages(), it is an error to pass pinned_pages to + * scif_register_pinned_pages(). + * + * Return: + * Upon successful completion, scif_unpin_pages() returns 0; otherwise the + * negative of one of the following errors is returned.
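+ *
+ * An illustrative lifecycle sketch (error handling omitted; "buf", "len"
+ * and "epd" are assumed to exist and are not from the original text):
+ *
+ *	scif_pinned_pages_t pp;
+ *	off_t po;
+ *
+ *	scif_pin_pages(buf, len, SCIF_PROT_READ | SCIF_PROT_WRITE,
+ *		       SCIF_MAP_KERNEL, &pp);
+ *	po = scif_register_pinned_pages(epd, pp, 0, 0);
+ *	/* ... peer performs RMA operations against the window ... */
+ *	scif_unregister(epd, po, len);
+ *	scif_unpin_pages(pp);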
+ * + * Errors: + * EINVAL - pinned_pages is not valid + */ +int scif_unpin_pages(scif_pinned_pages_t pinned_pages); + +/** + * scif_register_pinned_pages() - Mark a memory region for remote access. + * @epd: endpoint descriptor + * @pinned_pages: Handle to pinned pages + * @offset: Registered address space offset + * @map_flags: Flags which control where pages are mapped + * + * The scif_register_pinned_pages() function opens a window, a range of whole + * pages of the registered address space of the endpoint epd, starting at + * offset po. The value of po, further described below, is a function of the + * parameters offset and pinned_pages, and the value of map_flags. Each page of + * the window represents a corresponding physical memory page of the range + * represented by pinned_pages; the length of the window is the same as the + * length of range represented by pinned_pages. A successful + * scif_register_pinned_pages() call returns po as the return value. + * + * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset + * exactly, and offset is constrained to be a multiple of the page size. The + * mapping established by scif_register_pinned_pages() will not replace any + * existing registration; an error is returned if any page of the new window + * would intersect an existing window. + * + * When SCIF_MAP_FIXED is not set, the implementation uses offset in an + * implementation-defined manner to arrive at po. The po so chosen will be an + * area of the registered address space that the implementation deems suitable + * for a mapping of the required size. An offset value of 0 is interpreted as + * granting the implementation complete freedom in selecting po, subject to + * constraints described below. A non-zero value of offset is taken to be a + * suggestion of an offset near which the mapping should be placed. When the + * implementation selects a value for po, it does not replace any extant + * window. In all cases, po will be a multiple of the page size. + * + * The physical pages which are so represented by a window are available for + * access in calls to scif_get_pages(), scif_readfrom(), scif_writeto(), + * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the + * physical pages represented by the window will not be reused by the memory + * subsystem for any other purpose. Note that the same physical page may be + * represented by multiple windows. + * + * Windows created by scif_register_pinned_pages() are unregistered by + * scif_unregister(). + * + * As described above, the map_flags argument can be set to SCIF_MAP_FIXED, + * in which case offset is interpreted as a fixed offset. + * + * Return: + * Upon successful completion, scif_register_pinned_pages() returns the offset + * at which the mapping was placed (po); otherwise the negative of one of the + * following errors is returned.
+ * + * Errors: + * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags and pages in the new window + * would intersect an existing window + * EAGAIN - The mapping could not be performed due to lack of resources + * ECONNRESET - Connection reset by peer + * EINVAL - map_flags is invalid, or SCIF_MAP_FIXED is set in map_flags, and + * offset is not a multiple of the page size, or offset is negative + * ENODEV - The remote node is lost or exited, but is not currently in the + * network since it may have crashed + * ENOMEM - Not enough space + * ENOTCONN - The endpoint is not connected + */ +off_t scif_register_pinned_pages(scif_epd_t epd, + scif_pinned_pages_t pinned_pages, + off_t offset, int map_flags); + +/** + * scif_get_pages() - Add references to remote registered pages + * @epd: endpoint descriptor + * @offset: remote registered offset + * @len: length of range of pages + * @pages: returned scif_range structure + * + * scif_get_pages() returns the addresses of the physical pages represented by + * those pages of the registered address space of the peer of epd, starting at + * offset and continuing for len bytes. offset and len are constrained to be + * multiples of the page size. + * + * All of the pages in the specified range [offset, offset + len - 1] must be + * within a single window of the registered address space of the peer of epd. + * + * The addresses are returned as a virtually contiguous array pointed to by the + * phys_addr component of the scif_range structure whose address is returned in + * pages. The nr_pages component of scif_range is the length of the array. The + * prot_flags component of scif_range holds the protection flag value passed + * when the pages were registered. + * + * Each physical page whose address is returned by scif_get_pages() remains + * available and will not be released for reuse until the scif_range structure + * is returned in a call to scif_put_pages(). The scif_range structure returned + * by scif_get_pages() must be unmodified. + * + * It is an error to call scif_close() on an endpoint on which a scif_range + * structure of that endpoint has not been returned to scif_put_pages(). + * + * Return: + * Upon successful completion, scif_get_pages() returns 0; otherwise the + * negative of one of the following errors is returned. + * Errors: + * ECONNRESET - Connection reset by peer. + * EINVAL - offset is not a multiple of the page size, or offset is negative, or + * len is not a multiple of the page size + * ENODEV - The remote node is lost or exited, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid + * for the registered address space of the peer epd + */ +int scif_get_pages(scif_epd_t epd, off_t offset, size_t len, + struct scif_range **pages); + +/** + * scif_put_pages() - Remove references from remote registered pages + * @pages: pages to be returned + * + * scif_put_pages() releases a scif_range structure previously obtained by + * calling scif_get_pages(). The physical pages represented by pages may + * be reused when the window which represented those pages is unregistered. + * Therefore, those pages must not be accessed after calling scif_put_pages(). + * + * Return: + * Upon successful completion, scif_put_pages() returns 0; otherwise the + * negative of one of the following errors is returned.
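+ *
+ * A short pairing sketch (assumed variables, no error handling; not part
+ * of the original text):
+ *
+ *	struct scif_range *pages;
+ *
+ *	if (!scif_get_pages(epd, offset, len, &pages)) {
+ *		/* use pages->phys_addr[0 .. pages->nr_pages - 1] */
+ *		scif_put_pages(pages);
+ *	}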
+ * Errors: + * EINVAL - pages does not point to a valid scif_range structure, or + * the scif_range structure pointed to by pages was already returned + * ENODEV - The remote node is lost or exited, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + */ +int scif_put_pages(struct scif_range *pages); + +/** + * scif_poll() - Wait for some event on an endpoint + * @epds: Array of endpoint descriptors + * @nepds: Length of epds + * @timeout: Upper limit on time for which scif_poll() will block + * + * scif_poll() waits for one of a set of endpoints to become ready to perform + * an I/O operation. + * + * The epds argument specifies the endpoint descriptors to be examined and the + * events of interest for each endpoint descriptor. epds is a pointer to an + * array with one member for each open endpoint descriptor of interest. + * + * The number of items in the epds array is specified in nepds. The epd field + * of scif_pollepd is an endpoint descriptor of an open endpoint. The field + * events is a bitmask specifying the events which the application is + * interested in. The field revents is an output parameter, filled by the + * kernel with the events that actually occurred. The bits returned in revents + * can include any of those specified in events, or one of the values POLLERR, + * POLLHUP, or POLLNVAL. (These three bits are meaningless in the events + * field, and will be set in the revents field whenever the corresponding + * condition is true.) + * + * If none of the events requested (and no error) has occurred for any of the + * endpoint descriptors, then scif_poll() blocks until one of the events occurs. + * + * The timeout argument specifies an upper limit on the time for which + * scif_poll() will block, in milliseconds. Specifying a negative value in + * timeout means an infinite timeout. + * + * The following bits may be set in events and returned in revents. + * POLLIN - Data may be received without blocking. For a connected + * endpoint, this means that scif_recv() may be called without blocking. For a + * listening endpoint, this means that scif_accept() may be called without + * blocking. + * POLLOUT - Data may be sent without blocking. For a connected endpoint, this + * means that scif_send() may be called without blocking. POLLOUT may also be + * used to block waiting for a non-blocking connect to complete. This bit value + * has no meaning for a listening endpoint and is ignored if specified. + * + * The following bits are only returned in revents, and are ignored if set in + * events. + * POLLERR - An error occurred on the endpoint + * POLLHUP - The connection to the peer endpoint was disconnected + * POLLNVAL - The specified endpoint descriptor is invalid. + * + * Return: + * Upon successful completion, scif_poll() returns a non-negative value. A + * positive value indicates the total number of endpoint descriptors that have + * been selected (that is, endpoint descriptors for which the revents member is + * non-zero). A value of 0 indicates that the call timed out and no endpoint + * descriptors have been selected. Otherwise in user mode -1 is returned and + * errno is set to indicate the error; in kernel mode the negative of one of + * the following errors is returned.
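+ *
+ * A minimal blocking-read sketch (the endpoint "epd" is assumed):
+ *
+ *	struct scif_pollepd pe = { .epd = epd, .events = POLLIN };
+ *	int ret = scif_poll(&pe, 1, 2000);	\/* wait up to 2 seconds *\/
+ *
+ *	if (ret > 0 && (pe.revents & POLLIN))
+ *		;	\/* scif_recv() will not block now *\/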
+ * + * Errors: + * EINTR - A signal occurred before any requested event + * EINVAL - The nepds argument is greater than {OPEN_MAX} + * ENOMEM - There was no space to allocate file descriptor tables + */ +int scif_poll(struct scif_pollepd *epds, unsigned int nepds, long timeout); + +/** + * scif_client_register() - Register a SCIF client + * @client: client to be registered + * + * scif_client_register() registers a SCIF client. The probe() method + * of the client is called when SCIF peer devices come online and the + * remove() method is called when the peer devices disappear. + * + * Return: + * Upon successful completion, scif_client_register() returns a non-negative + * value. Otherwise the return value is the same as subsys_interface_register() + * in the kernel. + */ +int scif_client_register(struct scif_client *client); + +/** + * scif_client_unregister() - Unregister a SCIF client + * @client: client to be unregistered + * + * scif_client_unregister() unregisters a SCIF client. + * + * Return: + * None + */ +void scif_client_unregister(struct scif_client *client); #endif /* __SCIF_H__ */ diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index f4265039a94c..2296e6b2f690 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -95,4 +95,15 @@ static inline void get_seccomp_filter(struct task_struct *tsk) return; } #endif /* CONFIG_SECCOMP_FILTER */ + +#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE) +extern long seccomp_get_filter(struct task_struct *task, + unsigned long filter_off, void __user *data); +#else +static inline long seccomp_get_filter(struct task_struct *task, + unsigned long n, void __user *data) +{ + return -EINVAL; +} +#endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */ #endif /* _LINUX_SECCOMP_H */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 4398411236f1..4355129fff91 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -463,6 +463,15 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, return delta_us; } +static inline bool skb_mstamp_after(const struct skb_mstamp *t1, + const struct skb_mstamp *t0) +{ + s32 diff = t1->stamp_jiffies - t0->stamp_jiffies; + + if (!diff) + diff = t1->stamp_us - t0->stamp_us; + return diff > 0; +} /** * struct sk_buff - socket buffer @@ -1215,7 +1224,7 @@ static inline int skb_cloned(const struct sk_buff *skb) static inline int skb_unclone(struct sk_buff *skb, gfp_t pri) { - might_sleep_if(pri & __GFP_WAIT); + might_sleep_if(gfpflags_allow_blocking(pri)); if (skb_cloned(skb)) return pskb_expand_head(skb, 0, 0, pri); @@ -1299,7 +1308,7 @@ static inline int skb_shared(const struct sk_buff *skb) */ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri) { - might_sleep_if(pri & __GFP_WAIT); + might_sleep_if(gfpflags_allow_blocking(pri)); if (skb_shared(skb)) { struct sk_buff *nskb = skb_clone(skb, pri); @@ -1335,7 +1344,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri) static inline struct sk_buff *skb_unshare(struct sk_buff *skb, gfp_t pri) { - might_sleep_if(pri & __GFP_WAIT); + might_sleep_if(gfpflags_allow_blocking(pri)); if (skb_cloned(skb)) { struct sk_buff *nskb = skb_copy(skb, pri); diff --git a/include/linux/slab.h b/include/linux/slab.h index 7e37d448ed91..7c82e3b307a3 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -111,7 +111,7 @@ struct mem_cgroup; * struct kmem_cache related prototypes */ void __init kmem_cache_init(void); -int 
slab_is_available(void); +bool slab_is_available(void); struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, unsigned long, diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h index e6109a6cd8f6..12910cf19869 100644 --- a/include/linux/smpboot.h +++ b/include/linux/smpboot.h @@ -24,9 +24,6 @@ struct smpboot_thread_data; * parked (cpu offline) * @unpark: Optional unpark function, called when the thread is * unparked (cpu online) - * @pre_unpark: Optional unpark function, called before the thread is - * unparked (cpu online). This is not guaranteed to be - * called on the target cpu of the thread. Careful! * @cpumask: Internal state. To update which threads are unparked, * call smpboot_update_cpumask_percpu_thread(). * @selfparking: Thread is not parked by the park function. @@ -42,7 +39,6 @@ struct smp_hotplug_thread { void (*cleanup)(unsigned int cpu, bool online); void (*park)(unsigned int cpu); void (*unpark)(unsigned int cpu); - void (*pre_unpark)(unsigned int cpu); cpumask_var_t cpumask; bool selfparking; const char *thread_comm; diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 6b00f18f5e6b..cce80e6dc7d1 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -51,6 +51,8 @@ extern struct bus_type spi_bus_type; * @bytes_tx: number of bytes sent to device * @bytes_rx: number of bytes received from device * + * @transfer_bytes_histo: + * transfer bytes histogram */ struct spi_statistics { spinlock_t lock; /* lock for the whole structure */ @@ -68,6 +70,8 @@ struct spi_statistics { unsigned long long bytes_rx; unsigned long long bytes_tx; +#define SPI_STATISTICS_HISTO_SIZE 17 + unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE]; }; void spi_statistics_add_transfer_stats(struct spi_statistics *stats, @@ -250,7 +254,7 @@ static inline struct spi_driver *to_spi_driver(struct device_driver *drv) return drv ? container_of(drv, struct spi_driver, driver) : NULL; } -extern int spi_register_driver(struct spi_driver *sdrv); +extern int __spi_register_driver(struct module *owner, struct spi_driver *sdrv); /** * spi_unregister_driver - reverse effect of spi_register_driver * @@ -263,6 +267,10 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) driver_unregister(&sdrv->driver); } +/* use a define to avoid include chaining to get THIS_MODULE */ +#define spi_register_driver(driver) \ + __spi_register_driver(THIS_MODULE, driver) + /** * module_spi_driver() - Helper macro for registering a SPI driver * @__spi_driver: spi_driver struct @@ -843,8 +851,10 @@ extern int spi_bus_unlock(struct spi_master *master); * @len: data buffer size * Context: can sleep * - * This writes the buffer and returns zero or a negative error code. + * This function writes the buffer @buf. * Callable only from contexts that can sleep. + * + * Return: zero on success, else a negative error code. */ static inline int spi_write(struct spi_device *spi, const void *buf, size_t len) @@ -867,8 +877,10 @@ spi_write(struct spi_device *spi, const void *buf, size_t len) * @len: data buffer size * Context: can sleep * - * This reads the buffer and returns zero or a negative error code. + * This function reads the buffer @buf. * Callable only from contexts that can sleep. + * + * Return: zero on success, else a negative error code. */ static inline int spi_read(struct spi_device *spi, void *buf, size_t len) @@ -895,7 +907,7 @@ spi_read(struct spi_device *spi, void *buf, size_t len) * * For more specific semantics see spi_sync().
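 *
 * A short write-then-read sketch (the "cmd" and "data" buffers are made up
 * for the example, not from the original document):
 *
 *	struct spi_transfer xfers[2] = {
 *		{ .tx_buf = cmd,  .len = sizeof(cmd)  },
 *		{ .rx_buf = data, .len = sizeof(data) },
 *	};
 *
 *	ret = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));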
* - * It returns zero on success, else a negative error code. + * Return: zero on success, else a negative error code. */ static inline int spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers, @@ -919,9 +931,10 @@ extern int spi_write_then_read(struct spi_device *spi, * @cmd: command to be written before data is read back * Context: can sleep * - * This returns the (unsigned) eight bit number returned by the - * device, or else a negative error code. Callable only from - * contexts that can sleep. + * Callable only from contexts that can sleep. + * + * Return: the (unsigned) eight bit number returned by the + * device, or else a negative error code. */ static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd) { @@ -940,12 +953,13 @@ static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd) * @cmd: command to be written before data is read back * Context: can sleep * - * This returns the (unsigned) sixteen bit number returned by the - * device, or else a negative error code. Callable only from - * contexts that can sleep. - * * The number is returned in wire-order, which is at least sometimes * big-endian. + * + * Callable only from contexts that can sleep. + * + * Return: the (unsigned) sixteen bit number returned by the + * device, or else a negative error code. */ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd) { @@ -964,13 +978,13 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd) * @cmd: command to be written before data is read back * Context: can sleep * - * This returns the (unsigned) sixteen bit number returned by the device in cpu - * endianness, or else a negative error code. Callable only from contexts that - * can sleep. - * * This function is similar to spi_w8r16, with the exception that it will * convert the read 16 bit data word from big-endian to native endianness. + * + * Callable only from contexts that can sleep. + * + * Return: the (unsigned) sixteen bit number returned by the device in cpu + * endianness, or else a negative error code.
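+ *
+ * For example (the register constant is hypothetical), reading a
+ * big-endian 16-bit status register:
+ *
+ *	ssize_t status = spi_w8r16be(spi, MYCHIP_REG_STATUS);
+ *	if (status < 0)
+ *		return status;	\/* error *\/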
static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h index 85578d4be034..154788ed218c 100644 --- a/include/linux/spi/spi_bitbang.h +++ b/include/linux/spi/spi_bitbang.h @@ -4,7 +4,7 @@ #include <linux/workqueue.h> struct spi_bitbang { - spinlock_t lock; + struct mutex lock; u8 busy; u8 use_dma; u8 flags; /* extra spi->mode support */ diff --git a/include/linux/spmi.h b/include/linux/spmi.h index f84212cd3b7d..1396a255d2a2 100644 --- a/include/linux/spmi.h +++ b/include/linux/spmi.h @@ -153,7 +153,9 @@ static inline struct spmi_driver *to_spmi_driver(struct device_driver *d) return container_of(d, struct spmi_driver, driver); } -int spmi_driver_register(struct spmi_driver *sdrv); +#define spmi_driver_register(sdrv) \ + __spmi_driver_register(sdrv, THIS_MODULE) +int __spmi_driver_register(struct spmi_driver *sdrv, struct module *owner); /** * spmi_driver_unregister() - unregister an SPMI client driver diff --git a/include/linux/srcu.h b/include/linux/srcu.h index bdeb4567b71e..f5f80c5643ac 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -215,8 +215,11 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp) */ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) { - int retval = __srcu_read_lock(sp); + int retval; + preempt_disable(); + retval = __srcu_read_lock(sp); + preempt_enable(); rcu_lock_acquire(&(sp)->dep_map); return retval; } diff --git a/include/linux/stm.h b/include/linux/stm.h new file mode 100644 index 000000000000..9d0083d364e6 --- /dev/null +++ b/include/linux/stm.h @@ -0,0 +1,126 @@ +/* + * System Trace Module (STM) infrastructure apis + * Copyright (C) 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef _STM_H_ +#define _STM_H_ + +#include <linux/device.h> + +/** + * enum stp_packet_type - STP packets that an STM driver sends + */ +enum stp_packet_type { + STP_PACKET_DATA = 0, + STP_PACKET_FLAG, + STP_PACKET_USER, + STP_PACKET_MERR, + STP_PACKET_GERR, + STP_PACKET_TRIG, + STP_PACKET_XSYNC, +}; + +/** + * enum stp_packet_flags - STP packet modifiers + */ +enum stp_packet_flags { + STP_PACKET_MARKED = 0x1, + STP_PACKET_TIMESTAMPED = 0x2, +}; + +struct stp_policy; + +struct stm_device; + +/** + * struct stm_data - STM device description and callbacks + * @name: device name + * @stm: internal structure, only used by stm class code + * @sw_start: first STP master available to software + * @sw_end: last STP master available to software + * @sw_nchannels: number of STP channels per master + * @sw_mmiosz: size of one channel's IO space, for mmap, optional + * @packet: callback that sends an STP packet + * @mmio_addr: mmap callback, optional + * @link: called when a new stm_source gets linked to us, optional + * @unlink: likewise for unlinking, again optional + * @set_options: set device-specific options on a channel + * + * Fill out this structure before calling stm_register_device() to create + * an STM device and stm_unregister_device() to destroy it.
It will also be + * passed back to @packet(), @mmio_addr(), @link(), @unlink() and @set_options() + * callbacks. + * + * Normally, an STM device will have a range of masters available to software + * and the rest being statically assigned to various hardware trace sources. + * The former is defined by the the range [@sw_start..@sw_end] of the device + * description. That is, the lowest master that can be allocated to software + * writers is @sw_start and data from this writer will appear is @sw_start + * master in the STP stream. + */ +struct stm_data { + const char *name; + struct stm_device *stm; + unsigned int sw_start; + unsigned int sw_end; + unsigned int sw_nchannels; + unsigned int sw_mmiosz; + ssize_t (*packet)(struct stm_data *, unsigned int, + unsigned int, unsigned int, + unsigned int, unsigned int, + const unsigned char *); + phys_addr_t (*mmio_addr)(struct stm_data *, unsigned int, + unsigned int, unsigned int); + int (*link)(struct stm_data *, unsigned int, + unsigned int); + void (*unlink)(struct stm_data *, unsigned int, + unsigned int); + long (*set_options)(struct stm_data *, unsigned int, + unsigned int, unsigned int, + unsigned long); +}; + +int stm_register_device(struct device *parent, struct stm_data *stm_data, + struct module *owner); +void stm_unregister_device(struct stm_data *stm_data); + +struct stm_source_device; + +/** + * struct stm_source_data - STM source device description and callbacks + * @name: device name, will be used for policy lookup + * @src: internal structure, only used by stm class code + * @nr_chans: number of channels to allocate + * @link: called when this source gets linked to an STM device + * @unlink: called when this source is about to get unlinked from its STM + * + * Fill in this structure before calling stm_source_register_device() to + * register a source device. Also pass it to unregister and write calls. 
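To make the stm_source registration flow described above concrete, here is a minimal sketch (the foo_* names, the channel count and the payload are invented for illustration; <linux/stm.h> is assumed):

static struct stm_source_data foo_stm = {
	.name     = "foo",	/* matched against the STP policy */
	.nr_chans = 4,		/* channels to allocate when linked */
};

static int foo_trace_init(struct device *parent)
{
	int ret;

	ret = stm_source_register_device(parent, &foo_stm);
	if (ret)
		return ret;

	/* once linked to an STM device, emit a record on channel 0 */
	stm_source_write(&foo_stm, 0, "hello", 5);
	return 0;
}
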
+struct stm_source_data { + const char *name; + struct stm_source_device *src; + unsigned int percpu; + unsigned int nr_chans; + int (*link)(struct stm_source_data *data); + void (*unlink)(struct stm_source_data *data); +}; + +int stm_source_register_device(struct device *parent, + struct stm_source_data *data); +void stm_source_unregister_device(struct stm_source_data *data); + +int stm_source_write(struct stm_source_data *data, unsigned int chan, + const char *buf, size_t count); + +#endif /* _STM_H_ */ diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 414d924318ce..0adedca24c5b 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -33,6 +33,8 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, struct cpu_stop_work *work_buf); int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); +void stop_machine_park(int cpu); +void stop_machine_unpark(int cpu); #else /* CONFIG_SMP */ diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h index 8df43c9f11dc..4397a4824c81 100644 --- a/include/linux/sunrpc/bc_xprt.h +++ b/include/linux/sunrpc/bc_xprt.h @@ -38,6 +38,11 @@ void xprt_free_bc_request(struct rpc_rqst *req); int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); +/* Socket backchannel transport methods */ +int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs); +void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs); +void xprt_free_bc_rqst(struct rpc_rqst *req); + /* * Determine if a shared backchannel is in use */ diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 7ccc961f33e9..f869807a0d0e 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -105,11 +105,9 @@ struct svc_rdma_chunk_sge { }; struct svc_rdma_fastreg_mr { struct ib_mr *mr; - void *kva; - struct ib_fast_reg_page_list *page_list; - int page_list_len; + struct scatterlist *sg; + int sg_nents; unsigned long access_flags; - unsigned long map_len; enum dma_data_direction direction; struct list_head frmr_list; }; @@ -228,9 +226,13 @@ extern void svc_rdma_put_frmr(struct svcxprt_rdma *, struct svc_rdma_fastreg_mr *); extern void svc_sq_reap(struct svcxprt_rdma *); extern void svc_rq_reap(struct svcxprt_rdma *); -extern struct svc_xprt_class svc_rdma_class; extern void svc_rdma_prep_reply_hdr(struct svc_rqst *); +extern struct svc_xprt_class svc_rdma_class; +#ifdef CONFIG_SUNRPC_BACKCHANNEL +extern struct svc_xprt_class svc_rdma_bc_class; +#endif + /* svc_rdma.c */ extern int svc_rdma_init(void); extern void svc_rdma_cleanup(void); diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 0fb9acbb4780..69ef5b3ab038 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -54,6 +54,8 @@ enum rpc_display_format_t { struct rpc_task; struct rpc_xprt; struct seq_file; +struct svc_serv; +struct net; /* * This describes a complete RPC request @@ -136,6 +138,12 @@ struct rpc_xprt_ops { int (*enable_swap)(struct rpc_xprt *xprt); void (*disable_swap)(struct rpc_xprt *xprt); void (*inject_disconnect)(struct rpc_xprt *xprt); + int (*bc_setup)(struct rpc_xprt *xprt, + unsigned int min_reqs); + int (*bc_up)(struct svc_serv *serv, struct net *net); + void (*bc_free_rqst)(struct rpc_rqst *rqst); + void (*bc_destroy)(struct rpc_xprt *xprt,
+ unsigned int max_reqs); }; /* @@ -153,6 +161,7 @@ enum xprt_transports { XPRT_TRANSPORT_TCP = IPPROTO_TCP, XPRT_TRANSPORT_BC_TCP = IPPROTO_TCP | XPRT_TRANSPORT_BC, XPRT_TRANSPORT_RDMA = 256, + XPRT_TRANSPORT_BC_RDMA = XPRT_TRANSPORT_RDMA | XPRT_TRANSPORT_BC, XPRT_TRANSPORT_LOCAL = 257, }; diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 357e44c1a46b..0ece4ba06f06 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -44,6 +44,8 @@ struct sock_xprt { */ unsigned long sock_state; struct delayed_work connect_worker; + struct work_struct recv_worker; + struct mutex recv_mutex; struct sockaddr_storage srcaddr; unsigned short srcport; diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 5efe743ce1e8..8b6ec7ef0854 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -202,6 +202,36 @@ struct platform_freeze_ops { extern void suspend_set_ops(const struct platform_suspend_ops *ops); extern int suspend_valid_only_mem(suspend_state_t state); +extern unsigned int pm_suspend_global_flags; + +#define PM_SUSPEND_FLAG_FW_SUSPEND (1 << 0) +#define PM_SUSPEND_FLAG_FW_RESUME (1 << 1) + +static inline void pm_suspend_clear_flags(void) +{ + pm_suspend_global_flags = 0; +} + +static inline void pm_set_suspend_via_firmware(void) +{ + pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_SUSPEND; +} + +static inline void pm_set_resume_via_firmware(void) +{ + pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_RESUME; +} + +static inline bool pm_suspend_via_firmware(void) +{ + return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_SUSPEND); +} + +static inline bool pm_resume_via_firmware(void) +{ + return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_RESUME); +} + /* Suspend-to-idle state machine. */ enum freeze_state { FREEZE_STATE_NONE, /* Not suspended/suspending. */
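A hedged sketch of how these new flags are meant to be used, split between platform suspend/resume code and a driver callback (all foo_* functions are invented for illustration):

/* platform code, before handing the final suspend step to the firmware: */
static void foo_platform_suspend_begin(void)
{
	pm_suspend_clear_flags();
	pm_set_suspend_via_firmware();
}

/* platform code, after waking up under firmware control: */
static void foo_platform_resume_finish(void)
{
	pm_set_resume_via_firmware();
}

/* a driver's resume callback can then skip work the firmware already did: */
static int foo_driver_resume(struct device *dev)
{
	if (pm_resume_via_firmware()) {
		/* firmware restored the device context for us */
		return 0;
	}

	/* otherwise perform a full re-initialization here */
	return 0;
}
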
@@ -241,6 +271,12 @@ extern int pm_suspend(suspend_state_t state); #else /* !CONFIG_SUSPEND */ #define suspend_valid_only_mem NULL +static inline void pm_suspend_clear_flags(void) {} +static inline void pm_set_suspend_via_firmware(void) {} +static inline void pm_set_resume_via_firmware(void) {} +static inline bool pm_suspend_via_firmware(void) { return false; } +static inline bool pm_resume_via_firmware(void) { return false; } + static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } static inline bool idle_should_freeze(void) { return false; } @@ -387,10 +423,12 @@ extern int unregister_pm_notifier(struct notifier_block *nb); /* drivers/base/power/wakeup.c */ extern bool events_check_enabled; +extern unsigned int pm_wakeup_irq; extern bool pm_wakeup_pending(void); extern void pm_system_wakeup(void); extern void pm_wakeup_clear(void); +extern void pm_system_irq_wakeup(unsigned int irq_number); extern bool pm_get_wakeup_count(unsigned int *count, bool block); extern bool pm_save_wakeup_count(unsigned int count); extern void pm_wakep_autosleep_enabled(bool set); @@ -440,6 +478,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb) static inline bool pm_wakeup_pending(void) { return false; } static inline void pm_system_wakeup(void) {} static inline void pm_wakeup_clear(void) {} +static inline void pm_system_irq_wakeup(unsigned int irq_number) {} static inline void lock_system_sleep(void) {} static inline void unlock_system_sleep(void) {} diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a460e2ef2843..a156b82dd14c 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -887,4 +887,6 @@ asmlinkage long sys_execveat(int dfd, const char __user *filename, asmlinkage long sys_membarrier(int cmd, int flags); +asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags); + #endif diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 9f65758311a4..ea090eaf468c 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -268,6 +268,9 @@ int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name, struct kobject *target, const char *link_name); void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name, const char *link_name); +int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, + struct kobject *target_kobj, + const char *target_name); void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); @@ -451,6 +454,14 @@ static inline void sysfs_remove_link_from_group(struct kobject *kobj, { } +static inline int __compat_only_sysfs_link_entry_to_kobj( + struct kobject *kobj, + struct kobject *target_kobj, + const char *target_name) +{ + return 0; +} + static inline void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr) { diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h index 6a8b9942632d..dd8de82cf5b5 100644 --- a/include/linux/t10-pi.h +++ b/include/linux/t10-pi.h @@ -14,9 +14,9 @@ struct t10_pi_tuple { }; -extern struct blk_integrity t10_pi_type1_crc; -extern struct blk_integrity t10_pi_type1_ip; -extern struct blk_integrity t10_pi_type3_crc; -extern struct blk_integrity t10_pi_type3_ip; +extern struct blk_integrity_profile t10_pi_type1_crc; +extern struct blk_integrity_profile t10_pi_type1_ip; +extern struct blk_integrity_profile t10_pi_type3_crc; +extern struct blk_integrity_profile t10_pi_type3_ip; #endif diff --git
a/include/linux/tcp.h b/include/linux/tcp.h index 48c3696e8645..c906f4534581 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -112,10 +112,11 @@ struct tcp_request_sock_ops; struct tcp_request_sock { struct inet_request_sock req; const struct tcp_request_sock_ops *af_specific; + struct skb_mstamp snt_synack; /* first SYNACK sent time */ bool tfo_listener; + u32 txhash; u32 rcv_isn; u32 snt_isn; - u32 snt_synack; /* synack sent time */ u32 last_oow_ack_time; /* last SYNACK */ u32 rcv_nxt; /* the ack # by SYNACK. For * FastOpen it's the seq# @@ -193,6 +194,12 @@ struct tcp_sock { u32 window_clamp; /* Maximal window to advertise */ u32 rcv_ssthresh; /* Current window clamp */ + /* Information of the most recently (s)acked skb */ + struct tcp_rack { + struct skb_mstamp mstamp; /* (Re)sent time of the skb */ + u8 advanced; /* mstamp advanced since last lost marking */ + u8 reord; /* reordering detected */ + } rack; u16 advmss; /* Advertised MSS */ u8 unused; u8 nonagle : 4,/* Disable Nagle algorithm? */ @@ -216,6 +223,9 @@ struct tcp_sock { u32 mdev_max_us; /* maximal mdev for the last rtt period */ u32 rttvar_us; /* smoothed mdev_max */ u32 rtt_seq; /* sequence number to update rttvar */ + struct rtt_meas { + u32 rtt, ts; /* RTT in usec and sampling time in jiffies. */ + } rtt_min[3]; u32 packets_out; /* Packets which are "in flight" */ u32 retrans_out; /* Retransmitted packets out */ @@ -279,8 +289,6 @@ struct tcp_sock { int lost_cnt_hint; u32 retransmit_high; /* L-bits may be on up to this seqno */ - u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */ - u32 prior_ssthresh; /* ssthresh saved at recovery start */ u32 high_seq; /* snd_nxt at onset of congestion */ @@ -355,8 +363,8 @@ static inline struct tcp_sock *tcp_sk(const struct sock *sk) struct tcp_timewait_sock { struct inet_timewait_sock tw_sk; - u32 tw_rcv_nxt; - u32 tw_snd_nxt; +#define tw_rcv_nxt tw_sk.__tw_common.skc_tw_rcv_nxt +#define tw_snd_nxt tw_sk.__tw_common.skc_tw_snd_nxt u32 tw_rcv_wnd; u32 tw_ts_offset; u32 tw_ts_recent; @@ -381,25 +389,12 @@ static inline bool tcp_passive_fastopen(const struct sock *sk) tcp_sk(sk)->fastopen_rsk != NULL); } -extern void tcp_sock_destruct(struct sock *sk); - -static inline int fastopen_init_queue(struct sock *sk, int backlog) +static inline void fastopen_queue_tune(struct sock *sk, int backlog) { - struct request_sock_queue *queue = - &inet_csk(sk)->icsk_accept_queue; - - if (queue->fastopenq == NULL) { - queue->fastopenq = kzalloc( - sizeof(struct fastopen_queue), - sk->sk_allocation); - if (queue->fastopenq == NULL) - return -ENOMEM; - - sk->sk_destruct = tcp_sock_destruct; - spin_lock_init(&queue->fastopenq->lock); - } - queue->fastopenq->max_qlen = backlog; - return 0; + struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; + int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn); + + queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn); } static inline void tcp_saved_syn_free(struct tcp_sock *tp) diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h index d4217eff489f..0a0d56834c8e 100644 --- a/include/linux/ti_wilink_st.h +++ b/include/linux/ti_wilink_st.h @@ -158,6 +158,7 @@ struct st_data_s { unsigned long ll_state; void *kim_data; struct tty_struct *tty; + struct work_struct work_write_wakeup; }; /* diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index ba0ae09cbb21..ec89d846324c 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -263,8 +263,8 @@ extern 
void timekeeping_inject_sleeptime64(struct timespec64 *delta); /* * PPS accessor */ -extern void getnstime_raw_and_real(struct timespec *ts_raw, - struct timespec *ts_real); +extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw, + struct timespec64 *ts_real); /* * Persistent clock related interfaces diff --git a/include/linux/timex.h b/include/linux/timex.h index 9d3f1a5b6178..39c25dbebfe8 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -152,7 +152,7 @@ extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */ #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ) extern int do_adjtimex(struct timex *); -extern void hardpps(const struct timespec *, const struct timespec *); +extern void hardpps(const struct timespec64 *, const struct timespec64 *); int read_current_timer(unsigned long *timer_val); void ntp_notify_cmos_timer(void); diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 8350c538b486..706e63eea080 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -30,6 +30,8 @@ #define TPM_ANY_NUM 0xFFFF struct tpm_chip; +struct trusted_key_payload; +struct trusted_key_options; struct tpm_class_ops { const u8 req_complete_mask; @@ -46,11 +48,22 @@ struct tpm_class_ops { #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) +extern int tpm_is_tpm2(u32 chip_num); extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf); extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash); extern int tpm_send(u32 chip_num, void *cmd, size_t buflen); extern int tpm_get_random(u32 chip_num, u8 *data, size_t max); +extern int tpm_seal_trusted(u32 chip_num, + struct trusted_key_payload *payload, + struct trusted_key_options *options); +extern int tpm_unseal_trusted(u32 chip_num, + struct trusted_key_payload *payload, + struct trusted_key_options *options); #else +static inline int tpm_is_tpm2(u32 chip_num) +{ + return -ENODEV; +} static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) { return -ENODEV; } @@ -63,5 +76,18 @@ static inline int tpm_send(u32 chip_num, void *cmd, size_t buflen) { static inline int tpm_get_random(u32 chip_num, u8 *data, size_t max) { return -ENODEV; } + +static inline int tpm_seal_trusted(u32 chip_num, + struct trusted_key_payload *payload, + struct trusted_key_options *options) +{ + return -ENODEV; +} +static inline int tpm_unseal_trusted(u32 chip_num, + struct trusted_key_payload *payload, + struct trusted_key_options *options) +{ + return -ENODEV; +} #endif #endif diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index ed27917cabc9..429fdfc3baf5 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -168,13 +168,12 @@ struct ring_buffer_event * trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer, int type, unsigned long len, unsigned long flags, int pc); -void trace_current_buffer_unlock_commit(struct ring_buffer *buffer, - struct ring_buffer_event *event, - unsigned long flags, int pc); -void trace_buffer_unlock_commit(struct ring_buffer *buffer, +void trace_buffer_unlock_commit(struct trace_array *tr, + struct ring_buffer *buffer, struct ring_buffer_event *event, unsigned long flags, int pc); -void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer, +void trace_buffer_unlock_commit_regs(struct trace_array *tr, + struct ring_buffer *buffer, struct ring_buffer_event *event, unsigned long flags, int pc, struct pt_regs *regs); @@ -329,6 +328,7 @@ enum { EVENT_FILE_FL_SOFT_DISABLED_BIT, 
EVENT_FILE_FL_TRIGGER_MODE_BIT, EVENT_FILE_FL_TRIGGER_COND_BIT, + EVENT_FILE_FL_PID_FILTER_BIT, }; /* @@ -342,6 +342,7 @@ enum { * tracepoint may be enabled) * TRIGGER_MODE - When set, invoke the triggers associated with the event * TRIGGER_COND - When set, one or more triggers has an associated filter + * PID_FILTER - When set, the event is filtered based on pid */ enum { EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT), @@ -352,6 +353,7 @@ enum { EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT), EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT), EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT), + EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT), }; struct trace_event_file { @@ -430,6 +432,8 @@ extern enum event_trigger_type event_triggers_call(struct trace_event_file *file extern void event_triggers_post_call(struct trace_event_file *file, enum event_trigger_type tt); +bool trace_event_ignore_this_pid(struct trace_event_file *trace_file); + /** * trace_trigger_soft_disabled - do triggers and test if soft disabled * @file: The file pointer of the event to test @@ -449,6 +453,8 @@ trace_trigger_soft_disabled(struct trace_event_file *file) event_triggers_call(file, NULL); if (eflags & EVENT_FILE_FL_SOFT_DISABLED) return true; + if (eflags & EVENT_FILE_FL_PID_FILTER) + return trace_event_ignore_this_pid(file); } return false; } @@ -508,7 +514,7 @@ event_trigger_unlock_commit(struct trace_event_file *file, enum event_trigger_type tt = ETT_NONE; if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) - trace_buffer_unlock_commit(buffer, event, irq_flags, pc); + trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc); if (tt) event_triggers_post_call(file, tt); @@ -540,7 +546,7 @@ event_trigger_unlock_commit_regs(struct trace_event_file *file, enum event_trigger_type tt = ETT_NONE; if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) - trace_buffer_unlock_commit_regs(buffer, event, + trace_buffer_unlock_commit_regs(file->tr, buffer, event, irq_flags, pc, regs); if (tt) diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 84d497297c5f..26c152122a42 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -50,6 +50,7 @@ #include <linux/ptrace.h> #include <linux/security.h> #include <linux/task_work.h> +#include <linux/memcontrol.h> struct linux_binprm; /* @@ -188,6 +189,8 @@ static inline void tracehook_notify_resume(struct pt_regs *regs) smp_mb__after_atomic(); if (unlikely(current->task_works)) task_work_run(); + + mem_cgroup_handle_over_high(); } #endif /* <linux/tracehook.h> */ diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index a5f7f3ecafa3..696a339c592c 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -26,6 +26,7 @@ struct notifier_block; struct tracepoint_func { void *func; void *data; + int prio; }; struct tracepoint { @@ -42,9 +43,14 @@ struct trace_enum_map { unsigned long enum_value; }; +#define TRACEPOINT_DEFAULT_PRIO 10 + extern int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data); extern int +tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data, + int prio); +extern int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data); extern void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv), @@ -111,7 +117,18 @@ extern void syscall_unregfunc(void); #define TP_ARGS(args...) 
args #define TP_CONDITION(args...) args -#ifdef CONFIG_TRACEPOINTS +/* + * Individual subsystems may have a separate configuration to + * enable their tracepoints. By default, this file will create + * the tracepoints if CONFIG_TRACEPOINTS is defined. If a subsystem + * wants to be able to disable its tracepoints from being created + * it can define NOTRACE before including the tracepoint headers. + */ +#if defined(CONFIG_TRACEPOINTS) && !defined(NOTRACE) +#define TRACEPOINTS_ENABLED +#endif + +#ifdef TRACEPOINTS_ENABLED /* * it_func[0] is never NULL because there is at least one element in the array @@ -167,10 +184,11 @@ extern void syscall_unregfunc(void); * structure. Force alignment to the same alignment as the section start. * * When lockdep is enabled, we make sure to always do the RCU portions of - * the tracepoint code, regardless of whether tracing is on or we match the - * condition. This lets us find RCU issues triggered with tracepoints even - * when this tracepoint is off. This code has no purpose other than poking - * RCU a bit. + * the tracepoint code, regardless of whether tracing is on. However, + * don't check if the condition is false, due to interaction with idle + * instrumentation. This lets us find RCU issues triggered with tracepoints + * even when this tracepoint is off. This code has no purpose other than + * poking RCU a bit. */ #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ extern struct tracepoint __tracepoint_##name; \ @@ -196,6 +214,13 @@ extern void syscall_unregfunc(void); (void *)probe, data); \ } \ static inline int \ + register_trace_prio_##name(void (*probe)(data_proto), void *data,\ + int prio) \ + { \ + return tracepoint_probe_register_prio(&__tracepoint_##name, \ + (void *)probe, data, prio); \ + } \ + static inline int \ unregister_trace_##name(void (*probe)(data_proto), void *data) \ { \ return tracepoint_probe_unregister(&__tracepoint_##name,\ @@ -234,7 +259,7 @@ extern void syscall_unregfunc(void); #define EXPORT_TRACEPOINT_SYMBOL(name) \ EXPORT_SYMBOL(__tracepoint_##name) -#else /* !CONFIG_TRACEPOINTS */ +#else /* !TRACEPOINTS_ENABLED */ #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ static inline void trace_##name(proto) \ { } \ @@ -266,7 +291,7 @@ extern void syscall_unregfunc(void); #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) #define EXPORT_TRACEPOINT_SYMBOL(name) -#endif /* CONFIG_TRACEPOINTS */ +#endif /* TRACEPOINTS_ENABLED */ #ifdef CONFIG_TRACING /** diff --git a/include/linux/tty.h b/include/linux/tty.h index d072ded41678..5b04b0a5375b 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -227,7 +227,6 @@ struct tty_port { int blocked_open; /* Waiting to open */ int count; /* Usage count */ wait_queue_head_t open_wait; /* Open waiters */ - wait_queue_head_t close_wait; /* Close waiters */ wait_queue_head_t delta_msr_wait; /* Modem status change */ unsigned long flags; /* TTY flags ASY_*/ unsigned char console:1, /* port is a console */ @@ -424,6 +423,7 @@ extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode, const char *routine); extern const char *tty_name(const struct tty_struct *tty); extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); +extern int __tty_check_change(struct tty_struct *tty, int sig); extern int tty_check_change(struct tty_struct *tty); extern void __stop_tty(struct tty_struct *tty); extern void stop_tty(struct tty_struct *tty); @@ -467,6 +467,8 @@ extern void tty_buffer_free_all(struct tty_port *port); extern void
tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld); extern void tty_buffer_init(struct tty_port *port); extern void tty_buffer_set_lock_subclass(struct tty_port *port); +extern bool tty_buffer_restart_work(struct tty_port *port); +extern bool tty_buffer_cancel_work(struct tty_port *port); extern speed_t tty_termios_baud_rate(struct ktermios *termios); extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); extern void tty_termios_encode_baud_rate(struct ktermios *termios, @@ -656,50 +658,6 @@ extern void __lockfunc tty_unlock(struct tty_struct *tty); extern void __lockfunc tty_lock_slave(struct tty_struct *tty); extern void __lockfunc tty_unlock_slave(struct tty_struct *tty); extern void tty_set_lock_subclass(struct tty_struct *tty); -/* - * this shall be called only from where BTM is held (like close) - * - * We need this to ensure nobody waits for us to finish while we are waiting. - * Without this we were encountering system stalls. - * - * This should be indeed removed with BTM removal later. - * - * Locking: BTM required. Nobody is allowed to hold port->mutex. - */ -static inline void tty_wait_until_sent_from_close(struct tty_struct *tty, - long timeout) -{ - tty_unlock(tty); /* tty->ops->close holds the BTM, drop it while waiting */ - tty_wait_until_sent(tty, timeout); - tty_lock(tty); -} - -/* - * wait_event_interruptible_tty -- wait for a condition with the tty lock held - * - * The condition we are waiting for might take a long time to - * become true, or might depend on another thread taking the - * BTM. In either case, we need to drop the BTM to guarantee - * forward progress. This is a leftover from the conversion - * from the BKL and should eventually get removed as the BTM - * falls out of use. - * - * Do not use in new code. - */ -#define wait_event_interruptible_tty(tty, wq, condition) \ -({ \ - int __ret = 0; \ - if (!(condition)) \ - __ret = __wait_event_interruptible_tty(tty, wq, \ - condition); \ - __ret; \ -}) - -#define __wait_event_interruptible_tty(tty, wq, condition) \ - ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \ - tty_unlock(tty); \ - schedule(); \ - tty_lock(tty)) #ifdef CONFIG_PROC_FS extern void proc_tty_register_driver(struct tty_driver *); diff --git a/include/linux/types.h b/include/linux/types.h index c314989d9158..70d8500bddf1 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -205,11 +205,25 @@ struct ustat { * struct callback_head - callback structure for use with RCU and task_work * @next: next update requests in a list * @func: actual update function to call after the grace period. + * + * The struct is aligned to the size of a pointer. On most architectures it + * happens naturally due to ABI requirements, but some architectures (like + * CRIS) have a weird ABI and we need to ask for it explicitly. + * + * The alignment is required to guarantee that bits 0 and 1 of @next will be + * clear under normal conditions -- as long as we use call_rcu(), + * call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback. + * + * This guarantee is important for a few reasons: + * - future call_rcu_lazy() will make use of lower bits in the pointer; + * - the structure shares storage space in struct page with @compound_head, + * which encodes PageTail() in bit 0. The guarantee is needed to avoid + * false-positive PageTail().
*/ struct callback_head { struct callback_head *next; void (*func)(struct callback_head *head); -}; +} __attribute__((aligned(sizeof(void *)))); #define rcu_head callback_head typedef void (*rcu_callback_t)(struct rcu_head *head); diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index d6f2c2c5b043..558129af828a 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -75,36 +75,6 @@ static inline unsigned long __copy_from_user_nocache(void *to, #endif /* ARCH_HAS_NOCACHE_UACCESS */ -/** - * probe_kernel_address(): safely attempt to read from a location - * @addr: address to read from - its type is type typeof(retval)* - * @retval: read into this variable - * - * Safely read from address @addr into variable @revtal. If a kernel fault - * happens, handle that and return -EFAULT. - * We ensure that the __get_user() is executed in atomic context so that - * do_page_fault() doesn't attempt to take mmap_sem. This makes - * probe_kernel_address() suitable for use within regions where the caller - * already holds mmap_sem, or other locks which nest inside mmap_sem. - * This must be a macro because __get_user() needs to know the types of the - * args. - * - * We don't include enough header files to be able to do the set_fs(). We - * require that the probe_kernel_address() caller will do that. - */ -#define probe_kernel_address(addr, retval) \ - ({ \ - long ret; \ - mm_segment_t old_fs = get_fs(); \ - \ - set_fs(KERNEL_DS); \ - pagefault_disable(); \ - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \ - pagefault_enable(); \ - set_fs(old_fs); \ - ret; \ - }) - /* * probe_kernel_read(): safely attempt to read from a location * @dst: pointer to the buffer that shall take the data @@ -131,4 +101,14 @@ extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); +/** + * probe_kernel_address(): safely attempt to read from a location + * @addr: address to read from + * @retval: read into this variable + * + * Returns 0 on success, or -EFAULT. + */ +#define probe_kernel_address(addr, retval) \ + probe_kernel_read(&retval, addr, sizeof(retval)) + #endif /* __LINUX_UACCESS_H__ */ diff --git a/include/linux/usb.h b/include/linux/usb.h index 447fe29b55b4..b9a28074210f 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -122,6 +122,8 @@ enum usb_interface_condition { * has been deferred. * @needs_binding: flag set when the driver should be re-probed or unbound * following a reset or suspend operation it doesn't support. + * @authorized: This allows (de)authorizing individual interfaces instead + * of a whole device, in contrast to the device authorization. * @dev: driver model's view of this device * @usb_dev: if an interface is bound to the USB major, this will point * to the sysfs representation for that device.
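Looking back at the uaccess.h hunk above: probe_kernel_address() is now a thin wrapper around probe_kernel_read(), and a caller might be sketched like this (foo_dump_word is invented for the example; <linux/uaccess.h> and the printk helpers are assumed):

static void foo_dump_word(const unsigned long *addr)
{
	unsigned long val;

	/* returns 0 on success, -EFAULT if the read would fault */
	if (probe_kernel_address(addr, val))
		pr_warn("foo: %p is not mapped\n", addr);
	else
		pr_info("foo: *%p = %#lx\n", addr, val);
}
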
@@ -178,6 +180,7 @@ struct usb_interface { unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */ unsigned needs_binding:1; /* needs delayed unbind/rebind */ unsigned resetting_device:1; /* true: bandwidth alloc after reset */ + unsigned authorized:1; /* used for interface authorization */ struct device dev; /* interface specific device info */ struct device *usb_dev; @@ -325,6 +328,7 @@ struct usb_host_bos { /* wireless cap descriptor is handled by wusb */ struct usb_ext_cap_descriptor *ext_cap; struct usb_ss_cap_descriptor *ss_cap; + struct usb_ssp_cap_descriptor *ssp_cap; struct usb_ss_container_id_descriptor *ss_id; }; diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h new file mode 100644 index 000000000000..b5706f94ee9e --- /dev/null +++ b/include/linux/usb/cdc.h @@ -0,0 +1,51 @@ +/* + * USB CDC common helpers + * + * Copyright (c) 2015 Oliver Neukum <oneukum@suse.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + */ +#ifndef __LINUX_USB_CDC_H +#define __LINUX_USB_CDC_H + +#include <uapi/linux/usb/cdc.h> + +/* + * unofficial magic numbers + */ + +#define CDC_PHONET_MAGIC_NUMBER 0xAB + +/* + * parsing CDC headers + */ + +struct usb_cdc_parsed_header { + struct usb_cdc_union_desc *usb_cdc_union_desc; + struct usb_cdc_header_desc *usb_cdc_header_desc; + + struct usb_cdc_call_mgmt_descriptor *usb_cdc_call_mgmt_descriptor; + struct usb_cdc_acm_descriptor *usb_cdc_acm_descriptor; + struct usb_cdc_country_functional_desc *usb_cdc_country_functional_desc; + struct usb_cdc_network_terminal_desc *usb_cdc_network_terminal_desc; + struct usb_cdc_ether_desc *usb_cdc_ether_desc; + struct usb_cdc_dmm_desc *usb_cdc_dmm_desc; + struct usb_cdc_mdlm_desc *usb_cdc_mdlm_desc; + struct usb_cdc_mdlm_detail_desc *usb_cdc_mdlm_detail_desc; + struct usb_cdc_obex_desc *usb_cdc_obex_desc; + struct usb_cdc_ncm_desc *usb_cdc_ncm_desc; + struct usb_cdc_mbim_desc *usb_cdc_mbim_desc; + struct usb_cdc_mbim_extended_desc *usb_cdc_mbim_extended_desc; + + bool phonet_magic_present; +}; + +struct usb_interface; +int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr, + struct usb_interface *intf, + u8 *buffer, + int buflen); + +#endif /* __LINUX_USB_CDC_H */ diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index 27603bcbb9b9..6cc96bb12ddc 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h @@ -32,9 +32,9 @@ #ifndef __LINUX_USB_CH9_H #define __LINUX_USB_CH9_H +#include <linux/device.h> #include <uapi/linux/usb/ch9.h> - /** * usb_speed_string() - Returns human readable-name of the speed. * @speed: The speed to return human-readable name for. If it's not @@ -43,6 +43,15 @@ */ extern const char *usb_speed_string(enum usb_device_speed speed); +/** + * usb_get_maximum_speed - Get maximum requested speed for a given USB + * controller. + * @dev: Pointer to the given USB controller device + * + * The function gets the maximum speed string from property "maximum-speed", + * and returns the corresponding enum usb_device_speed. + */ +extern enum usb_device_speed usb_get_maximum_speed(struct device *dev); /** * usb_state_string - Returns human readable name for the state.
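Before the chipidea changes below, a short sketch of the new usb_get_maximum_speed() helper in a hypothetical controller probe path (the foo_udc_probe function is invented; <linux/usb/ch9.h> and <linux/platform_device.h> are assumed):

static int foo_udc_probe(struct platform_device *pdev)
{
	enum usb_device_speed max_speed;

	/* parses the "maximum-speed" device property */
	max_speed = usb_get_maximum_speed(&pdev->dev);
	if (max_speed == USB_SPEED_UNKNOWN)
		max_speed = USB_SPEED_HIGH;	/* property absent: pick a default */

	/* ... cap the controller's operating speed at max_speed ... */
	return 0;
}
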
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index a41833cd184c..5dd75fa47dd8 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -5,9 +5,28 @@ #ifndef __LINUX_USB_CHIPIDEA_H #define __LINUX_USB_CHIPIDEA_H +#include <linux/extcon.h> #include <linux/usb/otg.h> struct ci_hdrc; + +/** + * struct ci_hdrc_cable - structure for external connector cable state tracking + * @state: current state of the line + * @changed: set to true when an extcon event happens + * @edev: device which generates events + * @ci: driver state of the chipidea device + * @nb: holds the event notification callback + * @conn: used for notification registration + */ +struct ci_hdrc_cable { + bool state; + bool changed; + struct extcon_dev *edev; + struct ci_hdrc *ci; + struct notifier_block nb; +}; + struct ci_hdrc_platform_data { const char *name; /* offset of the capability registers */ @@ -48,6 +67,11 @@ struct ci_hdrc_platform_data { u32 ahb_burst_config; u32 tx_burst_size; u32 rx_burst_size; + + /* VBUS and ID signal state tracking, using extcon framework */ + struct ci_hdrc_cable vbus_extcon; + struct ci_hdrc_cable id_extcon; + u32 phy_clkgate_delay_us; }; /* Default offset of capability registers */ diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index c14a69b36d27..3d583a10b926 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -215,6 +215,7 @@ struct usb_ep { struct list_head ep_list; struct usb_ep_caps caps; bool claimed; + bool enabled; unsigned maxpacket:16; unsigned maxpacket_limit:16; unsigned max_streams:16; @@ -264,7 +265,18 @@ static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep, */ static inline int usb_ep_enable(struct usb_ep *ep) { - return ep->ops->enable(ep, ep->desc); + int ret; + + if (ep->enabled) + return 0; + + ret = ep->ops->enable(ep, ep->desc); + if (ret) + return ret; + + ep->enabled = true; + + return 0; } /** @@ -281,7 +293,18 @@ static inline int usb_ep_enable(struct usb_ep *ep) */ static inline int usb_ep_disable(struct usb_ep *ep) { - return ep->ops->disable(ep); + int ret; + + if (!ep->enabled) + return 0; + + ret = ep->ops->disable(ep); + if (ret) + return ret; + + ep->enabled = false; + + return 0; } /** @@ -1233,6 +1256,8 @@ extern struct usb_ep *usb_ep_autoconfig_ss(struct usb_gadget *, struct usb_endpoint_descriptor *, struct usb_ss_ep_comp_descriptor *); +extern void usb_ep_autoconfig_release(struct usb_ep *); + extern void usb_ep_autoconfig_reset(struct usb_gadget *); #endif /* __LINUX_USB_GADGET_H */ diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index d2784c10bfe2..f89c24bd53a4 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -58,12 +58,6 @@ * * Since "struct usb_bus" is so thin, you can't share much code in it. * This framework is a layer over that, and should be more sharable. - * - * @authorized_default: Specifies if new devices are authorized to - * connect by default or they require explicit - * user space authorization; this bit is settable - * through /sys/class/usb_host/X/authorized_default. - * For the rest is RO, so we don't lock to r/w it. */ /*-------------------------------------------------------------------------*/ @@ -120,6 +114,8 @@ struct usb_hcd { #define HCD_FLAG_WAKEUP_PENDING 4 /* root hub is resuming? */ #define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ #define HCD_FLAG_DEAD 6 /* controller has died? */ +#define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? 
*/ +#define HCD_FLAG_DEV_AUTHORIZED 8 /* authorize devices? */ /* The flags can be tested using these macros; they are likely to * be slightly faster than test_bit(). @@ -131,6 +127,22 @@ struct usb_hcd { #define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING)) #define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD)) + /* + * Specifies if interfaces are authorized by default + * or they require explicit user space authorization; this bit is + * settable through /sys/class/usb_host/X/interface_authorized_default + */ +#define HCD_INTF_AUTHORIZED(hcd) \ + ((hcd)->flags & (1U << HCD_FLAG_INTF_AUTHORIZED)) + + /* + * Specifies if devices are authorized by default + * or they require explicit user space authorization; this bit is + * settable through /sys/class/usb_host/X/authorized_default + */ +#define HCD_DEV_AUTHORIZED(hcd) \ + ((hcd)->flags & (1U << HCD_FLAG_DEV_AUTHORIZED)) + /* Flags that get set only during HCD registration or removal. */ unsigned rh_registered:1;/* is root hub registered? */ unsigned rh_pollable:1; /* may we poll the root hub? */ @@ -141,7 +153,6 @@ struct usb_hcd { * support the new root-hub polling mechanism. */ unsigned uses_new_polling:1; unsigned wireless:1; /* Wireless USB HCD */ - unsigned authorized_default:1; unsigned has_tt:1; /* Integrated TT in root hub */ unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */ unsigned can_do_streams:1; /* HC supports streams */ @@ -239,6 +250,7 @@ struct hc_driver { #define HCD_USB2 0x0020 /* USB 2.0 */ #define HCD_USB25 0x0030 /* Wireless USB 1.0 (USB 2.5)*/ #define HCD_USB3 0x0040 /* USB 3.0 */ +#define HCD_USB31 0x0050 /* USB 3.1 */ #define HCD_MASK 0x0070 #define HCD_BH 0x0100 /* URB complete in BH context */ diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h index a4ee1b582183..fa6dc132bd1b 100644 --- a/include/linux/usb/musb.h +++ b/include/linux/usb/musb.h @@ -95,7 +95,7 @@ struct musb_hdrc_config { /* musb CLKIN in Blackfin in MHZ */ unsigned char clkin; #endif - + u32 maximum_speed; }; struct musb_hdrc_platform_data { diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h index 8c5a818ec244..c3fe9e48ce27 100644 --- a/include/linux/usb/of.h +++ b/include/linux/usb/of.h @@ -12,22 +12,10 @@ #include <linux/usb/phy.h> #if IS_ENABLED(CONFIG_OF) -enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np); -enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np); bool of_usb_host_tpl_support(struct device_node *np); int of_usb_update_otg_caps(struct device_node *np, struct usb_otg_caps *otg_caps); #else -static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np) -{ - return USB_DR_MODE_UNKNOWN; -} - -static inline enum usb_device_speed -of_usb_get_maximum_speed(struct device_node *np) -{ - return USB_SPEED_UNKNOWN; -} static inline bool of_usb_host_tpl_support(struct device_node *np) { return false; diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h index bd1dcf816100..67929df86df5 100644 --- a/include/linux/usb/otg.h +++ b/include/linux/usb/otg.h @@ -119,4 +119,13 @@ enum usb_dr_mode { USB_DR_MODE_OTG, }; +/** + * usb_get_dr_mode - Get dual role mode for given device + * @dev: Pointer to the given device + * + * The function gets the phy interface string from the property 'dr_mode', + * and returns the corresponding enum usb_dr_mode. + */ +extern enum usb_dr_mode usb_get_dr_mode(struct device *dev); + #endif /* __LINUX_USB_OTG_H */ diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h index e39f251cf861..31a8068c42a5 100644 --- 
a/include/linux/usb/phy.h +++ b/include/linux/usb/phy.h @@ -63,7 +63,7 @@ enum usb_otg_state { struct usb_phy; struct usb_otg; -/* for transceivers connected thru an ULPI interface, the user must +/* for phys connected thru an ULPI interface, the user must * provide access ops */ struct usb_phy_io_ops { @@ -92,10 +92,10 @@ struct usb_phy { u16 port_status; u16 port_change; - /* to support controllers that have multiple transceivers */ + /* to support controllers that have multiple phys */ struct list_head head; - /* initialize/shutdown the OTG controller */ + /* initialize/shutdown the phy */ int (*init)(struct usb_phy *x); void (*shutdown)(struct usb_phy *x); @@ -106,7 +106,7 @@ struct usb_phy { int (*set_power)(struct usb_phy *x, unsigned mA); - /* Set transceiver into suspend mode */ + /* Set phy into suspend mode */ int (*set_suspend)(struct usb_phy *x, int suspend); diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index 3dd5a781da99..bfb74723f151 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h @@ -157,7 +157,7 @@ struct renesas_usbhs_driver_param { */ int pio_dma_border; /* default is 64byte */ - u32 type; + uintptr_t type; u32 enable_gpio; /* diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index b483abd34493..69e1d4a1f1b3 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h @@ -1,10 +1,31 @@ /* + * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs + * * Copyright (c) 2010 Red Hat Inc. * Author : Dave Airlie <airlied@redhat.com> * - * Licensed under GPLv2 + * Copyright (c) 2015 Lukas Wunner <lukas@wunner.de> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS + * IN THE SOFTWARE. * - * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs */ #ifndef _LINUX_VGA_SWITCHEROO_H_ @@ -14,28 +35,85 @@ struct pci_dev; +/** + * enum vga_switcheroo_state - client power state + * @VGA_SWITCHEROO_OFF: off + * @VGA_SWITCHEROO_ON: on + * @VGA_SWITCHEROO_NOT_FOUND: client has not registered with vga_switcheroo. + * Only used in vga_switcheroo_get_client_state() which in turn is only + * called from hda_intel.c + * + * Client power state. 
+ */ enum vga_switcheroo_state { VGA_SWITCHEROO_OFF, VGA_SWITCHEROO_ON, /* below are referred only from vga_switcheroo_get_client_state() */ - VGA_SWITCHEROO_INIT, VGA_SWITCHEROO_NOT_FOUND, }; +/** + * enum vga_switcheroo_client_id - client identifier + * @VGA_SWITCHEROO_UNKNOWN_ID: initial identifier assigned to vga clients. + * Determining the id requires the handler, so GPUs are given their + * true id in a delayed fashion in vga_switcheroo_enable() + * @VGA_SWITCHEROO_IGD: integrated graphics device + * @VGA_SWITCHEROO_DIS: discrete graphics device + * @VGA_SWITCHEROO_MAX_CLIENTS: currently no more than two GPUs are supported + * + * Client identifier. Audio clients use the same identifier & 0x100. + */ enum vga_switcheroo_client_id { + VGA_SWITCHEROO_UNKNOWN_ID = -1, VGA_SWITCHEROO_IGD, VGA_SWITCHEROO_DIS, VGA_SWITCHEROO_MAX_CLIENTS, }; +/** + * struct vga_switcheroo_handler - handler callbacks + * @init: initialize handler. + * Optional. This gets called when vga_switcheroo is enabled, i.e. when + * two vga clients have registered. It allows the handler to perform + * some delayed initialization that depends on the existence of the + * vga clients. Currently only the radeon and amdgpu drivers use this. + * The return value is ignored + * @switchto: switch outputs to given client. + * Mandatory. For muxless machines this should be a no-op. Returning 0 + * denotes success, anything else failure (in which case the switch is + * aborted) + * @power_state: cut or reinstate power of given client. + * Optional. The return value is ignored + * @get_client_id: determine if given pci device is integrated or discrete GPU. + * Mandatory + * + * Handler callbacks. The multiplexer itself. The @switchto and @get_client_id + * methods are mandatory, all others may be set to NULL. + */ struct vga_switcheroo_handler { + int (*init)(void); int (*switchto)(enum vga_switcheroo_client_id id); int (*power_state)(enum vga_switcheroo_client_id id, enum vga_switcheroo_state state); - int (*init)(void); - int (*get_client_id)(struct pci_dev *pdev); + enum vga_switcheroo_client_id (*get_client_id)(struct pci_dev *pdev); }; +/** + * struct vga_switcheroo_client_ops - client callbacks + * @set_gpu_state: do the equivalent of suspend/resume for the card. + * Mandatory. This should not cut power to the discrete GPU, + * which is the job of the handler + * @reprobe: poll outputs. + * Optional. This gets called after waking the GPU and switching + * the outputs to it + * @can_switch: check if the device is in a position to switch now. + * Mandatory. The client should return false if a user space process + * has one of its device files open + * + * Client callbacks. A client can be either a GPU or an audio device on a GPU. + * The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be + * set to NULL. For audio clients, the @reprobe member is bogus. + */
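Pulling the client callbacks together, a GPU driver registration might be sketched as follows (all foo_* names are invented; the can_switch signature is assumed from the surrounding header, which elides it here):

static void foo_set_gpu_state(struct pci_dev *pdev,
			      enum vga_switcheroo_state state)
{
	/* suspend or resume the GPU; cutting power is the handler's job */
}

static bool foo_can_switch(struct pci_dev *pdev)
{
	/* report false while userspace holds one of our device files open */
	return true;
}

static const struct vga_switcheroo_client_ops foo_switcheroo_ops = {
	.set_gpu_state	= foo_set_gpu_state,
	.reprobe	= NULL,			/* optional */
	.can_switch	= foo_can_switch,
};

static int foo_register_switcheroo(struct pci_dev *pdev)
{
	/* false: this driver does not use driver_power_control */
	return vga_switcheroo_register_client(pdev, &foo_switcheroo_ops, false);
}
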
struct vga_switcheroo_client_ops { void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state); void (*reprobe)(struct pci_dev *dev); @@ -49,17 +127,17 @@ int vga_switcheroo_register_client(struct pci_dev *dev, bool driver_power_control); int vga_switcheroo_register_audio_client(struct pci_dev *pdev, const struct vga_switcheroo_client_ops *ops, - int id, bool active); + enum vga_switcheroo_client_id id); void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info); -int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler); +int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler); void vga_switcheroo_unregister_handler(void); int vga_switcheroo_process_delayed_switch(void); -int vga_switcheroo_get_client_state(struct pci_dev *dev); +enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev); void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic); @@ -72,13 +150,13 @@ static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} static inline int vga_switcheroo_register_client(struct pci_dev *dev, const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; } static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {} -static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; } +static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler) { return 0; } static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev, const struct vga_switcheroo_client_ops *ops, - int id, bool active) { return 0; } + enum vga_switcheroo_client_id id) { return 0; } static inline void vga_switcheroo_unregister_handler(void) {} static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } -static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } +static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {} diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 9246d32dc973..e623d392db0c 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -14,12 +14,12 @@ #endif #ifdef CONFIG_HIGHMEM -#define HIGHMEM_ZONE(xx) , xx##_HIGH +#define HIGHMEM_ZONE(xx) xx##_HIGH, #else #define HIGHMEM_ZONE(xx) #endif -#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE +#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, HIGHMEM_ZONE(xx) xx##_MOVABLE enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, FOR_ALL_ZONES(PGALLOC), diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 0ec598381f97..3bff87a25a42 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -182,22 +182,10 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) # endif #endif -struct vmalloc_info { - unsigned long used; - unsigned long largest_chunk; -}; - #ifdef CONFIG_MMU #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START) -extern void get_vmalloc_info(struct vmalloc_info *vmi); #else - #define VMALLOC_TOTAL 0UL -#define get_vmalloc_info(vmi) \ do { \ (vmi)->used = 0; \ (vmi)->largest_chunk = 0; \ } while (0) #endif #endif /* _LINUX_VMALLOC_H */ diff --git a/include/linux/vme.h 
b/include/linux/vme.h index c0131358f351..71e4a6dec5ac 100644 --- a/include/linux/vme.h +++ b/include/linux/vme.h @@ -81,6 +81,9 @@ struct vme_resource { extern struct bus_type vme_bus_type; +/* Number of VME interrupt vectors */ +#define VME_NUM_STATUSID 256 + /* VME_MAX_BRIDGES comes from the type of vme_bus_numbers */ #define VME_MAX_BRIDGES (sizeof(unsigned int)*8) #define VME_MAX_SLOTS 32 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 82e7db7f7100..5dbc8b0ee567 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -161,30 +161,8 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone, } #ifdef CONFIG_NUMA -/* - * Determine the per node value of a stat item. This function - * is called frequently in a NUMA machine, so try to be as - * frugal as possible. - */ -static inline unsigned long node_page_state(int node, - enum zone_stat_item item) -{ - struct zone *zones = NODE_DATA(node)->node_zones; - - return -#ifdef CONFIG_ZONE_DMA - zone_page_state(&zones[ZONE_DMA], item) + -#endif -#ifdef CONFIG_ZONE_DMA32 - zone_page_state(&zones[ZONE_DMA32], item) + -#endif -#ifdef CONFIG_HIGHMEM - zone_page_state(&zones[ZONE_HIGHMEM], item) + -#endif - zone_page_state(&zones[ZONE_NORMAL], item) + - zone_page_state(&zones[ZONE_MOVABLE], item); -} +extern unsigned long node_page_state(int node, enum zone_stat_item item); extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp); #else @@ -269,7 +247,6 @@ static inline void __dec_zone_page_state(struct page *page, #define set_pgdat_percpu_threshold(pgdat, callback) { } -static inline void refresh_cpu_vm_stats(int cpu) { } static inline void refresh_zone_stat_thresholds(void) { } static inline void cpu_vm_stats_fold(int cpu) { } diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index d74a0e907b9e..027b1f43f12d 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -24,8 +24,8 @@ struct watchdog_device; * @stop: The routine for stopping the watchdog device. * @ping: The routine that sends a keepalive ping to the watchdog device. * @status: The routine that shows the status of the watchdog device. - * @set_timeout:The routine for setting the watchdog devices timeout value. - * @get_timeleft:The routine that get's the time that's left before a reset. + * @set_timeout:The routine for setting the watchdog device's timeout value (in seconds). + * @get_timeleft:The routine that gets the time left before a reset (in seconds). * @ref: The ref operation for dyn. allocated watchdog_device structs * @unref: The unref operation for dyn. allocated watchdog_device structs * @ioctl: The routine that handles extra ioctl calls. @@ -33,7 +33,7 @@ struct watchdog_device; * The watchdog_ops structure contains a list of low-level operations * that control a watchdog device. It also contains the module that owns * these operations. The start and stop function are mandatory, all other - * functions are optonal. + * functions are optional. */ struct watchdog_ops { struct module *owner; @@ -59,9 +59,9 @@ struct watchdog_ops { * @info: Pointer to a watchdog_info structure. * @ops: Pointer to the list of watchdog operations. * @bootstatus: Status of the watchdog device at boot. - * @timeout: The watchdog devices timeout value. - * @min_timeout:The watchdog devices minimum timeout value. - * @max_timeout:The watchdog devices maximum timeout value. + * @timeout: The watchdog device's timeout value (in seconds). + * @min_timeout:The watchdog device's minimum timeout value (in seconds). 
+ * @max_timeout:The watchdog device's maximum timeout value (in seconds). * @driver-data:Pointer to the driver's private data. * @lock: Lock for watchdog core internal use only. * @status: Field that contains the device's internal status bits. @@ -119,8 +119,15 @@ static inline void watchdog_set_nowayout(struct watchdog_device *wdd, bool noway /* Use the following function to check if a timeout value is invalid */ static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t) { - return ((wdd->max_timeout != 0) && - (t < wdd->min_timeout || t > wdd->max_timeout)); + /* + * The timeout is invalid if + * - the requested value is smaller than the configured minimum timeout, + * or + * - a maximum timeout is configured, and the requested value is larger + * than the maximum timeout. + */ + return t < wdd->min_timeout || + (wdd->max_timeout && t > wdd->max_timeout); } /* Use the following functions to manipulate watchdog driver specific data */ diff --git a/include/linux/zpool.h b/include/linux/zpool.h index 42f8ec992452..2e97b7707dff 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h @@ -38,10 +38,10 @@ enum zpool_mapmode { bool zpool_has_pool(char *type); -struct zpool *zpool_create_pool(char *type, char *name, +struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp, const struct zpool_ops *ops); -char *zpool_get_type(struct zpool *pool); +const char *zpool_get_type(struct zpool *pool); void zpool_destroy_pool(struct zpool *pool); @@ -83,7 +9 @@ struct zpool_driver { atomic_t refcount; struct list_head list; - void *(*create)(char *name, gfp_t gfp, const struct zpool_ops *ops, + void *(*create)(const char *name, + gfp_t gfp, + const struct zpool_ops *ops, struct zpool *zpool); void (*destroy)(void *pool); diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 6398dfae53f1..34eb16098a33 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -41,7 +41,7 @@ struct zs_pool_stats { struct zs_pool; -struct zs_pool *zs_create_pool(char *name, gfp_t flags); +struct zs_pool *zs_create_pool(const char *name, gfp_t flags); void zs_destroy_pool(struct zs_pool *pool); unsigned long zs_malloc(struct zs_pool *pool, size_t size); diff --git a/include/linux/zutil.h b/include/linux/zutil.h index 6adfa9a6ffe9..663689521759 100644 --- a/include/linux/zutil.h +++ b/include/linux/zutil.h @@ -68,10 +68,10 @@ typedef uLong (*check_func) (uLong check, const Byte *buf, An Adler-32 checksum is almost as reliable as a CRC32 but can be computed much faster. Usage example: - uLong adler = adler32(0L, NULL, 0); + uLong adler = zlib_adler32(0L, NULL, 0); while (read_buffer(buffer, length) != EOF) { - adler = adler32(adler, buffer, length); + adler = zlib_adler32(adler, buffer, length); } if (adler != original_adler) error(); */
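Finally, to spell out the reworked watchdog_timeout_invalid() above: with min_timeout = 1 and max_timeout = 0 (no maximum configured), the old check accepted any value, while the new one still enforces the minimum. A driver's set_timeout path might be sketched like this (foo_wdt_set_timeout is an invented name):

static int foo_wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
{
	if (watchdog_timeout_invalid(wdd, t))
		return -EINVAL;

	wdd->timeout = t;	/* seconds, per the clarified kernel-doc */
	return 0;
}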