Diffstat (limited to 'include/linux')
269 files changed, 5664 insertions, 1814 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 06ed7e54033e..288fac5294f5 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -190,14 +190,6 @@ static inline int acpi_debugger_notify_command_complete(void) } #endif -#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE -void acpi_initrd_override(void *data, size_t size); -#else -static inline void acpi_initrd_override(void *data, size_t size) -{ -} -#endif - #define BAD_MADT_ENTRY(entry, end) ( \ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ ((struct acpi_subtable_header *)entry)->length < sizeof(*entry)) @@ -216,6 +208,7 @@ void acpi_boot_table_init (void); int acpi_mps_check (void); int acpi_numa_init (void); +void early_acpi_table_init(void *data, size_t size); int acpi_table_init (void); int acpi_table_parse(char *id, acpi_tbl_table_handler handler); int __init acpi_parse_entries(char *id, unsigned long table_size, @@ -278,6 +271,7 @@ void acpi_irq_stats_init(void); extern u32 acpi_irq_handled; extern u32 acpi_irq_not_handled; extern unsigned int acpi_sci_irq; +extern bool acpi_no_s5; #define INVALID_ACPI_IRQ ((unsigned)-1) static inline bool acpi_sci_irq_valid(void) { @@ -311,7 +305,6 @@ struct pci_dev; int acpi_pci_irq_enable (struct pci_dev *dev); void acpi_penalize_isa_irq(int irq, int active); bool acpi_isa_irq_available(int irq); -void acpi_penalize_sci_irq(int irq, int trigger, int polarity); void acpi_pci_irq_disable (struct pci_dev *dev); extern int ec_read(u8 addr, u8 *val); @@ -359,7 +352,6 @@ extern bool wmi_has_guid(const char *guid); extern char acpi_video_backlight_string[]; extern long acpi_is_video_device(acpi_handle handle); extern int acpi_blacklisted(void); -extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d); extern void acpi_osi_setup(char *str); extern bool acpi_osi_is_win8(void); @@ -596,6 +588,7 @@ static inline const char *acpi_dev_name(struct acpi_device *adev) return NULL; } +static inline void early_acpi_table_init(void *data, size_t size) { } static inline void acpi_early_init(void) { } static inline void acpi_subsystem_init(void) { } diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h index 10fe2a211c2e..27e9ec8778eb 100644 --- a/include/linux/amba/pl08x.h +++ b/include/linux/amba/pl08x.h @@ -86,7 +86,7 @@ struct pl08x_channel_data { * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 */ struct pl08x_platform_data { - const struct pl08x_channel_data *slave_channels; + struct pl08x_channel_data *slave_channels; unsigned int num_slave_channels; struct pl08x_channel_data memcpy_channel; int (*get_xfer_signal)(const struct pl08x_channel_data *); diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h index b2d32e01dfe4..714186de8c36 100644 --- a/include/linux/apple-gmux.h +++ b/include/linux/apple-gmux.h @@ -35,7 +35,7 @@ */ static inline bool apple_gmux_present(void) { - return acpi_dev_present(GMUX_ACPI_HID); + return acpi_dev_found(GMUX_ACPI_HID); } #else /* !CONFIG_APPLE_GMUX */ diff --git a/include/linux/ata.h b/include/linux/ata.h index c1a2f345cbe6..99346be5a7ca 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -243,6 +243,7 @@ enum { ATA_CMD_WRITE_QUEUED_FUA_EXT = 0x3E, ATA_CMD_FPDMA_READ = 0x60, ATA_CMD_FPDMA_WRITE = 0x61, + ATA_CMD_NCQ_NON_DATA = 0x63, ATA_CMD_FPDMA_SEND = 0x64, ATA_CMD_FPDMA_RECV = 0x65, ATA_CMD_PIO_READ = 0x20, @@ -301,19 +302,43 @@ enum { ATA_CMD_CFA_WRITE_MULT_NE = 0xCD, ATA_CMD_REQ_SENSE_DATA = 0x0B, ATA_CMD_SANITIZE_DEVICE = 0xB4, + ATA_CMD_ZAC_MGMT_IN = 
0x4A, + ATA_CMD_ZAC_MGMT_OUT = 0x9F, /* marked obsolete in the ATA/ATAPI-7 spec */ ATA_CMD_RESTORE = 0x10, + /* Subcmds for ATA_CMD_FPDMA_RECV */ + ATA_SUBCMD_FPDMA_RECV_RD_LOG_DMA_EXT = 0x01, + ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN = 0x02, + /* Subcmds for ATA_CMD_FPDMA_SEND */ ATA_SUBCMD_FPDMA_SEND_DSM = 0x00, ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT = 0x02, + /* Subcmds for ATA_CMD_NCQ_NON_DATA */ + ATA_SUBCMD_NCQ_NON_DATA_ABORT_QUEUE = 0x00, + ATA_SUBCMD_NCQ_NON_DATA_SET_FEATURES = 0x05, + ATA_SUBCMD_NCQ_NON_DATA_ZERO_EXT = 0x06, + ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT = 0x07, + + /* Subcmds for ATA_CMD_ZAC_MGMT_IN */ + ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES = 0x00, + + /* Subcmds for ATA_CMD_ZAC_MGMT_OUT */ + ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE = 0x01, + ATA_SUBCMD_ZAC_MGMT_OUT_FINISH_ZONE = 0x02, + ATA_SUBCMD_ZAC_MGMT_OUT_OPEN_ZONE = 0x03, + ATA_SUBCMD_ZAC_MGMT_OUT_RESET_WRITE_POINTER = 0x04, + /* READ_LOG_EXT pages */ + ATA_LOG_DIRECTORY = 0x0, ATA_LOG_SATA_NCQ = 0x10, + ATA_LOG_NCQ_NON_DATA = 0x12, ATA_LOG_NCQ_SEND_RECV = 0x13, ATA_LOG_SATA_ID_DEV_DATA = 0x30, ATA_LOG_SATA_SETTINGS = 0x08, + ATA_LOG_ZONED_INFORMATION = 0x09, ATA_LOG_DEVSLP_OFFSET = 0x30, ATA_LOG_DEVSLP_SIZE = 0x08, ATA_LOG_DEVSLP_MDAT = 0x00, @@ -328,8 +353,25 @@ enum { ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET = 0x04, ATA_LOG_NCQ_SEND_RECV_DSM_TRIM = (1 << 0), ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET = 0x08, + ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED = (1 << 0), ATA_LOG_NCQ_SEND_RECV_WR_LOG_OFFSET = 0x0C, - ATA_LOG_NCQ_SEND_RECV_SIZE = 0x10, + ATA_LOG_NCQ_SEND_RECV_WR_LOG_SUPPORTED = (1 << 0), + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET = 0x10, + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OUT_SUPPORTED = (1 << 0), + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED = (1 << 1), + ATA_LOG_NCQ_SEND_RECV_SIZE = 0x14, + + /* NCQ Non-Data log */ + ATA_LOG_NCQ_NON_DATA_SUBCMDS_OFFSET = 0x00, + ATA_LOG_NCQ_NON_DATA_ABORT_OFFSET = 0x00, + ATA_LOG_NCQ_NON_DATA_ABORT_NCQ = (1 << 0), + ATA_LOG_NCQ_NON_DATA_ABORT_ALL = (1 << 1), + ATA_LOG_NCQ_NON_DATA_ABORT_STREAMING = (1 << 2), + ATA_LOG_NCQ_NON_DATA_ABORT_NON_STREAMING = (1 << 3), + ATA_LOG_NCQ_NON_DATA_ABORT_SELECTED = (1 << 4), + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET = 0x1C, + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT = (1 << 0), + ATA_LOG_NCQ_NON_DATA_SIZE = 0x40, /* READ/WRITE LONG (obsolete) */ ATA_CMD_READ_LONG = 0x22, @@ -371,7 +413,8 @@ enum { SETFEATURES_AAM_ON = 0x42, SETFEATURES_AAM_OFF = 0xC2, - SETFEATURES_SPINUP = 0x07, /* Spin-up drive */ + SETFEATURES_SPINUP = 0x07, /* Spin-up drive */ + SETFEATURES_SPINUP_TIMEOUT = 30000, /* 30s timeout for drive spin-up from PUIS */ SETFEATURES_SATA_ENABLE = 0x10, /* Enable use of SATA feature */ SETFEATURES_SATA_DISABLE = 0x90, /* Disable use of SATA feature */ @@ -385,6 +428,8 @@ enum { SATA_SSP = 0x06, /* Software Settings Preservation */ SATA_DEVSLP = 0x09, /* Device Sleep */ + SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */ + /* feature values for SET_MAX */ ATA_SET_MAX_ADDR = 0x00, ATA_SET_MAX_PASSWD = 0x01, @@ -528,6 +573,8 @@ struct ata_bmdma_prd { #define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20) #define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4)) #define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8)) +#define ata_id_has_ncq_autosense(id) \ + ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7)) static inline bool ata_id_has_hipm(const u16 *id) { @@ -716,6 +763,20 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id) return false; } +static inline bool ata_id_has_sense_reporting(const u16 *id) +{ + if 
(!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) + return false; + return id[ATA_ID_COMMAND_SET_3] & (1 << 6); +} + +static inline bool ata_id_sense_reporting_enabled(const u16 *id) +{ + if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) + return false; + return id[ATA_ID_COMMAND_SET_4] & (1 << 6); +} + /** * ata_id_major_version - get ATA level of drive * @id: Identify data @@ -820,6 +881,11 @@ static inline bool ata_id_has_ncq_send_and_recv(const u16 *id) return id[ATA_ID_SATA_CAPABILITY_2] & BIT(6); } +static inline bool ata_id_has_ncq_non_data(const u16 *id) +{ + return id[ATA_ID_SATA_CAPABILITY_2] & BIT(5); +} + static inline bool ata_id_has_trim(const u16 *id) { if (ata_id_major_version(id) >= 7 && @@ -871,6 +937,11 @@ static inline bool ata_id_is_ssd(const u16 *id) return id[ATA_ID_ROT_SPEED] == 0x01; } +static inline u8 ata_id_zoned_cap(const u16 *id) +{ + return (id[ATA_ID_ADDITIONAL_SUPP] & 0x3); +} + static inline bool ata_id_pio_need_iordy(const u16 *id, const u8 pio) { /* CF spec. r4.1 Table 22 says no IORDY on PIO5 and PIO6. */ diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h index 33eb274cd0e6..e66153d60bd5 100644 --- a/include/linux/ath9k_platform.h +++ b/include/linux/ath9k_platform.h @@ -31,6 +31,10 @@ struct ath9k_platform_data { u32 gpio_mask; u32 gpio_val; + u32 bt_active_pin; + u32 bt_priority_pin; + u32 wlan_active_pin; + bool endian_check; bool is_clk_25mhz; bool tx_gain_buffalo; diff --git a/include/linux/audit.h b/include/linux/audit.h index e38e3fc13ea8..961a417d641e 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -26,6 +26,7 @@ #include <linux/sched.h> #include <linux/ptrace.h> #include <uapi/linux/audit.h> +#include <linux/tty.h> #define AUDIT_INO_UNSET ((unsigned long)-1) #define AUDIT_DEV_UNSET ((dev_t)-1) @@ -347,6 +348,23 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk) return tsk->sessionid; } +static inline struct tty_struct *audit_get_tty(struct task_struct *tsk) +{ + struct tty_struct *tty = NULL; + unsigned long flags; + + spin_lock_irqsave(&tsk->sighand->siglock, flags); + if (tsk->signal) + tty = tty_kref_get(tsk->signal->tty); + spin_unlock_irqrestore(&tsk->sighand->siglock, flags); + return tty; +} + +static inline void audit_put_tty(struct tty_struct *tty) +{ + tty_kref_put(tty); +} + extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); extern void __audit_bprm(struct linux_binprm *bprm); @@ -504,6 +522,12 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk) { return -1; } +static inline struct tty_struct *audit_get_tty(struct task_struct *tsk) +{ + return NULL; +} +static inline void audit_put_tty(struct tty_struct *tty) +{ } static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) { } static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 1e7a69adbe6f..5f2fd61ef4fb 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -141,9 +141,10 @@ extern void devm_backlight_device_unregister(struct device *dev, struct backlight_device *bd); extern void backlight_force_update(struct backlight_device *bd, enum backlight_update_reason reason); -extern bool backlight_device_registered(enum backlight_type type); extern int backlight_register_notifier(struct notifier_block *nb); extern int backlight_unregister_notifier(struct notifier_block *nb); +extern struct backlight_device 
*backlight_device_get_by_type(enum backlight_type type); +extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness); #define to_backlight_device(obj) container_of(obj, struct backlight_device, dev) diff --git a/include/linux/bcm47xx_sprom.h b/include/linux/bcm47xx_sprom.h new file mode 100644 index 000000000000..c06b47c84e1a --- /dev/null +++ b/include/linux/bcm47xx_sprom.h @@ -0,0 +1,24 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __BCM47XX_SPROM_H +#define __BCM47XX_SPROM_H + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/vmalloc.h> + +#ifdef CONFIG_BCM47XX_SPROM +int bcm47xx_sprom_register_fallbacks(void); +#else +static inline int bcm47xx_sprom_register_fallbacks(void) +{ + return -ENOTSUPP; +}; +#endif + +#endif /* __BCM47XX_SPROM_H */ diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 0367c63f5960..e6b41f42602b 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -4,6 +4,7 @@ #include <linux/pci.h> #include <linux/mod_devicetable.h> +#include <linux/bcma/bcma_driver_arm_c9.h> #include <linux/bcma/bcma_driver_chipcommon.h> #include <linux/bcma/bcma_driver_pci.h> #include <linux/bcma/bcma_driver_pcie2.h> diff --git a/include/linux/bcma/bcma_driver_arm_c9.h b/include/linux/bcma/bcma_driver_arm_c9.h new file mode 100644 index 000000000000..93bd73d670d5 --- /dev/null +++ b/include/linux/bcma/bcma_driver_arm_c9.h @@ -0,0 +1,15 @@ +#ifndef LINUX_BCMA_DRIVER_ARM_C9_H_ +#define LINUX_BCMA_DRIVER_ARM_C9_H_ + +/* DMU (Device Management Unit) */ +#define BCMA_DMU_CRU_USB2_CONTROL 0x0164 +#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_MASK 0x00000FFC +#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_SHIFT 2 +#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_MASK 0x00007000 +#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_SHIFT 12 +#define BCMA_DMU_CRU_CLKSET_KEY 0x0180 +#define BCMA_DMU_CRU_STRAPS_CTRL 0x02A0 +#define BCMA_DMU_CRU_STRAPS_CTRL_USB3 0x00000010 +#define BCMA_DMU_CRU_STRAPS_CTRL_4BYTE 0x00008000 + +#endif /* LINUX_BCMA_DRIVER_ARM_C9_H_ */ diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index 846513c73606..a5ac2cad5cb7 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -587,7 +587,6 @@ struct mtd_info; struct bcma_sflash { bool present; - u32 window; u32 blocksize; u16 numblocks; u32 size; diff --git a/include/linux/bio.h b/include/linux/bio.h index 6b7481f62218..9faebf7f9a33 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -703,6 +703,17 @@ static inline struct bio *bio_list_get(struct bio_list *bl) } /* + * Increment chain count for the bio. Make sure the CHAIN flag update + * is visible before the raised count. + */ +static inline void bio_inc_remaining(struct bio *bio) +{ + bio_set_flag(bio, BIO_CHAIN); + smp_mb__before_atomic(); + atomic_inc(&bio->__bi_remaining); +} + +/* * bio_set is used to allow other portions of the IO system to * allocate their own private memory pools for bio and iovec structures. 
* These memory pools in turn all allocate from the bio_slab diff --git a/include/linux/bitops.h b/include/linux/bitops.h index defeaac0745f..299e76b59fe9 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -227,6 +227,22 @@ static inline unsigned long __ffs64(u64 word) }) #endif +#ifndef bit_clear_unless +#define bit_clear_unless(ptr, _clear, _test) \ +({ \ + const typeof(*ptr) clear = (_clear), test = (_test); \ + typeof(*ptr) old, new; \ + \ + do { \ + old = ACCESS_ONCE(*ptr); \ + new = old & ~clear; \ + } while (!(old & test) && \ + cmpxchg(ptr, old, new) != old); \ + \ + !(old & test); \ +}) +#endif + #ifndef find_last_bit /** * find_last_bit - find the last set bit in a memory region diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 9ac9799b702b..2498fdf3a503 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -238,8 +238,8 @@ void blk_mq_start_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void blk_mq_run_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); -void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, - void *priv); +void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, + busy_tag_iter_fn *fn, void *priv); void blk_mq_freeze_queue(struct request_queue *q); void blk_mq_unfreeze_queue(struct request_queue *q); void blk_mq_freeze_queue_start(struct request_queue *q); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 86a38ea1823f..77e5d81f07aa 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -208,7 +208,7 @@ enum rq_flag_bits { #define REQ_COMMON_MASK \ (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \ REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \ - REQ_SECURE | REQ_INTEGRITY) + REQ_SECURE | REQ_INTEGRITY | REQ_NOMERGE) #define REQ_CLONE_MASK REQ_COMMON_MASK #define BIO_NO_ADVANCE_ITER_MASK (REQ_DISCARD|REQ_WRITE_SAME) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 669e419d6234..1fd8fdff2f81 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -433,8 +433,6 @@ struct request_queue { /* * for flush operations */ - unsigned int flush_flags; - unsigned int flush_not_queueable:1; struct blk_flush_queue *fq; struct list_head requeue_list; @@ -491,6 +489,9 @@ struct request_queue { #define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ #define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ #define QUEUE_FLAG_POLL 22 /* IO polling enabled if set */ +#define QUEUE_FLAG_WC 23 /* Write back caching */ +#define QUEUE_FLAG_FUA 24 /* device supports FUA writes */ +#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_STACKABLE) | \ @@ -779,7 +780,7 @@ extern struct request *blk_make_request(struct request_queue *, struct bio *, extern void blk_rq_set_block_pc(struct request *); extern void blk_requeue_request(struct request_queue *, struct request *); extern void blk_add_request_payload(struct request *rq, struct page *page, - unsigned int len); + int offset, unsigned int len); extern int blk_lld_busy(struct request_queue *q); extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, struct bio_set *bs, gfp_t gfp_mask, @@ -1007,8 +1008,8 @@ extern void blk_queue_update_dma_alignment(struct request_queue *, int); extern void 
blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); -extern void blk_queue_flush(struct request_queue *q, unsigned int flush); extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); +extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); @@ -1128,6 +1129,8 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); +extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop); extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct page *page); extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, @@ -1363,7 +1366,7 @@ static inline unsigned int block_size(struct block_device *bdev) static inline bool queue_flush_queueable(struct request_queue *q) { - return !q->flush_not_queueable; + return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags); } typedef struct {struct page *v;} Sector; diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index afc1343df3c7..0f3172b8b225 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -57,6 +57,14 @@ void __trace_note_message(struct blk_trace *, const char *fmt, ...); } while (0) #define BLK_TN_MAX_MSG 128 +static inline bool blk_trace_note_message_enabled(struct request_queue *q) +{ + struct blk_trace *bt = q->blk_trace; + if (likely(!bt)) + return false; + return bt->act_mask & BLK_TC_NOTIFY; +} + extern void blk_add_driver_data(struct request_queue *q, struct request *rq, void *data, size_t len); extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, @@ -79,6 +87,7 @@ extern struct attribute_group blk_trace_attr_group; # define blk_trace_remove(q) (-ENOTTY) # define blk_add_trace_msg(q, fmt, ...) 
do { } while (0) # define blk_trace_remove_sysfs(dev) do { } while (0) +# define blk_trace_note_message_enabled(q) (false) static inline int blk_trace_init_sysfs(struct device *dev) { return 0; diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 35b22f94d2d2..f9be32691718 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -83,34 +83,34 @@ extern void *__alloc_bootmem(unsigned long size, unsigned long goal); extern void *__alloc_bootmem_nopanic(unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; extern void *__alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; void *__alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal, - unsigned long limit); + unsigned long limit) __malloc; extern void *__alloc_bootmem_low(unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; void *__alloc_bootmem_low_nopanic(unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; #ifdef CONFIG_NO_BOOTMEM /* We are using top down, so it is safe to use 0 here */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f1d5c5acc8dd..8ee27b8afe81 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -66,6 +66,11 @@ enum bpf_arg_type { * functions that access data on eBPF program stack */ ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */ + ARG_PTR_TO_RAW_STACK, /* any pointer to eBPF program stack, area does not + * need to be initialized, helper function must fill + * all bytes or clear them in error case. 
+ */ + ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */ ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */ @@ -131,6 +136,7 @@ struct bpf_prog_type_list { struct bpf_prog_aux { atomic_t refcnt; u32 used_map_cnt; + u32 max_ctx_offset; const struct bpf_verifier_ops *ops; struct bpf_map **used_maps; struct bpf_prog *prog; @@ -160,9 +166,12 @@ struct bpf_array { #define MAX_TAIL_CALL_CNT 32 u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); +u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); void bpf_fd_array_map_clear(struct bpf_map *map); bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); + const struct bpf_func_proto *bpf_get_trace_printk_proto(void); +const struct bpf_func_proto *bpf_get_event_output_proto(void); #ifdef CONFIG_BPF_SYSCALL DECLARE_PER_CPU(int, bpf_prog_active); diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 735f9f8c4e43..5261751f6bd4 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -40,8 +40,11 @@ struct can_priv { struct can_clock clock; enum can_state state; - u32 ctrlmode; - u32 ctrlmode_supported; + + /* CAN controller features - see include/uapi/linux/can/netlink.h */ + u32 ctrlmode; /* current options setting */ + u32 ctrlmode_supported; /* options that can be modified by netlink */ + u32 ctrlmode_static; /* static enabled options for driver/hardware */ int restart_ms; struct timer_list restart_timer; @@ -108,6 +111,21 @@ static inline bool can_is_canfd_skb(const struct sk_buff *skb) return skb->len == CANFD_MTU; } +/* helper to define static CAN controller features at device creation time */ +static inline void can_set_static_ctrlmode(struct net_device *dev, + u32 static_mode) +{ + struct can_priv *priv = netdev_priv(dev); + + /* alloc_candev() succeeded => netdev_priv() is valid at this point */ + priv->ctrlmode = static_mode; + priv->ctrlmode_static = static_mode; + + /* override MTU which was set by default in can_setup()? */ + if (static_mode & CAN_CTRLMODE_FD) + dev->mtu = CANFD_MTU; +} + /* get data length from can_dlc with sanitized can_dlc */ u8 can_dlc2len(u8 can_dlc); diff --git a/include/linux/ccp.h b/include/linux/ccp.h index 915af3095b39..7c2bb27c067c 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -1,9 +1,10 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2013 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. 
* * Author: Tom Lendacky <thomas.lendacky@amd.com> + * Author: Gary R Hook <gary.hook@amd.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -381,6 +382,35 @@ struct ccp_passthru_engine { u32 final; }; +/** + * struct ccp_passthru_nomap_engine - CCP pass-through operation + * without performing DMA mapping + * @bit_mod: bitwise operation to perform + * @byte_swap: byteswap operation to perform + * @mask: mask to be applied to data + * @mask_len: length in bytes of mask + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * @final: indicate final pass-through operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - bit_mod, byte_swap, src, dst, src_len + * - mask, mask_len if bit_mod is not CCP_PASSTHRU_BITWISE_NOOP + */ +struct ccp_passthru_nomap_engine { + enum ccp_passthru_bitwise bit_mod; + enum ccp_passthru_byteswap byte_swap; + + dma_addr_t mask; + u32 mask_len; /* In bytes */ + + dma_addr_t src_dma, dst_dma; + u64 src_len; /* In bytes */ + + u32 final; +}; + /***** ECC engine *****/ #define CCP_ECC_MODULUS_BYTES 48 /* 384-bits */ #define CCP_ECC_MAX_OPERANDS 6 @@ -522,7 +552,8 @@ enum ccp_engine { }; /* Flag values for flags member of ccp_cmd */ -#define CCP_CMD_MAY_BACKLOG 0x00000001 +#define CCP_CMD_MAY_BACKLOG 0x00000001 +#define CCP_CMD_PASSTHRU_NO_DMA_MAP 0x00000002 /** * struct ccp_cmd - CPP operation request @@ -562,6 +593,7 @@ struct ccp_cmd { struct ccp_sha_engine sha; struct ccp_rsa_engine rsa; struct ccp_passthru_engine passthru; + struct ccp_passthru_nomap_engine passthru_nomap; struct ccp_ecc_engine ecc; } u; diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index da95258127aa..0c72204c75fc 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -32,6 +32,7 @@ #define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */ #define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */ #define CLK_SET_RATE_UNGATE BIT(10) /* clock needs to run to set rate */ +#define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */ struct clk; struct clk_hw; @@ -282,10 +283,17 @@ extern const struct clk_ops clk_fixed_rate_ops; struct clk *clk_register_fixed_rate(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned long fixed_rate); +struct clk_hw *clk_hw_register_fixed_rate(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + unsigned long fixed_rate); struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned long fixed_rate, unsigned long fixed_accuracy); void clk_unregister_fixed_rate(struct clk *clk); +struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + unsigned long fixed_rate, unsigned long fixed_accuracy); + void of_fixed_clk_setup(struct device_node *np); /** @@ -326,7 +334,12 @@ struct clk *clk_register_gate(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 bit_idx, u8 clk_gate_flags, spinlock_t *lock); +struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 bit_idx, + u8 clk_gate_flags, 
spinlock_t *lock); void clk_unregister_gate(struct clk *clk); +void clk_hw_unregister_gate(struct clk_hw *hw); struct clk_div_table { unsigned int val; @@ -407,12 +420,22 @@ struct clk *clk_register_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags, spinlock_t *lock); +struct clk_hw *clk_hw_register_divider(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags, spinlock_t *lock); struct clk *clk_register_divider_table(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags, const struct clk_div_table *table, spinlock_t *lock); +struct clk_hw *clk_hw_register_divider_table(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags, const struct clk_div_table *table, + spinlock_t *lock); void clk_unregister_divider(struct clk *clk); +void clk_hw_unregister_divider(struct clk_hw *hw); /** * struct clk_mux - multiplexer clock @@ -463,14 +486,25 @@ struct clk *clk_register_mux(struct device *dev, const char *name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, u8 clk_mux_flags, spinlock_t *lock); +struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_mux_flags, spinlock_t *lock); struct clk *clk_register_mux_table(struct device *dev, const char *name, const char * const *parent_names, u8 num_parents, unsigned long flags, void __iomem *reg, u8 shift, u32 mask, u8 clk_mux_flags, u32 *table, spinlock_t *lock); +struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, + void __iomem *reg, u8 shift, u32 mask, + u8 clk_mux_flags, u32 *table, spinlock_t *lock); void clk_unregister_mux(struct clk *clk); +void clk_hw_unregister_mux(struct clk_hw *hw); void of_fixed_factor_clk_setup(struct device_node *node); @@ -499,6 +533,10 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned int mult, unsigned int div); void clk_unregister_fixed_factor(struct clk *clk); +struct clk_hw *clk_hw_register_fixed_factor(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + unsigned int mult, unsigned int div); +void clk_hw_unregister_fixed_factor(struct clk_hw *hw); /** * struct clk_fractional_divider - adjustable fractional divider clock @@ -533,6 +571,11 @@ struct clk *clk_register_fractional_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, u8 clk_divider_flags, spinlock_t *lock); +struct clk_hw *clk_hw_register_fractional_divider(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, + u8 clk_divider_flags, spinlock_t *lock); +void clk_hw_unregister_fractional_divider(struct clk_hw *hw); /** * struct clk_multiplier - adjustable multiplier clock @@ -603,6 +646,14 @@ struct clk *clk_register_composite(struct device *dev, const char *name, struct clk_hw *rate_hw, const struct clk_ops 
*rate_ops, struct clk_hw *gate_hw, const struct clk_ops *gate_ops, unsigned long flags); +void clk_unregister_composite(struct clk *clk); +struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name, + const char * const *parent_names, int num_parents, + struct clk_hw *mux_hw, const struct clk_ops *mux_ops, + struct clk_hw *rate_hw, const struct clk_ops *rate_ops, + struct clk_hw *gate_hw, const struct clk_ops *gate_ops, + unsigned long flags); +void clk_hw_unregister_composite(struct clk_hw *hw); /*** * struct clk_gpio_gate - gpio gated clock @@ -625,6 +676,10 @@ extern const struct clk_ops clk_gpio_gate_ops; struct clk *clk_register_gpio_gate(struct device *dev, const char *name, const char *parent_name, unsigned gpio, bool active_low, unsigned long flags); +struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name, + const char *parent_name, unsigned gpio, bool active_low, + unsigned long flags); +void clk_hw_unregister_gpio_gate(struct clk_hw *hw); /** * struct clk_gpio_mux - gpio controlled clock multiplexer @@ -640,6 +695,10 @@ extern const struct clk_ops clk_gpio_mux_ops; struct clk *clk_register_gpio_mux(struct device *dev, const char *name, const char * const *parent_names, u8 num_parents, unsigned gpio, bool active_low, unsigned long flags); +struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, unsigned gpio, + bool active_low, unsigned long flags); +void clk_hw_unregister_gpio_mux(struct clk_hw *hw); /** * clk_register - allocate a new clock, register it and return an opaque cookie @@ -655,9 +714,15 @@ struct clk *clk_register_gpio_mux(struct device *dev, const char *name, struct clk *clk_register(struct device *dev, struct clk_hw *hw); struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw); +int __must_check clk_hw_register(struct device *dev, struct clk_hw *hw); +int __must_check devm_clk_hw_register(struct device *dev, struct clk_hw *hw); + void clk_unregister(struct clk *clk); void devm_clk_unregister(struct device *dev, struct clk *clk); +void clk_hw_unregister(struct clk_hw *hw); +void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw); + /* helper functions */ const char *__clk_get_name(const struct clk *clk); const char *clk_hw_get_name(const struct clk_hw *hw); @@ -703,6 +768,11 @@ struct clk_onecell_data { unsigned int clk_num; }; +struct clk_hw_onecell_data { + size_t num; + struct clk_hw *hws[]; +}; + extern struct of_device_id __clk_of_table; #define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn) @@ -712,15 +782,24 @@ int of_clk_add_provider(struct device_node *np, struct clk *(*clk_src_get)(struct of_phandle_args *args, void *data), void *data); +int of_clk_add_hw_provider(struct device_node *np, + struct clk_hw *(*get)(struct of_phandle_args *clkspec, + void *data), + void *data); void of_clk_del_provider(struct device_node *np); struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, void *data); +struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, + void *data); struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data); +struct clk_hw *of_clk_hw_onecell_get(struct of_phandle_args *clkspec, + void *data); unsigned int of_clk_get_parent_count(struct device_node *np); int of_clk_parent_fill(struct device_node *np, const char **parents, unsigned int size); const char *of_clk_get_parent_name(struct device_node *np, int index); - +int 
of_clk_detect_critical(struct device_node *np, int index, + unsigned long *flags); void of_clk_init(const struct of_device_id *matches); #else /* !CONFIG_OF */ @@ -732,17 +811,34 @@ static inline int of_clk_add_provider(struct device_node *np, { return 0; } +static inline int of_clk_add_hw_provider(struct device_node *np, + struct clk_hw *(*get)(struct of_phandle_args *clkspec, + void *data), + void *data) +{ + return 0; +} static inline void of_clk_del_provider(struct device_node *np) {} static inline struct clk *of_clk_src_simple_get( struct of_phandle_args *clkspec, void *data) { return ERR_PTR(-ENOENT); } +static inline struct clk_hw * +of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data) +{ + return ERR_PTR(-ENOENT); +} static inline struct clk *of_clk_src_onecell_get( struct of_phandle_args *clkspec, void *data) { return ERR_PTR(-ENOENT); } +static inline struct clk_hw * +of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data) +{ + return ERR_PTR(-ENOENT); +} static inline int of_clk_get_parent_count(struct device_node *np) { return 0; @@ -757,6 +853,11 @@ static inline const char *of_clk_get_parent_name(struct device_node *np, { return NULL; } +static inline int of_clk_detect_critical(struct device_node *np, int index, + unsigned long *flags) +{ + return 0; +} static inline void of_clk_init(const struct of_device_id *matches) {} #endif /* CONFIG_OF */ diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h index 7adfd80fbf55..ba6fa4148515 100644 --- a/include/linux/clk/renesas.h +++ b/include/linux/clk/renesas.h @@ -24,12 +24,20 @@ void r8a7778_clocks_init(u32 mode); void r8a7779_clocks_init(u32 mode); void rcar_gen2_clocks_init(u32 mode); -#ifdef CONFIG_PM_GENERIC_DOMAINS_OF void cpg_mstp_add_clk_domain(struct device_node *np); -int cpg_mstp_attach_dev(struct generic_pm_domain *domain, struct device *dev); -void cpg_mstp_detach_dev(struct generic_pm_domain *domain, struct device *dev); +#ifdef CONFIG_CLK_RENESAS_CPG_MSTP +int cpg_mstp_attach_dev(struct generic_pm_domain *unused, struct device *dev); +void cpg_mstp_detach_dev(struct generic_pm_domain *unused, struct device *dev); #else -static inline void cpg_mstp_add_clk_domain(struct device_node *np) {} +#define cpg_mstp_attach_dev NULL +#define cpg_mstp_detach_dev NULL #endif +#ifdef CONFIG_CLK_RENESAS_CPG_MSSR +int cpg_mssr_attach_dev(struct generic_pm_domain *unused, struct device *dev); +void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev); +#else +#define cpg_mssr_attach_dev NULL +#define cpg_mssr_detach_dev NULL +#endif #endif diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h index 57bf7aab4516..7007a5f48080 100644 --- a/include/linux/clk/tegra.h +++ b/include/linux/clk/tegra.h @@ -121,4 +121,9 @@ static inline void tegra_cpu_clock_resume(void) } #endif +extern void tegra210_xusb_pll_hw_control_enable(void); +extern void tegra210_xusb_pll_hw_sequence_start(void); +extern void tegra210_sata_pll_hw_control_enable(void); +extern void tegra210_sata_pll_hw_sequence_start(void); + #endif /* __LINUX_CLK_TEGRA_H_ */ diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index dc5164a6df29..6110fe09ed18 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -37,6 +37,7 @@ * @last_rounded_n: cache of the last N result of omap2_dpll_round_rate() * @min_divider: minimum valid non-bypass divider value (actual) * @max_divider: maximum valid non-bypass divider value (actual) + * @max_rate: maximum clock rate for the DPLL * @modes: possible 
values of @enable_mask * @autoidle_reg: register containing the DPLL autoidle mode bitfield * @idlest_reg: register containing the DPLL idle status bitfield @@ -81,6 +82,7 @@ struct dpll_data { u8 last_rounded_n; u8 min_divider; u16 max_divider; + unsigned long max_rate; u8 modes; void __iomem *autoidle_reg; void __iomem *idlest_reg; diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h index c2c04f7cbe8a..2eabc862abdb 100644 --- a/include/linux/clkdev.h +++ b/include/linux/clkdev.h @@ -15,6 +15,7 @@ #include <asm/clkdev.h> struct clk; +struct clk_hw; struct device; struct clk_lookup { @@ -34,18 +35,22 @@ struct clk_lookup { struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...) __printf(3, 4); +struct clk_lookup *clkdev_hw_alloc(struct clk_hw *hw, const char *con_id, + const char *dev_fmt, ...) __printf(3, 4); void clkdev_add(struct clk_lookup *cl); void clkdev_drop(struct clk_lookup *cl); struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id, const char *dev_fmt, ...) __printf(3, 4); +struct clk_lookup *clkdev_hw_create(struct clk_hw *hw, const char *con_id, + const char *dev_fmt, ...) __printf(3, 4); void clkdev_add_table(struct clk_lookup *, size_t); int clk_add_alias(const char *, const char *, const char *, struct device *); int clk_register_clkdev(struct clk *, const char *, const char *); -int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t); +int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *); #ifdef CONFIG_COMMON_CLK int __clk_get(struct clk *clk); diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index a307bf62974f..44a1aff22566 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -15,6 +15,7 @@ #include <linux/cache.h> #include <linux/timer.h> #include <linux/init.h> +#include <linux/of.h> #include <asm/div64.h> #include <asm/io.h> diff --git a/include/linux/compaction.h b/include/linux/compaction.h index d7c8de583a23..a58c852a268f 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -2,21 +2,46 @@ #define _LINUX_COMPACTION_H /* Return values for compact_zone() and try_to_compact_pages() */ -/* compaction didn't start as it was deferred due to past failures */ -#define COMPACT_DEFERRED 0 -/* compaction didn't start as it was not possible or direct reclaim was more suitable */ -#define COMPACT_SKIPPED 1 -/* compaction should continue to another pageblock */ -#define COMPACT_CONTINUE 2 -/* direct compaction partially compacted a zone and there are suitable pages */ -#define COMPACT_PARTIAL 3 -/* The full zone was compacted */ -#define COMPACT_COMPLETE 4 -/* For more detailed tracepoint output */ -#define COMPACT_NO_SUITABLE_PAGE 5 -#define COMPACT_NOT_SUITABLE_ZONE 6 -#define COMPACT_CONTENDED 7 /* When adding new states, please adjust include/trace/events/compaction.h */ +enum compact_result { + /* For more detailed tracepoint output - internal to compaction */ + COMPACT_NOT_SUITABLE_ZONE, + /* + * compaction didn't start as it was not possible or direct reclaim + * was more suitable + */ + COMPACT_SKIPPED, + /* compaction didn't start as it was deferred due to past failures */ + COMPACT_DEFERRED, + + /* compaction not active last round */ + COMPACT_INACTIVE = COMPACT_DEFERRED, + + /* For more detailed tracepoint output - internal to compaction */ + COMPACT_NO_SUITABLE_PAGE, + /* compaction should continue to another pageblock */ + COMPACT_CONTINUE, + + /* + * The full zone was compacted scanned but wasn't 
successfull to compact + * suitable pages. + */ + COMPACT_COMPLETE, + /* + * direct compaction has scanned part of the zone but wasn't successfull + * to compact suitable pages. + */ + COMPACT_PARTIAL_SKIPPED, + + /* compaction terminated prematurely due to lock contentions */ + COMPACT_CONTENDED, + + /* + * direct compaction partially compacted a zone and there might be + * suitable pages + */ + COMPACT_PARTIAL, +}; /* Used to signal whether compaction detected need_sched() or lock contention */ /* No contention detected */ @@ -38,13 +63,14 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write, extern int sysctl_compact_unevictable_allowed; extern int fragmentation_index(struct zone *zone, unsigned int order); -extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order, - int alloc_flags, const struct alloc_context *ac, - enum migrate_mode mode, int *contended); +extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, + unsigned int order, + unsigned int alloc_flags, const struct alloc_context *ac, + enum migrate_mode mode, int *contended); extern void compact_pgdat(pg_data_t *pgdat, int order); extern void reset_isolation_suitable(pg_data_t *pgdat); -extern unsigned long compaction_suitable(struct zone *zone, int order, - int alloc_flags, int classzone_idx); +extern enum compact_result compaction_suitable(struct zone *zone, int order, + unsigned int alloc_flags, int classzone_idx); extern void defer_compaction(struct zone *zone, int order); extern bool compaction_deferred(struct zone *zone, int order); @@ -52,12 +78,80 @@ extern void compaction_defer_reset(struct zone *zone, int order, bool alloc_success); extern bool compaction_restarting(struct zone *zone, int order); +/* Compaction has made some progress and retrying makes sense */ +static inline bool compaction_made_progress(enum compact_result result) +{ + /* + * Even though this might sound confusing this in fact tells us + * that the compaction successfully isolated and migrated some + * pageblocks. + */ + if (result == COMPACT_PARTIAL) + return true; + + return false; +} + +/* Compaction has failed and it doesn't make much sense to keep retrying. */ +static inline bool compaction_failed(enum compact_result result) +{ + /* All zones were scanned completely and still not result. */ + if (result == COMPACT_COMPLETE) + return true; + + return false; +} + +/* + * Compaction has backed off for some reason. It might be throttling or + * lock contention. Retrying is still worthwhile. + */ +static inline bool compaction_withdrawn(enum compact_result result) +{ + /* + * Compaction backed off due to watermark checks for order-0 + * so the regular reclaim has to try harder and reclaim something. + */ + if (result == COMPACT_SKIPPED) + return true; + + /* + * If compaction is deferred for high-order allocations, it is + * because sync compaction recently failed. If this is the case + * and the caller requested a THP allocation, we do not want + * to heavily disrupt the system, so we fail the allocation + * instead of entering direct reclaim. + */ + if (result == COMPACT_DEFERRED) + return true; + + /* + * If compaction in async mode encounters contention or blocks higher + * priority task we back off early rather than cause stalls. + */ + if (result == COMPACT_CONTENDED) + return true; + + /* + * Page scanners have met but we haven't scanned full zones so this + * is a back off in fact. 
+ */ + if (result == COMPACT_PARTIAL_SKIPPED) + return true; + + return false; +} + + +bool compaction_zonelist_suitable(struct alloc_context *ac, int order, + int alloc_flags); + extern int kcompactd_run(int nid); extern void kcompactd_stop(int nid); extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx); #else -static inline unsigned long try_to_compact_pages(gfp_t gfp_mask, +static inline enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, int alloc_flags, const struct alloc_context *ac, enum migrate_mode mode, int *contended) @@ -73,7 +167,7 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat) { } -static inline unsigned long compaction_suitable(struct zone *zone, int order, +static inline enum compact_result compaction_suitable(struct zone *zone, int order, int alloc_flags, int classzone_idx) { return COMPACT_SKIPPED; @@ -88,6 +182,21 @@ static inline bool compaction_deferred(struct zone *zone, int order) return true; } +static inline bool compaction_made_progress(enum compact_result result) +{ + return false; +} + +static inline bool compaction_failed(enum compact_result result) +{ + return false; +} + +static inline bool compaction_withdrawn(enum compact_result result) +{ + return true; +} + static inline int kcompactd_run(int nid) { return 0; diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 3d5202eda22f..e2949397c19b 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -142,6 +142,7 @@ #if GCC_VERSION >= 30400 #define __must_check __attribute__((warn_unused_result)) +#define __malloc __attribute__((__malloc__)) #endif #if GCC_VERSION >= 40000 diff --git a/include/linux/compiler.h b/include/linux/compiler.h index b5ff9881bef8..793c0829e3a3 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -357,6 +357,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s #define __deprecated_for_modules #endif +#ifndef __malloc +#define __malloc +#endif + /* * Allow us to avoid 'defined but not used' warnings on functions and data, * as well as force them to be emitted to the assembly file. 
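The __malloc attribute wired up in compiler-gcc.h and compiler.h above is the same annotation applied to the bootmem allocator prototypes earlier in this diff; with GCC >= 3.4 it expands to __attribute__((__malloc__)) and tells the compiler that the returned pointer does not alias any other live pointer. A minimal sketch of how a header could annotate its own allocator; example_early_alloc() is a hypothetical name, not part of this diff:

#include <linux/compiler.h>
#include <linux/types.h>

/*
 * Hypothetical allocator prototype, annotated like the bootmem ones
 * above so callers benefit from malloc-style alias analysis.
 */
extern void *example_early_alloc(unsigned long size, unsigned long align,
				 unsigned long goal) __malloc;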
diff --git a/include/linux/console.h b/include/linux/console.h index ea731af2451e..98c8615dc300 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -47,7 +47,7 @@ struct consw { int (*con_font_copy)(struct vc_data *, int); int (*con_resize)(struct vc_data *, unsigned int, unsigned int, unsigned int); - int (*con_set_palette)(struct vc_data *, unsigned char *); + int (*con_set_palette)(struct vc_data *, const unsigned char *); int (*con_scrolldelta)(struct vc_data *, int); int (*con_set_origin)(struct vc_data *); void (*con_save_screen)(struct vc_data *); @@ -191,6 +191,8 @@ void vcs_remove_sysfs(int index); #ifdef CONFIG_VGA_CONSOLE extern bool vgacon_text_force(void); +#else +static inline bool vgacon_text_force(void) { return false; } #endif #endif /* _LINUX_CONSOLE_H */ diff --git a/include/linux/coresight-stm.h b/include/linux/coresight-stm.h new file mode 100644 index 000000000000..a978bb85599a --- /dev/null +++ b/include/linux/coresight-stm.h @@ -0,0 +1,6 @@ +#ifndef __LINUX_CORESIGHT_STM_H_ +#define __LINUX_CORESIGHT_STM_H_ + +#include <uapi/linux/coresight-stm.h> + +#endif diff --git a/include/linux/cpufreq-dt.h b/include/linux/cpufreq-dt.h deleted file mode 100644 index 0414009e2c30..000000000000 --- a/include/linux/cpufreq-dt.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (C) 2014 Marvell - * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef __CPUFREQ_DT_H__ -#define __CPUFREQ_DT_H__ - -struct cpufreq_dt_platform_data { - /* - * True when each CPU has its own clock to control its - * frequency, false when all CPUs are controlled by a single - * clock. - */ - bool independent_clocks; -}; - -#endif /* __CPUFREQ_DT_H__ */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 718e8725de8a..4e81e08db752 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -102,6 +102,17 @@ struct cpufreq_policy { */ struct rw_semaphore rwsem; + /* + * Fast switch flags: + * - fast_switch_possible should be set by the driver if it can + * guarantee that frequency can be changed on any CPU sharing the + * policy and that the change will affect all of the policy CPUs then. + * - fast_switch_enabled is to be set by governors that support fast + * freqnency switching with the help of cpufreq_enable_fast_switch(). + */ + bool fast_switch_possible; + bool fast_switch_enabled; + /* Synchronization for frequency transitions */ bool transition_ongoing; /* Tracks transition status */ spinlock_t transition_lock; @@ -156,6 +167,8 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); int cpufreq_update_policy(unsigned int cpu); bool have_governor_per_policy(void); struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); +void cpufreq_enable_fast_switch(struct cpufreq_policy *policy); +void cpufreq_disable_fast_switch(struct cpufreq_policy *policy); #else static inline unsigned int cpufreq_get(unsigned int cpu) { @@ -236,6 +249,8 @@ struct cpufreq_driver { unsigned int relation); /* Deprecated */ int (*target_index)(struct cpufreq_policy *policy, unsigned int index); + unsigned int (*fast_switch)(struct cpufreq_policy *policy, + unsigned int target_freq); /* * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION * unset. 
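The fast_switch_possible policy flag and the ->fast_switch() driver callback added above let a governor change the frequency from scheduler context without sleeping. A rough sketch of how a driver might opt in, assuming the callback returns the frequency actually programmed; every example_* identifier is hypothetical and not part of this diff:

#include <linux/cpufreq.h>

/*
 * Hypothetical low-level helper: programs the new P-state and returns
 * the frequency that was actually set, or 0 on failure.
 */
static unsigned int example_write_pstate(unsigned int cpu,
					 unsigned int target_freq);

static unsigned int example_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	/* must not sleep or take sleeping locks */
	return example_write_pstate(policy->cpu, target_freq);
}

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	/*
	 * Any CPU sharing this policy can switch the frequency for all
	 * of them, so a governor may use cpufreq_driver_fast_switch().
	 */
	policy->fast_switch_possible = true;
	return 0;
}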
@@ -426,6 +441,20 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, #define CPUFREQ_POLICY_POWERSAVE (1) #define CPUFREQ_POLICY_PERFORMANCE (2) +/* + * The polling frequency depends on the capability of the processor. Default + * polling frequency is 1000 times the transition latency of the processor. The + * ondemand governor will work on any processor with transition latency <= 10ms, + * using appropriate sampling rate. + * + * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL) + * the ondemand governor will not work. All times here are in us (microseconds). + */ +#define MIN_SAMPLING_RATE_RATIO (2) +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (20) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + /* Governor Events */ #define CPUFREQ_GOV_START 1 #define CPUFREQ_GOV_STOP 2 @@ -450,6 +479,8 @@ struct cpufreq_governor { }; /* Pass a target to the cpufreq driver */ +unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, + unsigned int target_freq); int cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation); @@ -462,6 +493,29 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor); struct cpufreq_governor *cpufreq_default_governor(void); struct cpufreq_governor *cpufreq_fallback_governor(void); +/* Governor attribute set */ +struct gov_attr_set { + struct kobject kobj; + struct list_head policy_list; + struct mutex update_lock; + int usage_count; +}; + +/* sysfs ops for cpufreq governors */ +extern const struct sysfs_ops governor_sysfs_ops; + +void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node); +void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node); +unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node); + +/* Governor sysfs attribute */ +struct governor_attr { + struct attribute attr; + ssize_t (*show)(struct gov_attr_set *attr_set, char *buf); + ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf, + size_t count); +}; + /********************************************************************* * FREQUENCY TABLE HELPERS * *********************************************************************/ diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 85a868ccb493..bfc204e70338 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -16,26 +16,26 @@ #ifdef CONFIG_CPUSETS -extern struct static_key cpusets_enabled_key; +extern struct static_key_false cpusets_enabled_key; static inline bool cpusets_enabled(void) { - return static_key_false(&cpusets_enabled_key); + return static_branch_unlikely(&cpusets_enabled_key); } static inline int nr_cpusets(void) { /* jump label reference count + the top-level cpuset */ - return static_key_count(&cpusets_enabled_key) + 1; + return static_key_count(&cpusets_enabled_key.key) + 1; } static inline void cpuset_inc(void) { - static_key_slow_inc(&cpusets_enabled_key); + static_branch_inc(&cpusets_enabled_key); } static inline void cpuset_dec(void) { - static_key_slow_dec(&cpusets_enabled_key); + static_branch_dec(&cpusets_enabled_key); } extern int cpuset_init(void); @@ -48,16 +48,25 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p); void cpuset_init_current_mems_allowed(void); int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); -extern int __cpuset_node_allowed(int node, gfp_t gfp_mask); +extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask); -static inline 
int cpuset_node_allowed(int node, gfp_t gfp_mask) +static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) { - return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask); + if (cpusets_enabled()) + return __cpuset_node_allowed(node, gfp_mask); + return true; } -static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) +static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { - return cpuset_node_allowed(zone_to_nid(z), gfp_mask); + return __cpuset_node_allowed(zone_to_nid(z), gfp_mask); +} + +static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) +{ + if (cpusets_enabled()) + return __cpuset_zone_allowed(z, gfp_mask); + return true; } extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, @@ -172,14 +181,19 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) return 1; } -static inline int cpuset_node_allowed(int node, gfp_t gfp_mask) +static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) { - return 1; + return true; } -static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) +static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { - return 1; + return true; +} + +static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) +{ + return true; } static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 3849fce7ecfe..3873697ba21c 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -34,9 +34,13 @@ void vmcore_cleanup(void); /* * Architecture code can redefine this if there are any special checks - * needed for 64-bit ELF vmcores. In case of 32-bit only architecture, - * this can be set to zero. + * needed for 32-bit ELF or 64-bit ELF vmcores. In case of 32-bit + * only architecture, vmcore_elf64_check_arch can be set to zero. */ +#ifndef vmcore_elf32_check_arch +#define vmcore_elf32_check_arch(x) elf_check_arch(x) +#endif + #ifndef vmcore_elf64_check_arch #define vmcore_elf64_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x)) #endif diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 99c94899ad0f..6e28c895c376 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -948,8 +948,7 @@ static inline struct ablkcipher_request *ablkcipher_request_cast( * encrypt and decrypt API calls. During the allocation, the provided ablkcipher * handle is registered in the request data structure. * - * Return: allocated request handle in case of success; IS_ERR() is true in case - * of an error, PTR_ERR() returns the error code. 
+ * Return: allocated request handle in case of success, or NULL if out of memory */ static inline struct ablkcipher_request *ablkcipher_request_alloc( struct crypto_ablkcipher *tfm, gfp_t gfp) diff --git a/include/linux/dax.h b/include/linux/dax.h index 636dd59ab505..982a6c4a62f3 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -5,7 +5,7 @@ #include <linux/mm.h> #include <asm/pgtable.h> -ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t, +ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, get_block_t, dio_iodone_t, int flags); int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size); int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 7e9422cb5989..f8506e8dd4d4 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -123,7 +123,10 @@ struct dentry { unsigned long d_time; /* used by d_revalidate */ void *d_fsdata; /* fs-specific data */ - struct list_head d_lru; /* LRU list */ + union { + struct list_head d_lru; /* LRU list */ + wait_queue_head_t *d_wait; /* in-lookup ones only */ + }; struct list_head d_child; /* child of parent list */ struct list_head d_subdirs; /* our children */ /* @@ -131,6 +134,7 @@ struct dentry { */ union { struct hlist_node d_alias; /* inode alias list */ + struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */ struct rcu_head d_rcu; } d_u; }; @@ -232,6 +236,8 @@ struct dentry_operations { #define DCACHE_ENCRYPTED_WITH_KEY 0x04000000 /* dir is encrypted with a valid key */ #define DCACHE_OP_REAL 0x08000000 +#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */ + extern seqlock_t rename_lock; /* @@ -248,6 +254,8 @@ extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op /* allocate/de-allocate */ extern struct dentry * d_alloc(struct dentry *, const struct qstr *); extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); +extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *, + wait_queue_head_t *); extern struct dentry * d_splice_alias(struct inode *, struct dentry *); extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); extern struct dentry * d_exact_alias(struct dentry *, struct inode *); @@ -367,6 +375,22 @@ static inline void dont_mount(struct dentry *dentry) spin_unlock(&dentry->d_lock); } +extern void __d_lookup_done(struct dentry *); + +static inline int d_in_lookup(struct dentry *dentry) +{ + return dentry->d_flags & DCACHE_PAR_LOOKUP; +} + +static inline void d_lookup_done(struct dentry *dentry) +{ + if (unlikely(d_in_lookup(dentry))) { + spin_lock(&dentry->d_lock); + __d_lookup_done(dentry); + spin_unlock(&dentry->d_lock); + } +} + extern void dput(struct dentry *); static inline bool d_managed(const struct dentry *dentry) diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 981e53ab84e8..1438e2322d5c 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -19,9 +19,11 @@ #include <linux/seq_file.h> #include <linux/types.h> +#include <linux/compiler.h> struct device; struct file_operations; +struct srcu_struct; struct debugfs_blob_wrapper { void *data; @@ -41,14 +43,16 @@ struct debugfs_regset32 { extern struct dentry *arch_debugfs_dir; -#if defined(CONFIG_DEBUG_FS) +extern struct srcu_struct debugfs_srcu; -/* declared over in file.c */ -extern const struct file_operations 
debugfs_file_operations; +#if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops); +struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops); struct dentry *debugfs_create_file_size(const char *name, umode_t mode, struct dentry *parent, void *data, @@ -68,6 +72,31 @@ struct dentry *debugfs_create_automount(const char *name, void debugfs_remove(struct dentry *dentry); void debugfs_remove_recursive(struct dentry *dentry); +int debugfs_use_file_start(const struct dentry *dentry, int *srcu_idx) + __acquires(&debugfs_srcu); + +void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu); + +ssize_t debugfs_attr_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos); +ssize_t debugfs_attr_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos); + +#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ +static int __fops ## _open(struct inode *inode, struct file *file) \ +{ \ + __simple_attr_check_format(__fmt, 0ull); \ + return simple_attr_open(inode, file, __get, __set, __fmt); \ +} \ +static const struct file_operations __fops = { \ + .owner = THIS_MODULE, \ + .open = __fops ## _open, \ + .release = simple_attr_release, \ + .read = debugfs_attr_read, \ + .write = debugfs_attr_write, \ + .llseek = generic_file_llseek, \ +} + struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, const char *new_name); @@ -176,6 +205,20 @@ static inline void debugfs_remove(struct dentry *dentry) static inline void debugfs_remove_recursive(struct dentry *dentry) { } +static inline int debugfs_use_file_start(const struct dentry *dentry, + int *srcu_idx) + __acquires(&debugfs_srcu) +{ + return 0; +} + +static inline void debugfs_use_file_finish(int srcu_idx) + __releases(&debugfs_srcu) +{ } + +#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ + static const struct file_operations __fops = { 0 } + static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, char *new_name) { diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h index 98ffcbd4888e..46056cb161fc 100644 --- a/include/linux/debugobjects.h +++ b/include/linux/debugobjects.h @@ -38,8 +38,10 @@ struct debug_obj { * @name: name of the object typee * @debug_hint: function returning address, which have associated * kernel symbol, to allow identify the object + * @is_static_object return true if the obj is static, otherwise return false * @fixup_init: fixup function, which is called when the init check - * fails + * fails. 
All fixup functions must return true if fixup + * was successful, otherwise return false * @fixup_activate: fixup function, which is called when the activate check * fails * @fixup_destroy: fixup function, which is called when the destroy check @@ -51,12 +53,13 @@ struct debug_obj { */ struct debug_obj_descr { const char *name; - void *(*debug_hint) (void *addr); - int (*fixup_init) (void *addr, enum debug_obj_state state); - int (*fixup_activate) (void *addr, enum debug_obj_state state); - int (*fixup_destroy) (void *addr, enum debug_obj_state state); - int (*fixup_free) (void *addr, enum debug_obj_state state); - int (*fixup_assert_init)(void *addr, enum debug_obj_state state); + void *(*debug_hint)(void *addr); + bool (*is_static_object)(void *addr); + bool (*fixup_init)(void *addr, enum debug_obj_state state); + bool (*fixup_activate)(void *addr, enum debug_obj_state state); + bool (*fixup_destroy)(void *addr, enum debug_obj_state state); + bool (*fixup_free)(void *addr, enum debug_obj_state state); + bool (*fixup_assert_init)(void *addr, enum debug_obj_state state); }; #ifdef CONFIG_DEBUG_OBJECTS diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h index c0a360e99f64..269521f143ac 100644 --- a/include/linux/devcoredump.h +++ b/include/linux/devcoredump.h @@ -1,3 +1,22 @@ +/* + * This file is provided under the GPLv2 license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + */ #ifndef __DEVCOREDUMP_H #define __DEVCOREDUMP_H @@ -5,17 +24,62 @@ #include <linux/module.h> #include <linux/vmalloc.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> + +/* + * _devcd_free_sgtable - free all the memory of the given scatterlist table + * (i.e. 
both pages and scatterlist instances) + * NOTE: if two tables allocated and chained using the sg_chain function then + * this function should be called only once on the first table + * @table: pointer to sg_table to free + */ +static inline void _devcd_free_sgtable(struct scatterlist *table) +{ + int i; + struct page *page; + struct scatterlist *iter; + struct scatterlist *delete_iter; + + /* free pages */ + iter = table; + for_each_sg(table, iter, sg_nents(table), i) { + page = sg_page(iter); + if (page) + __free_page(page); + } + + /* then free all chained tables */ + iter = table; + delete_iter = table; /* always points on a head of a table */ + while (!sg_is_last(iter)) { + iter++; + if (sg_is_chain(iter)) { + iter = sg_chain_ptr(iter); + kfree(delete_iter); + delete_iter = iter; + } + } + + /* free the last table */ + kfree(delete_iter); +} + + #ifdef CONFIG_DEV_COREDUMP -void dev_coredumpv(struct device *dev, const void *data, size_t datalen, +void dev_coredumpv(struct device *dev, void *data, size_t datalen, gfp_t gfp); void dev_coredumpm(struct device *dev, struct module *owner, - const void *data, size_t datalen, gfp_t gfp, + void *data, size_t datalen, gfp_t gfp, ssize_t (*read)(char *buffer, loff_t offset, size_t count, - const void *data, size_t datalen), - void (*free)(const void *data)); + void *data, size_t datalen), + void (*free)(void *data)); + +void dev_coredumpsg(struct device *dev, struct scatterlist *table, + size_t datalen, gfp_t gfp); #else -static inline void dev_coredumpv(struct device *dev, const void *data, +static inline void dev_coredumpv(struct device *dev, void *data, size_t datalen, gfp_t gfp) { vfree(data); @@ -23,13 +87,19 @@ static inline void dev_coredumpv(struct device *dev, const void *data, static inline void dev_coredumpm(struct device *dev, struct module *owner, - const void *data, size_t datalen, gfp_t gfp, + void *data, size_t datalen, gfp_t gfp, ssize_t (*read)(char *buffer, loff_t offset, size_t count, - const void *data, size_t datalen), - void (*free)(const void *data)) + void *data, size_t datalen), + void (*free)(void *data)) { free(data); } + +static inline void dev_coredumpsg(struct device *dev, struct scatterlist *table, + size_t datalen, gfp_t gfp) +{ + _devcd_free_sgtable(table); +} #endif /* CONFIG_DEV_COREDUMP */ #endif /* __DEVCOREDUMP_H */ diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 6fa02a20eb63..2de4e2eea180 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -19,6 +19,13 @@ #define DEVFREQ_NAME_LEN 16 +/* DEVFREQ notifier interface */ +#define DEVFREQ_TRANSITION_NOTIFIER (0) + +/* Transition notifiers of DEVFREQ_TRANSITION_NOTIFIER */ +#define DEVFREQ_PRECHANGE (0) +#define DEVFREQ_POSTCHANGE (1) + struct devfreq; /** @@ -143,6 +150,7 @@ struct devfreq_governor { * @trans_table: Statistics of devfreq transitions * @time_in_state: Statistics of devfreq states * @last_stat_updated: The last time stat updated + * @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifier * * This structure stores the devfreq information for a give device. 
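A usage sketch for the devcoredump interface reworked a few hunks above; the driver function, device pointer and buffer contents are placeholders. The simplest entry point, dev_coredumpv(), takes ownership of a vmalloc()ed buffer and releases it with vfree() once the dump has been read or has timed out, matching the vfree() in the !CONFIG_DEV_COREDUMP stub shown above.

#include <linux/devcoredump.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static void my_driver_coredump(struct device *dev, const void *fw_state,
			       size_t len)
{
	void *buf = vmalloc(len);

	if (!buf)
		return;
	memcpy(buf, fw_state, len);
	/* devcoredump now owns buf and will vfree() it when done. */
	dev_coredumpv(dev, buf, len, GFP_KERNEL);
}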
* @@ -177,6 +185,13 @@ struct devfreq { unsigned int *trans_table; unsigned long *time_in_state; unsigned long last_stat_updated; + + struct srcu_notifier_head transition_notifier_list; +}; + +struct devfreq_freqs { + unsigned long old; + unsigned long new; }; #if defined(CONFIG_PM_DEVFREQ) @@ -207,6 +222,22 @@ extern int devm_devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq); extern void devm_devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq); +extern int devfreq_register_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +extern int devfreq_unregister_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +extern int devm_devfreq_register_notifier(struct device *dev, + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +extern void devm_devfreq_unregister_notifier(struct device *dev, + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +extern struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, + int index); /** * devfreq_update_stats() - update the last_status pointer in struct devfreq @@ -241,6 +272,39 @@ struct devfreq_simple_ondemand_data { }; #endif +#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE) +/** + * struct devfreq_passive_data - void *data fed to struct devfreq + * and devfreq_add_device + * @parent: the devfreq instance of parent device. + * @get_target_freq: Optional callback, Returns desired operating frequency + * for the device using passive governor. That is called + * when passive governor should decide the next frequency + * by using the new frequency of parent devfreq device + * using governors except for passive governor. + * If the devfreq device has the specific method to decide + * the next frequency, should use this callback. + * @this: the devfreq instance of own device. + * @nb: the notifier block for DEVFREQ_TRANSITION_NOTIFIER list + * + * The devfreq_passive_data have to set the devfreq instance of parent + * device with governors except for the passive governor. But, don't need to + * initialize the 'this' and 'nb' field because the devfreq core will handle + * them. + */ +struct devfreq_passive_data { + /* Should set the devfreq instance of parent device */ + struct devfreq *parent; + + /* Optional callback to decide the next frequency of passvice device */ + int (*get_target_freq)(struct devfreq *this, unsigned long *freq); + + /* For passive governor's internal use. 
Don't need to set them */ + struct devfreq *this; + struct notifier_block nb; +}; +#endif + #else /* !CONFIG_PM_DEVFREQ */ static inline struct devfreq *devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, @@ -307,6 +371,41 @@ static inline void devm_devfreq_unregister_opp_notifier(struct device *dev, { } +static inline int devfreq_register_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) +{ + return 0; +} + +static inline int devfreq_unregister_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) +{ + return 0; +} + +static inline int devm_devfreq_register_notifier(struct device *dev, + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) +{ + return 0; +} + +static inline void devm_devfreq_unregister_notifier(struct device *dev, + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) +{ +} + +static inline struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, + int index) +{ + return ERR_PTR(-ENODEV); +} + static inline int devfreq_update_stats(struct devfreq *df) { return -EINVAL; diff --git a/include/linux/device.h b/include/linux/device.h index 002c59728dbe..38f02814d53a 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -609,14 +609,14 @@ typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); #ifdef CONFIG_DEBUG_DEVRES extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, - int nid, const char *name); + int nid, const char *name) __malloc; #define devres_alloc(release, size, gfp) \ __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release) #define devres_alloc_node(release, size, gfp, nid) \ __devres_alloc_node(release, size, gfp, nid, #release) #else extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, - int nid); + int nid) __malloc; static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp) { return devres_alloc_node(release, size, gfp, NUMA_NO_NODE); @@ -648,12 +648,12 @@ extern void devres_remove_group(struct device *dev, void *id); extern int devres_release_group(struct device *dev, void *id); /* managed devm_k.alloc/kfree for device drivers */ -extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp); +extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc; extern __printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, - va_list ap); + va_list ap) __malloc; extern __printf(3, 4) -char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...); +char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) 
__malloc; static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) { return devm_kmalloc(dev, size, gfp | __GFP_ZERO); @@ -671,7 +671,7 @@ static inline void *devm_kcalloc(struct device *dev, return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); } extern void devm_kfree(struct device *dev, void *p); -extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); +extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc; extern void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp); @@ -956,11 +956,6 @@ static inline bool device_async_suspend_enabled(struct device *dev) return !!dev->power.async_suspend; } -static inline void pm_suspend_ignore_children(struct device *dev, bool enable) -{ - dev->power.ignore_children = enable; -} - static inline void dev_pm_syscore_device(struct device *dev, bool val) { #ifdef CONFIG_PM_SLEEP @@ -1293,8 +1288,11 @@ do { \ dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ } while (0) #else -#define dev_dbg_ratelimited(dev, fmt, ...) \ - no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#define dev_dbg_ratelimited(dev, fmt, ...) \ +do { \ + if (0) \ + dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ +} while (0) #endif #ifdef VERBOSE_DEBUG diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index fc481037478a..8443bbb5c071 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -38,8 +38,8 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent); * These implement the bulk of the relevant DMA mapping callbacks, but require * the arch code to take care of attributes and cache maintenance */ -struct page **iommu_dma_alloc(struct device *dev, size_t size, - gfp_t gfp, int prot, dma_addr_t *handle, +struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, + struct dma_attrs *attrs, int prot, dma_addr_t *handle, void (*flush_page)(struct device *, const void *, phys_addr_t)); void iommu_dma_free(struct device *dev, struct page **pages, size_t size, dma_addr_t *handle); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 9ea9aba28049..71c1b215ef66 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -514,7 +514,7 @@ extern u64 dma_get_required_mask(struct device *dev); #ifndef arch_setup_dma_ops static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, - u64 size, struct iommu_ops *iommu, + u64 size, const struct iommu_ops *iommu, bool coherent) { } #endif diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h index 71456442ebe3..f2e538aaddad 100644 --- a/include/linux/dma/dw.h +++ b/include/linux/dma/dw.h @@ -27,6 +27,7 @@ struct dw_dma; * @regs: memory mapped I/O space * @clk: hclk clock * @dw: struct dw_dma that is filed by dw_dma_probe() + * @pdata: pointer to platform data */ struct dw_dma_chip { struct device *dev; @@ -34,10 +35,12 @@ struct dw_dma_chip { void __iomem *regs; struct clk *clk; struct dw_dma *dw; + + const struct dw_dma_platform_data *pdata; }; /* Export to the platform drivers */ -int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata); +int dw_dma_probe(struct dw_dma_chip *chip); int dw_dma_remove(struct dw_dma_chip *chip); /* DMA API extensions */ diff --git a/include/linux/dma/xilinx_dma.h b/include/linux/dma/xilinx_dma.h index 34b98f276ed0..3ae300052553 100644 --- a/include/linux/dma/xilinx_dma.h +++ b/include/linux/dma/xilinx_dma.h @@ -41,6 +41,20 @@ struct xilinx_vdma_config { int ext_fsync; }; 
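The __malloc annotations added to the devres allocators in the device.h hunk above are a pure compiler hint: they mark the returned pointer as fresh (not aliasing any existing object), which sharpens alias analysis and diagnostics at call sites without changing any caller. A simplified sketch of what the marker usually expands to is below; the real definition lives in the kernel's compiler attribute headers, and the call site is only an example.

#include <linux/device.h>

/* Illustrative only; see the compiler-*.h headers for the real definition. */
#ifndef __malloc
#define __malloc __attribute__((__malloc__))
#endif

/* Call sites are unchanged, e.g.: */
static char *make_port_label(struct device *dev, int index)
{
	return devm_kasprintf(dev, GFP_KERNEL, "port-%d", index);
}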
+/** + * enum xdma_ip_type: DMA IP type. + * + * XDMA_TYPE_AXIDMA: Axi dma ip. + * XDMA_TYPE_CDMA: Axi cdma ip. + * XDMA_TYPE_VDMA: Axi vdma ip. + * + */ +enum xdma_ip_type { + XDMA_TYPE_AXIDMA = 0, + XDMA_TYPE_CDMA, + XDMA_TYPE_VDMA, +}; + int xilinx_vdma_channel_set_config(struct dma_chan *dchan, struct xilinx_vdma_config *cfg); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 017433712833..30de0197263a 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -804,6 +804,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( sg_dma_address(&sg) = buf; sg_dma_len(&sg) = len; + if (!chan || !chan->device || !chan->device->device_prep_slave_sg) + return NULL; + return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags, NULL); } @@ -812,6 +815,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction dir, unsigned long flags) { + if (!chan || !chan->device || !chan->device->device_prep_slave_sg) + return NULL; + return chan->device->device_prep_slave_sg(chan, sgl, sg_len, dir, flags, NULL); } @@ -823,6 +829,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg( enum dma_transfer_direction dir, unsigned long flags, struct rio_dma_ext *rio_ext) { + if (!chan || !chan->device || !chan->device->device_prep_slave_sg) + return NULL; + return chan->device->device_prep_slave_sg(chan, sgl, sg_len, dir, flags, rio_ext); } @@ -833,6 +842,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic( size_t period_len, enum dma_transfer_direction dir, unsigned long flags) { + if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic) + return NULL; + return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len, period_len, dir, flags); } @@ -841,6 +853,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( struct dma_chan *chan, struct dma_interleaved_template *xt, unsigned long flags) { + if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma) + return NULL; + return chan->device->device_prep_interleaved_dma(chan, xt, flags); } @@ -848,7 +863,7 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset( struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags) { - if (!chan || !chan->device) + if (!chan || !chan->device || !chan->device->device_prep_dma_memset) return NULL; return chan->device->device_prep_dma_memset(chan, dest, value, @@ -861,6 +876,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( struct scatterlist *src_sg, unsigned int src_nents, unsigned long flags) { + if (!chan || !chan->device || !chan->device->device_prep_dma_sg) + return NULL; + return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents, src_sg, src_nents, flags); } diff --git a/include/linux/efi.h b/include/linux/efi.h index df7acb51f3cc..c2db3ca22217 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -21,6 +21,7 @@ #include <linux/pfn.h> #include <linux/pstore.h> #include <linux/reboot.h> +#include <linux/uuid.h> #include <linux/screen_info.h> #include <asm/page.h> @@ -44,17 +45,10 @@ typedef u16 efi_char16_t; /* UNICODE character */ typedef u64 efi_physical_addr_t; typedef void *efi_handle_t; - -typedef struct { - u8 b[16]; -} efi_guid_t; +typedef uuid_le efi_guid_t; #define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \ -((efi_guid_t) \ -{{ (a) & 0xff, ((a) >> 8) & 
0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ - (b) & 0xff, ((b) >> 8) & 0xff, \ - (c) & 0xff, ((c) >> 8) & 0xff, \ - (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) + UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) /* * Generic EFI table header @@ -1117,7 +1111,7 @@ extern int efi_status_to_err(efi_status_t status); * Length of a GUID string (strlen("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")) * not including trailing NUL */ -#define EFI_VARIABLE_GUID_LEN 36 +#define EFI_VARIABLE_GUID_LEN UUID_STRING_LEN /* * The type of search to perform when calling boottime->locate_handle diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index e2b7bf27c03e..9ded8c6d8176 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -150,6 +150,13 @@ extern int __ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *link_ksettings); +void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst, + u32 legacy_u32); + +/* return false if src had higher bits set. lower bits always updated. */ +bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, + const unsigned long *src); + /** * struct ethtool_ops - optional netdev operations * @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index b90e9bdbd1dd..4c02c6521fef 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -508,4 +508,6 @@ enum { F2FS_FT_MAX }; +#define S_SHIFT 12 + #endif /* _LINUX_F2FS_FS_H */ diff --git a/include/linux/fb.h b/include/linux/fb.h index dfe88351341f..a964d076b4dc 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -673,6 +673,7 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, } /* drivers/video/fb_defio.c */ +int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma); extern void fb_deferred_io_init(struct fb_info *info); extern void fb_deferred_io_open(struct fb_info *info, struct inode *inode, diff --git a/include/linux/file.h b/include/linux/file.h index f87d30882a24..7444f5feda12 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -44,6 +44,7 @@ extern struct file *fget_raw(unsigned int fd); extern unsigned long __fdget(unsigned int fd); extern unsigned long __fdget_raw(unsigned int fd); extern unsigned long __fdget_pos(unsigned int fd); +extern void __f_unlock_pos(struct file *); static inline struct fd __to_fd(unsigned long v) { @@ -60,6 +61,18 @@ static inline struct fd fdget_raw(unsigned int fd) return __to_fd(__fdget_raw(fd)); } +static inline struct fd fdget_pos(int fd) +{ + return __to_fd(__fdget_pos(fd)); +} + +static inline void fdput_pos(struct fd f) +{ + if (f.flags & FDPUT_POS_UNLOCK) + __f_unlock_pos(f.file); + fdput(f); +} + extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); extern int replace_fd(unsigned fd, struct file *file, unsigned flags); extern void set_close_on_exec(unsigned int fd, int flag); diff --git a/include/linux/filter.h b/include/linux/filter.h index a51a5361695f..6fc31ef1da2d 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -13,6 +13,8 @@ #include <linux/printk.h> #include <linux/workqueue.h> #include <linux/sched.h> +#include <linux/capability.h> + #include <net/sch_generic.h> #include <asm/cacheflush.h> @@ -42,6 +44,15 @@ struct bpf_prog_aux; #define BPF_REG_X BPF_REG_7 #define BPF_REG_TMP BPF_REG_8 +/* Kernel hidden auxiliary/helper register for hardening step. + * Only used by eBPF JITs. 
It's nothing more than a temporary + * register that JITs use internally, only that here it's part + * of eBPF instructions that have been rewritten for blinding + * constants. See JIT pre-step in bpf_jit_blind_constants(). + */ +#define BPF_REG_AX MAX_BPF_REG +#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1) + /* BPF program can access up to 512 bytes of stack space. */ #define MAX_BPF_STACK 512 @@ -352,6 +363,22 @@ struct sk_filter { #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN +struct bpf_skb_data_end { + struct qdisc_skb_cb qdisc_cb; + void *data_end; +}; + +/* compute the linear packet data range [data, data_end) which + * will be accessed by cls_bpf and act_bpf programs + */ +static inline void bpf_compute_data_end(struct sk_buff *skb) +{ + struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; + + BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb)); + cb->data_end = skb->data + skb_headlen(skb); +} + static inline u8 *bpf_skb_cb(struct sk_buff *skb) { /* eBPF programs may read/write skb->cb[] area to transfer meta @@ -442,7 +469,7 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) int sk_filter(struct sock *sk, struct sk_buff *skb); -int bpf_prog_select_runtime(struct bpf_prog *fp); +struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); void bpf_prog_free(struct bpf_prog *fp); struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); @@ -465,14 +492,10 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, void bpf_prog_destroy(struct bpf_prog *fp); int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); -int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk, - bool locked); int sk_attach_bpf(u32 ufd, struct sock *sk); int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk); int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk); int sk_detach_filter(struct sock *sk); -int __sk_detach_filter(struct sock *sk, bool locked); - int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned int len); @@ -480,10 +503,17 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp); void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); -void bpf_int_jit_compile(struct bpf_prog *fp); + +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); bool bpf_helper_changes_skb_data(void *func); +struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, + const struct bpf_insn *patch, u32 len); + #ifdef CONFIG_BPF_JIT +extern int bpf_jit_enable; +extern int bpf_jit_harden; + typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); struct bpf_binary_header * @@ -495,6 +525,9 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr); void bpf_jit_compile(struct bpf_prog *fp); void bpf_jit_free(struct bpf_prog *fp); +struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); +void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other); + static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, u32 pass, void *image) { @@ -505,6 +538,33 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET, 16, 1, image, proglen, false); } + +static inline bool bpf_jit_is_ebpf(void) +{ +# ifdef CONFIG_HAVE_EBPF_JIT + return true; +# else + return false; +# endif +} + +static inline bool bpf_jit_blinding_enabled(void) +{ + /* These are the prerequisites, should someone 
ever have the + * idea to call blinding outside of them, we make sure to + * bail out. + */ + if (!bpf_jit_is_ebpf()) + return false; + if (!bpf_jit_enable) + return false; + if (!bpf_jit_harden) + return false; + if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN)) + return false; + + return true; +} #else static inline void bpf_jit_compile(struct bpf_prog *fp) { diff --git a/include/linux/fs.h b/include/linux/fs.h index 70e61b58baaf..5f61431d8673 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -323,6 +323,8 @@ struct writeback_control; #define IOCB_APPEND (1 << 1) #define IOCB_DIRECT (1 << 2) #define IOCB_HIPRI (1 << 3) +#define IOCB_DSYNC (1 << 4) +#define IOCB_SYNC (1 << 5) struct kiocb { struct file *ki_filp; @@ -394,7 +396,7 @@ struct address_space_operations { void (*invalidatepage) (struct page *, unsigned int, unsigned int); int (*releasepage) (struct page *, gfp_t); void (*freepage)(struct page *); - ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset); + ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter); /* * migrate the contents of a page to the specified target. If * migrate_mode is MIGRATE_ASYNC, it must not block. @@ -577,6 +579,18 @@ static inline void mapping_allow_writable(struct address_space *mapping) struct posix_acl; #define ACL_NOT_CACHED ((void *)(-1)) +static inline struct posix_acl * +uncached_acl_sentinel(struct task_struct *task) +{ + return (void *)task + 1; +} + +static inline bool +is_uncached_acl(struct posix_acl *acl) +{ + return (long)acl & 1; +} + #define IOP_FASTPERM 0x0001 #define IOP_LOOKUP 0x0002 #define IOP_NOFOLLOW 0x0004 @@ -635,7 +649,7 @@ struct inode { /* Misc */ unsigned long i_state; - struct mutex i_mutex; + struct rw_semaphore i_rwsem; unsigned long dirtied_when; /* jiffies of first dirtying */ unsigned long dirtied_time_when; @@ -672,6 +686,7 @@ struct inode { struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; + unsigned i_dir_seq; }; __u32 i_generation; @@ -721,27 +736,42 @@ enum inode_i_mutex_lock_class static inline void inode_lock(struct inode *inode) { - mutex_lock(&inode->i_mutex); + down_write(&inode->i_rwsem); } static inline void inode_unlock(struct inode *inode) { - mutex_unlock(&inode->i_mutex); + up_write(&inode->i_rwsem); +} + +static inline void inode_lock_shared(struct inode *inode) +{ + down_read(&inode->i_rwsem); +} + +static inline void inode_unlock_shared(struct inode *inode) +{ + up_read(&inode->i_rwsem); } static inline int inode_trylock(struct inode *inode) { - return mutex_trylock(&inode->i_mutex); + return down_write_trylock(&inode->i_rwsem); +} + +static inline int inode_trylock_shared(struct inode *inode) +{ + return down_read_trylock(&inode->i_rwsem); } static inline int inode_is_locked(struct inode *inode) { - return mutex_is_locked(&inode->i_mutex); + return rwsem_is_locked(&inode->i_rwsem); } static inline void inode_lock_nested(struct inode *inode, unsigned subclass) { - mutex_lock_nested(&inode->i_mutex, subclass); + down_write_nested(&inode->i_rwsem, subclass); } void lock_two_nondirectories(struct inode *, struct inode*); @@ -1646,6 +1676,7 @@ struct file_operations { ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); int (*iterate) (struct file *, struct dir_context *); + int (*iterate_shared) (struct file *, struct dir_context *); unsigned int (*poll) (struct file *, struct poll_table_struct *); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) 
(struct file *, unsigned int, unsigned long); @@ -1700,7 +1731,8 @@ struct inode_operations { int (*setattr) (struct dentry *, struct iattr *); int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); - ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t); + ssize_t (*getxattr) (struct dentry *, struct inode *, + const char *, void *, size_t); ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*removexattr) (struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, @@ -2263,7 +2295,7 @@ struct filename { const char iname[]; }; -extern long vfs_truncate(struct path *, loff_t); +extern long vfs_truncate(const struct path *, loff_t); extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, struct file *filp); extern int vfs_fallocate(struct file *file, int mode, loff_t offset, @@ -2320,14 +2352,6 @@ extern struct super_block *freeze_bdev(struct block_device *); extern void emergency_thaw_all(void); extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); extern int fsync_bdev(struct block_device *); -#ifdef CONFIG_FS_DAX -extern bool blkdev_dax_capable(struct block_device *bdev); -#else -static inline bool blkdev_dax_capable(struct block_device *bdev) -{ - return false; -} -#endif extern struct super_block *blockdev_superblock; @@ -2395,6 +2419,8 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev, /* fs/char_dev.c */ #define CHRDEV_MAJOR_HASH_SIZE 255 +/* Marks the bottom of the first segment of free char majors */ +#define CHRDEV_MAJOR_DYN_END 234 extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); extern int register_chrdev_region(dev_t, unsigned, const char *); extern int __register_chrdev(unsigned int major, unsigned int baseminor, @@ -2485,13 +2511,25 @@ extern int filemap_fdatawrite_range(struct address_space *mapping, extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync); extern int vfs_fsync(struct file *file, int datasync); -static inline int generic_write_sync(struct file *file, loff_t pos, loff_t count) -{ - if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host)) - return 0; - return vfs_fsync_range(file, pos, pos + count - 1, - (file->f_flags & __O_SYNC) ? 0 : 1); + +/* + * Sync the bytes written if this was a synchronous write. Expect ki_pos + * to already be updated for the write, and will return either the amount + * of bytes passed in, or an error if syncing the file failed. + */ +static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count) +{ + if (iocb->ki_flags & IOCB_DSYNC) { + int ret = vfs_fsync_range(iocb->ki_filp, + iocb->ki_pos - count, iocb->ki_pos - 1, + (iocb->ki_flags & IOCB_SYNC) ? 
0 : 1); + if (ret) + return ret; + } + + return count; } + extern void emergency_sync(void); extern void emergency_remount(void); #ifdef CONFIG_BLOCK @@ -2590,15 +2628,34 @@ static inline void i_readcount_inc(struct inode *inode) #endif extern int do_pipe_flags(int *, int); +#define __kernel_read_file_id(id) \ + id(UNKNOWN, unknown) \ + id(FIRMWARE, firmware) \ + id(MODULE, kernel-module) \ + id(KEXEC_IMAGE, kexec-image) \ + id(KEXEC_INITRAMFS, kexec-initramfs) \ + id(POLICY, security-policy) \ + id(MAX_ID, ) + +#define __fid_enumify(ENUM, dummy) READING_ ## ENUM, +#define __fid_stringify(dummy, str) #str, + enum kernel_read_file_id { - READING_FIRMWARE = 1, - READING_MODULE, - READING_KEXEC_IMAGE, - READING_KEXEC_INITRAMFS, - READING_POLICY, - READING_MAX_ID + __kernel_read_file_id(__fid_enumify) }; +static const char * const kernel_read_file_str[] = { + __kernel_read_file_id(__fid_stringify) +}; + +static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id) +{ + if (id < 0 || id >= READING_MAX_ID) + return kernel_read_file_str[READING_UNKNOWN]; + + return kernel_read_file_str[id]; +} + extern int kernel_read(struct file *, loff_t, char *, unsigned long); extern int kernel_read_file(struct file *, void **, loff_t *, loff_t, enum kernel_read_file_id); @@ -2703,7 +2760,7 @@ extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); -extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *, loff_t); +extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *); extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t); ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos); @@ -2766,18 +2823,17 @@ void dio_end_io(struct bio *bio, int error); ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, struct block_device *bdev, struct iov_iter *iter, - loff_t offset, get_block_t get_block, + get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io, int flags); static inline ssize_t blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, - struct iov_iter *iter, loff_t offset, + struct iov_iter *iter, get_block_t get_block) { return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, - offset, get_block, NULL, NULL, - DIO_LOCKING | DIO_SKIP_HOLES); + get_block, NULL, NULL, DIO_LOCKING | DIO_SKIP_HOLES); } #endif @@ -2943,6 +2999,10 @@ static inline int iocb_flags(struct file *file) res |= IOCB_APPEND; if (io_is_direct(file)) res |= IOCB_DIRECT; + if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host)) + res |= IOCB_DSYNC; + if (file->f_flags & __O_SYNC) + res |= IOCB_SYNC; return res; } @@ -3104,6 +3164,13 @@ static inline bool dir_relax(struct inode *inode) return !IS_DEADDIR(inode); } +static inline bool dir_relax_shared(struct inode *inode) +{ + inode_unlock_shared(inode); + inode_lock_shared(inode); + return !IS_DEADDIR(inode); +} + extern bool path_noexec(const struct path *path); extern void inode_nohighmem(struct inode *inode); diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h index 6027f6bbb061..cfa6cde25f8e 100644 --- a/include/linux/fscrypto.h +++ b/include/linux/fscrypto.h @@ -175,6 +175,7 @@ struct fscrypt_name { */ struct fscrypt_operations { int (*get_context)(struct inode *, void *, size_t); + 
int (*key_prefix)(struct inode *, u8 **); int (*prepare_context)(struct inode *); int (*set_context)(struct inode *, const void *, size_t, void *); int (*dummy_context)(struct inode *); diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h index 0023088b253b..3f9778cbc79d 100644 --- a/include/linux/fsl_ifc.h +++ b/include/linux/fsl_ifc.h @@ -39,6 +39,10 @@ #define FSL_IFC_VERSION_MASK 0x0F0F0000 #define FSL_IFC_VERSION_1_0_0 0x01000000 #define FSL_IFC_VERSION_1_1_0 0x01010000 +#define FSL_IFC_VERSION_2_0_0 0x02000000 + +#define PGOFFSET_64K (64*1024) +#define PGOFFSET_4K (4*1024) /* * CSPR - Chip Select Property Register @@ -723,20 +727,26 @@ struct fsl_ifc_nand { __be32 nand_evter_en; u32 res17[0x2]; __be32 nand_evter_intr_en; - u32 res18[0x2]; + __be32 nand_vol_addr_stat; + u32 res18; __be32 nand_erattr0; __be32 nand_erattr1; u32 res19[0x10]; __be32 nand_fsr; - u32 res20; - __be32 nand_eccstat[4]; - u32 res21[0x20]; + u32 res20[0x3]; + __be32 nand_eccstat[6]; + u32 res21[0x1c]; __be32 nanndcr; u32 res22[0x2]; __be32 nand_autoboot_trgr; u32 res23; __be32 nand_mdr; - u32 res24[0x5C]; + u32 res24[0x1C]; + __be32 nand_dll_lowcfg0; + __be32 nand_dll_lowcfg1; + u32 res25; + __be32 nand_dll_lowstat; + u32 res26[0x3c]; }; /* @@ -771,13 +781,12 @@ struct fsl_ifc_gpcm { __be32 gpcm_erattr1; __be32 gpcm_erattr2; __be32 gpcm_stat; - u32 res4[0x1F3]; }; /* * IFC Controller Registers */ -struct fsl_ifc_regs { +struct fsl_ifc_global { __be32 ifc_rev; u32 res1[0x2]; struct { @@ -803,21 +812,26 @@ struct fsl_ifc_regs { } ftim_cs[FSL_IFC_BANK_COUNT]; u32 res9[0x30]; __be32 rb_stat; - u32 res10[0x2]; + __be32 rb_map; + __be32 wb_map; __be32 ifc_gcr; - u32 res11[0x2]; + u32 res10[0x2]; __be32 cm_evter_stat; - u32 res12[0x2]; + u32 res11[0x2]; __be32 cm_evter_en; - u32 res13[0x2]; + u32 res12[0x2]; __be32 cm_evter_intr_en; - u32 res14[0x2]; + u32 res13[0x2]; __be32 cm_erattr0; __be32 cm_erattr1; - u32 res15[0x2]; + u32 res14[0x2]; __be32 ifc_ccr; __be32 ifc_csr; - u32 res16[0x2EB]; + __be32 ddr_ccr_low; +}; + + +struct fsl_ifc_runtime { struct fsl_ifc_nand ifc_nand; struct fsl_ifc_nor ifc_nor; struct fsl_ifc_gpcm ifc_gpcm; @@ -831,7 +845,8 @@ extern int fsl_ifc_find(phys_addr_t addr_base); struct fsl_ifc_ctrl { /* device info */ struct device *dev; - struct fsl_ifc_regs __iomem *regs; + struct fsl_ifc_global __iomem *gregs; + struct fsl_ifc_runtime __iomem *rregs; int irq; int nand_irq; spinlock_t lock; diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 1259e53d9296..29f917517299 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -359,8 +359,6 @@ extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group) extern void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group); /* run all the marks in a group, and clear all of the marks where mark->flags & flags is true*/ extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags); -/* run all the marks in a group, and flag them to be freed */ -extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group); extern void fsnotify_get_mark(struct fsnotify_mark *mark); extern void fsnotify_put_mark(struct fsnotify_mark *mark); extern void fsnotify_unmount_inodes(struct super_block *sb); diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index dea12a6e413b..66a36a815f0a 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -455,6 +455,7 @@ int ftrace_update_record(struct dyn_ftrace *rec, 
int enable); int ftrace_test_record(struct dyn_ftrace *rec, int enable); void ftrace_run_stop_machine(int command); unsigned long ftrace_location(unsigned long ip); +unsigned long ftrace_location_range(unsigned long start, unsigned long end); unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec); unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 5c706765404a..359a8e4bd44d 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -14,6 +14,7 @@ #include <linux/rcupdate.h> #include <linux/slab.h> #include <linux/percpu-refcount.h> +#include <linux/uuid.h> #ifdef CONFIG_BLOCK @@ -93,7 +94,7 @@ struct disk_stats { * Enough for the string representation of any kind of UUID plus NULL. * EFI UUID is 36 characters. MSDOS UUID is 11 characters. */ -#define PARTITION_META_INFO_UUIDLTH 37 +#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1) struct partition_meta_info { char uuid[PARTITION_META_INFO_UUIDLTH]; @@ -228,27 +229,9 @@ static inline struct gendisk *part_to_disk(struct hd_struct *part) return NULL; } -static inline void part_pack_uuid(const u8 *uuid_str, u8 *to) -{ - int i; - for (i = 0; i < 16; ++i) { - *to++ = (hex_to_bin(*uuid_str) << 4) | - (hex_to_bin(*(uuid_str + 1))); - uuid_str += 2; - switch (i) { - case 3: - case 5: - case 7: - case 9: - uuid_str++; - continue; - } - } -} - static inline int blk_part_pack_uuid(const u8 *uuid_str, u8 *to) { - part_pack_uuid(uuid_str, to); + uuid_be_to_bin(uuid_str, (uuid_be *)to); return 0; } diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h index eecd19b37001..6270a56e5edc 100644 --- a/include/linux/genl_magic_struct.h +++ b/include/linux/genl_magic_struct.h @@ -62,6 +62,11 @@ extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void); /* MAGIC helpers {{{2 */ +static inline int nla_put_u64_0pad(struct sk_buff *skb, int attrtype, u64 value) +{ + return nla_put_64bit(skb, attrtype, sizeof(u64), &value, 0); +} + /* possible field types */ #define __flg_field(attr_nr, attr_flag, name) \ __field(attr_nr, attr_flag, name, NLA_U8, char, \ @@ -80,7 +85,7 @@ extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void); nla_get_u32, nla_put_u32, true) #define __u64_field(attr_nr, attr_flag, name) \ __field(attr_nr, attr_flag, name, NLA_U64, __u64, \ - nla_get_u64, nla_put_u64, false) + nla_get_u64, nla_put_u64_0pad, false) #define __str_field(attr_nr, attr_flag, name, maxlen) \ __array(attr_nr, attr_flag, name, NLA_NUL_STRING, char, maxlen, \ nla_strlcpy, nla_put, false) diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index bee976f82788..50882e09289b 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -20,6 +20,18 @@ struct gpio_device; #ifdef CONFIG_GPIOLIB /** + * enum single_ended_mode - mode for single ended operation + * @LINE_MODE_PUSH_PULL: normal mode for a GPIO line, drive actively high/low + * @LINE_MODE_OPEN_DRAIN: set line to be open drain + * @LINE_MODE_OPEN_SOURCE: set line to be open source + */ +enum single_ended_mode { + LINE_MODE_PUSH_PULL, + LINE_MODE_OPEN_DRAIN, + LINE_MODE_OPEN_SOURCE, +}; + +/** * struct gpio_chip - abstract a GPIO controller * @label: a functional name for the GPIO device, such as a part * number or the name of the SoC IP-block implementing it. 
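The single_ended_mode enum introduced in the gpio/driver.h hunk above pairs with the new set_single_ended() hook documented in the next hunk. A hypothetical driver callback might look like the sketch below: the foo_* names and register are invented, gpiochip_get_data() is the existing gpiolib helper, and only the enum values, the prototype and the -ENOTSUPP convention come from this patch.

#include <linux/gpio/driver.h>

/* Hypothetical device state and register helper. */
struct foo_gpio { void __iomem *base; };
static void foo_write_bit(struct foo_gpio *priv, u32 reg, unsigned bit, int val);
#define FOO_OPEN_DRAIN_REG 0x10

static int foo_gpio_set_single_ended(struct gpio_chip *chip, unsigned offset,
				     enum single_ended_mode mode)
{
	struct foo_gpio *priv = gpiochip_get_data(chip);

	switch (mode) {
	case LINE_MODE_PUSH_PULL:
		foo_write_bit(priv, FOO_OPEN_DRAIN_REG, offset, 0);
		return 0;
	case LINE_MODE_OPEN_DRAIN:
		foo_write_bit(priv, FOO_OPEN_DRAIN_REG, offset, 1);
		return 0;
	case LINE_MODE_OPEN_SOURCE:
	default:
		/* Hardware only does open drain; let gpiolib emulate the rest. */
		return -ENOTSUPP;
	}
}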
@@ -38,7 +50,15 @@ struct gpio_device; * @set: assigns output value for signal "offset" * @set_multiple: assigns output values for multiple signals defined by "mask" * @set_debounce: optional hook for setting debounce time for specified gpio in - * interrupt triggered gpio chips + * interrupt triggered gpio chips + * @set_single_ended: optional hook for setting a line as open drain, open + * source, or non-single ended (restore from open drain/source to normal + * push-pull mode) this should be implemented if the hardware supports + * open drain or open source settings. The GPIOlib will otherwise try + * to emulate open drain/source by not actively driving lines high/low + * if a consumer request this. The driver may return -ENOTSUPP if e.g. + * it supports just open drain but not open source and is called + * with LINE_MODE_OPEN_SOURCE as mode argument. * @to_irq: optional hook supporting non-static gpio_to_irq() mappings; * implementation may not sleep * @dbg_show: optional routine to show contents in debugfs; default code @@ -130,6 +150,9 @@ struct gpio_chip { int (*set_debounce)(struct gpio_chip *chip, unsigned offset, unsigned debounce); + int (*set_single_ended)(struct gpio_chip *chip, + unsigned offset, + enum single_ended_mode mode); int (*to_irq)(struct gpio_chip *chip, unsigned offset); diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index dfd59d6bc6f0..c683996110b1 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -61,6 +61,7 @@ extern void irq_exit(void); #define nmi_enter() \ do { \ + printk_nmi_enter(); \ lockdep_off(); \ ftrace_nmi_enter(); \ BUG_ON(in_nmi()); \ @@ -77,6 +78,7 @@ extern void irq_exit(void); preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \ ftrace_nmi_exit(); \ lockdep_on(); \ + printk_nmi_exit(); \ } while (0) #endif /* LINUX_HARDIRQ_H */ diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index d7b9e5346fba..419fb9e03447 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -28,9 +28,7 @@ extern int zap_huge_pmd(struct mmu_gather *tlb, extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned char *vec); -extern bool move_huge_pmd(struct vm_area_struct *vma, - struct vm_area_struct *new_vma, - unsigned long old_addr, +extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, unsigned long old_end, pmd_t *old_pmd, pmd_t *new_pmd); extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 7d953c2542a8..c26d4638f665 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -338,6 +338,7 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping, /* arch callback */ int __init alloc_bootmem_huge_page(struct hstate *h); +void __init hugetlb_bad_size(void); void __init hugetlb_add_hstate(unsigned order); struct hstate *size_to_hstate(unsigned long size); @@ -352,9 +353,7 @@ extern unsigned int default_hstate_idx; static inline struct hstate *hstate_inode(struct inode *i) { - struct hugetlbfs_sb_info *hsb; - hsb = HUGETLBFS_SB(i->i_sb); - return hsb->hstate; + return HUGETLBFS_SB(i->i_sb)->hstate; } static inline struct hstate *hstate_file(struct file *f) @@ -453,12 +452,12 @@ static inline pgoff_t basepage_index(struct page *page) extern void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn); -static inline int hugepage_migration_supported(struct hstate *h) +static inline 
bool hugepage_migration_supported(struct hstate *h) { #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION return huge_page_shift(h) == PMD_SHIFT; #else - return 0; + return false; #endif } @@ -520,7 +519,7 @@ static inline pgoff_t basepage_index(struct page *page) return page->index; } #define dissolve_free_huge_pages(s, e) do {} while (0) -#define hugepage_migration_supported(h) 0 +#define hugepage_migration_supported(h) false static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index 24154c26d469..063962f6dfc6 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -93,20 +93,17 @@ hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg, struct page *page) { - return; } static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page) { - return; } static inline void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg) { - return; } static inline void hugetlb_cgroup_file_init(void) @@ -116,7 +113,6 @@ static inline void hugetlb_cgroup_file_init(void) static inline void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage) { - return; } #endif /* CONFIG_MEM_RES_CTLR_HUGETLB */ diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h index 2bb681fbeb35..a4e7ca0f3585 100644 --- a/include/linux/hugetlb_inline.h +++ b/include/linux/hugetlb_inline.h @@ -5,16 +5,16 @@ #include <linux/mm.h> -static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) +static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma) { return !!(vma->vm_flags & VM_HUGETLB); } #else -static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) +static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma) { - return 0; + return false; } #endif diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index aa0fadce9308..b10954a66939 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -126,6 +126,8 @@ struct hv_ring_buffer_info { u32 ring_datasize; /* < ring_size */ u32 ring_data_startoffset; + u32 priv_write_index; + u32 priv_read_index; }; /* @@ -151,6 +153,33 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi, *read = dsize - *write; } +static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi) +{ + u32 read_loc, write_loc, dsize, read; + + dsize = rbi->ring_datasize; + read_loc = rbi->ring_buffer->read_index; + write_loc = READ_ONCE(rbi->ring_buffer->write_index); + + read = write_loc >= read_loc ? (write_loc - read_loc) : + (dsize - read_loc) + write_loc; + + return read; +} + +static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi) +{ + u32 read_loc, write_loc, dsize, write; + + dsize = rbi->ring_datasize; + read_loc = READ_ONCE(rbi->ring_buffer->read_index); + write_loc = rbi->ring_buffer->write_index; + + write = write_loc >= read_loc ? dsize - (write_loc - read_loc) : + read_loc - write_loc; + return write; +} + /* * VMBUS version is 32 bit entity broken up into * two 16 bit quantities: major_number. minor_number. 
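As a quick worked example of the ring-buffer accounting added in the hyperv.h hunk above, take the assumed values ring_datasize = 4096, read_index = 3000 and write_index = 100 (the writer has wrapped around): hv_get_bytes_to_read() returns (4096 - 3000) + 100 = 1196 bytes available to the reader, while hv_get_bytes_to_write() returns 3000 - 100 = 2900 bytes of free space. By construction the two quantities always sum to ring_datasize.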
@@ -1091,7 +1120,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, resource_size_t min, resource_size_t max, resource_size_t size, resource_size_t align, bool fb_overlap_ok); - +void vmbus_free_mmio(resource_size_t start, resource_size_t size); int vmbus_cpu_number_to_vp_number(int cpu_number); u64 hv_do_hypercall(u64 control, void *input, void *output); @@ -1338,4 +1367,143 @@ extern __u32 vmbus_proto_version; int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id, const uuid_le *shv_host_servie_id); +void vmbus_set_event(struct vmbus_channel *channel); + +/* Get the start of the ring buffer. */ +static inline void * +hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info) +{ + return (void *)ring_info->ring_buffer->buffer; +} + +/* + * To optimize the flow management on the send-side, + * when the sender is blocked because of lack of + * sufficient space in the ring buffer, potential the + * consumer of the ring buffer can signal the producer. + * This is controlled by the following parameters: + * + * 1. pending_send_sz: This is the size in bytes that the + * producer is trying to send. + * 2. The feature bit feat_pending_send_sz set to indicate if + * the consumer of the ring will signal when the ring + * state transitions from being full to a state where + * there is room for the producer to send the pending packet. + */ + +static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi) +{ + u32 cur_write_sz; + u32 pending_sz; + + /* + * Issue a full memory barrier before making the signaling decision. + * Here is the reason for having this barrier: + * If the reading of the pend_sz (in this function) + * were to be reordered and read before we commit the new read + * index (in the calling function) we could + * have a problem. If the host were to set the pending_sz after we + * have sampled pending_sz and go to sleep before we commit the + * read index, we could miss sending the interrupt. Issue a full + * memory barrier to address this. + */ + virt_mb(); + + pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); + /* If the other end is not blocked on write don't bother. */ + if (pending_sz == 0) + return false; + + cur_write_sz = hv_get_bytes_to_write(rbi); + + if (cur_write_sz >= pending_sz) + return true; + + return false; +} + +/* + * An API to support in-place processing of incoming VMBUS packets. + */ +#define VMBUS_PKT_TRAILER 8 + +static inline struct vmpacket_descriptor * +get_next_pkt_raw(struct vmbus_channel *channel) +{ + struct hv_ring_buffer_info *ring_info = &channel->inbound; + u32 read_loc = ring_info->priv_read_index; + void *ring_buffer = hv_get_ring_buffer(ring_info); + struct vmpacket_descriptor *cur_desc; + u32 packetlen; + u32 dsize = ring_info->ring_datasize; + u32 delta = read_loc - ring_info->ring_buffer->read_index; + u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta); + + if (bytes_avail_toread < sizeof(struct vmpacket_descriptor)) + return NULL; + + if ((read_loc + sizeof(*cur_desc)) > dsize) + return NULL; + + cur_desc = ring_buffer + read_loc; + packetlen = cur_desc->len8 << 3; + + /* + * If the packet under consideration is wrapping around, + * return failure. + */ + if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1)) + return NULL; + + return cur_desc; +} + +/* + * A helper function to step through packets "in-place" + * This API is to be called after each successful call + * get_next_pkt_raw(). 
+ */ +static inline void put_pkt_raw(struct vmbus_channel *channel, + struct vmpacket_descriptor *desc) +{ + struct hv_ring_buffer_info *ring_info = &channel->inbound; + u32 read_loc = ring_info->priv_read_index; + u32 packetlen = desc->len8 << 3; + u32 dsize = ring_info->ring_datasize; + + if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize) + BUG(); + /* + * Include the packet trailer. + */ + ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER; +} + +/* + * This call commits the read index and potentially signals the host. + * Here is the pattern for using the "in-place" consumption APIs: + * + * while (get_next_pkt_raw() { + * process the packet "in-place"; + * put_pkt_raw(); + * } + * if (packets processed in place) + * commit_rd_index(); + */ +static inline void commit_rd_index(struct vmbus_channel *channel) +{ + struct hv_ring_buffer_info *ring_info = &channel->inbound; + /* + * Make sure all reads are done before we update the read index since + * the writer may start writing to the read area once the read index + * is updated. + */ + virt_rmb(); + ring_info->ring_buffer->read_index = ring_info->priv_read_index; + + if (hv_need_to_signal_on_read(ring_info)) + vmbus_set_event(channel); +} + + #endif /* _HYPERV_H */ diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h index b5f9a007a3ab..d4c1d12f900d 100644 --- a/include/linux/i2c-mux.h +++ b/include/linux/i2c-mux.h @@ -27,22 +27,49 @@ #ifdef __KERNEL__ +#include <linux/bitops.h> + +struct i2c_mux_core { + struct i2c_adapter *parent; + struct device *dev; + bool mux_locked; + + void *priv; + + int (*select)(struct i2c_mux_core *, u32 chan_id); + int (*deselect)(struct i2c_mux_core *, u32 chan_id); + + int num_adapters; + int max_adapters; + struct i2c_adapter *adapter[0]; +}; + +struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent, + struct device *dev, int max_adapters, + int sizeof_priv, u32 flags, + int (*select)(struct i2c_mux_core *, u32), + int (*deselect)(struct i2c_mux_core *, u32)); + +/* flags for i2c_mux_alloc */ +#define I2C_MUX_LOCKED BIT(0) + +static inline void *i2c_mux_priv(struct i2c_mux_core *muxc) +{ + return muxc->priv; +} + +struct i2c_adapter *i2c_root_adapter(struct device *dev); + /* - * Called to create a i2c bus on a multiplexed bus segment. - * The mux_dev and chan_id parameters are passed to the select - * and deselect callback functions to perform hardware-specific - * mux control. + * Called to create an i2c bus on a multiplexed bus segment. + * The chan_id parameter is passed to the select and deselect + * callback functions to perform hardware-specific mux control. 
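A hedged sketch of how a driver might sit on top of the new i2c_mux_core interface, creating one child adapter per channel with a select callback of exactly the kind described above; i2c_mux_add_adapter() and i2c_mux_del_adapters() are declared in the next hunk, and the foo_* names, the two-channel count and the SMBus register write are assumptions made up for illustration:

#include <linux/i2c.h>
#include <linux/i2c-mux.h>

struct foo_mux {
	struct i2c_client *client;	/* the mux chip itself */
};

static int foo_mux_select(struct i2c_mux_core *muxc, u32 chan)
{
	struct foo_mux *mux = i2c_mux_priv(muxc);

	/* Hypothetical register write routing traffic to channel @chan. */
	return i2c_smbus_write_byte(mux->client, chan);
}

static int foo_mux_setup(struct i2c_client *client)
{
	struct i2c_mux_core *muxc;
	struct foo_mux *mux;
	int chan, ret;

	muxc = i2c_mux_alloc(client->adapter, &client->dev, 2,
			     sizeof(*mux), 0, foo_mux_select, NULL);
	if (!muxc)
		return -ENOMEM;

	mux = i2c_mux_priv(muxc);
	mux->client = client;

	for (chan = 0; chan < 2; chan++) {
		ret = i2c_mux_add_adapter(muxc, 0, chan, 0);
		if (ret) {
			i2c_mux_del_adapters(muxc);
			return ret;
		}
	}
	return 0;
}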
*/ -struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent, - struct device *mux_dev, - void *mux_priv, u32 force_nr, u32 chan_id, - unsigned int class, - int (*select) (struct i2c_adapter *, - void *mux_dev, u32 chan_id), - int (*deselect) (struct i2c_adapter *, - void *mux_dev, u32 chan_id)); - -void i2c_del_mux_adapter(struct i2c_adapter *adap); +int i2c_mux_add_adapter(struct i2c_mux_core *muxc, + u32 force_nr, u32 chan_id, + unsigned int class); + +void i2c_mux_del_adapters(struct i2c_mux_core *muxc); #endif /* __KERNEL__ */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 200cf13b00f6..96a25ae14494 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -524,6 +524,7 @@ struct i2c_adapter { /* data fields that are valid for all devices */ struct rt_mutex bus_lock; + struct rt_mutex mux_lock; int timeout; /* in jiffies */ int retries; @@ -538,6 +539,10 @@ struct i2c_adapter { struct i2c_bus_recovery_info *bus_recovery_info; const struct i2c_adapter_quirks *quirks; + + void (*lock_bus)(struct i2c_adapter *, unsigned int flags); + int (*trylock_bus)(struct i2c_adapter *, unsigned int flags); + void (*unlock_bus)(struct i2c_adapter *, unsigned int flags); }; #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) @@ -567,8 +572,44 @@ i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter) int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *)); /* Adapter locking functions, exported for shared pin cases */ -void i2c_lock_adapter(struct i2c_adapter *); -void i2c_unlock_adapter(struct i2c_adapter *); +#define I2C_LOCK_ROOT_ADAPTER BIT(0) +#define I2C_LOCK_SEGMENT BIT(1) + +/** + * i2c_lock_bus - Get exclusive access to an I2C bus segment + * @adapter: Target I2C bus segment + * @flags: I2C_LOCK_ROOT_ADAPTER locks the root i2c adapter, I2C_LOCK_SEGMENT + * locks only this branch in the adapter tree + */ +static inline void +i2c_lock_bus(struct i2c_adapter *adapter, unsigned int flags) +{ + adapter->lock_bus(adapter, flags); +} + +/** + * i2c_unlock_bus - Release exclusive access to an I2C bus segment + * @adapter: Target I2C bus segment + * @flags: I2C_LOCK_ROOT_ADAPTER unlocks the root i2c adapter, I2C_LOCK_SEGMENT + * unlocks only this branch in the adapter tree + */ +static inline void +i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) +{ + adapter->unlock_bus(adapter, flags); +} + +static inline void +i2c_lock_adapter(struct i2c_adapter *adapter) +{ + i2c_lock_bus(adapter, I2C_LOCK_ROOT_ADAPTER); +} + +static inline void +i2c_unlock_adapter(struct i2c_adapter *adapter) +{ + i2c_unlock_bus(adapter, I2C_LOCK_ROOT_ADAPTER); +} /*flags for the client struct: */ #define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ @@ -654,6 +695,11 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap) return adap->nr; } +static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg) +{ + return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0); +} + /** * module_i2c_driver() - Helper macro for registering a modular I2C driver * @__i2c_driver: i2c_driver struct diff --git a/include/linux/i2c/sx150x.h b/include/linux/i2c/sx150x.h deleted file mode 100644 index 52baa79d69a7..000000000000 --- a/include/linux/i2c/sx150x.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Driver for the Semtech SX150x I2C GPIO Expanders - * - * Copyright (c) 2010, Code Aurora Forum. All rights reserved. 
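The i2c_lock_bus()/i2c_unlock_bus() pair in the i2c.h hunk above makes the "shared pin" case explicit: a driver that drives a pin shared with the bus wants the whole adapter tree quiet, so it locks the root adapter rather than only its own segment (i2c_lock_adapter() is now shorthand for exactly that). A minimal hedged sketch:

#include <linux/i2c.h>

static void foo_use_shared_pin(struct i2c_adapter *adap)
{
	i2c_lock_bus(adap, I2C_LOCK_ROOT_ADAPTER);
	/*
	 * Drive the shared pin here; no I2C transfers can be issued on
	 * this adapter tree until the lock is dropped.
	 */
	i2c_unlock_bus(adap, I2C_LOCK_ROOT_ADAPTER);
}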
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - */ -#ifndef __LINUX_I2C_SX150X_H -#define __LINUX_I2C_SX150X_H - -/** - * struct sx150x_platform_data - config data for SX150x driver - * @gpio_base: The index number of the first GPIO assigned to this - * GPIO expander. The expander will create a block of - * consecutively numbered gpios beginning at the given base, - * with the size of the block depending on the model of the - * expander chip. - * @oscio_is_gpo: If set to true, the driver will configure OSCIO as a GPO - * instead of as an oscillator, increasing the size of the - * GP(I)O pool created by this expander by one. The - * output-only GPO pin will be added at the end of the block. - * @io_pullup_ena: A bit-mask which enables or disables the pull-up resistor - * for each IO line in the expander. Setting the bit at - * position n will enable the pull-up for the IO at - * the corresponding offset. For chips with fewer than - * 16 IO pins, high-end bits are ignored. - * @io_pulldn_ena: A bit-mask which enables-or disables the pull-down - * resistor for each IO line in the expander. Setting the - * bit at position n will enable the pull-down for the IO at - * the corresponding offset. For chips with fewer than - * 16 IO pins, high-end bits are ignored. - * @io_open_drain_ena: A bit-mask which enables-or disables open-drain - * operation for each IO line in the expander. Setting the - * bit at position n enables open-drain operation for - * the IO at the corresponding offset. Clearing the bit - * enables regular push-pull operation for that IO. - * For chips with fewer than 16 IO pins, high-end bits - * are ignored. - * @io_polarity: A bit-mask which enables polarity inversion for each IO line - * in the expander. Setting the bit at position n inverts - * the polarity of that IO line, while clearing it results - * in normal polarity. For chips with fewer than 16 IO pins, - * high-end bits are ignored. - * @irq_summary: The 'summary IRQ' line to which the GPIO expander's INT line - * is connected, via which it reports interrupt events - * across all GPIO lines. This must be a real, - * pre-existing IRQ line. - * Setting this value < 0 disables the irq_chip functionality - * of the driver. - * @irq_base: The first 'virtual IRQ' line at which our block of GPIO-based - * IRQ lines will appear. Similarly to gpio_base, the expander - * will create a block of irqs beginning at this number. - * This value is ignored if irq_summary is < 0. - * @reset_during_probe: If set to true, the driver will trigger a full - * reset of the chip at the beginning of the probe - * in order to place it in a known state. 
- */ -struct sx150x_platform_data { - unsigned gpio_base; - bool oscio_is_gpo; - u16 io_pullup_ena; - u16 io_pulldn_ena; - u16 io_open_drain_ena; - u16 io_polarity; - int irq_summary; - unsigned irq_base; - bool reset_during_probe; -}; - -#endif /* __LINUX_I2C_SX150X_H */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 3b1f6cef9513..b118744d3382 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -7,6 +7,7 @@ * Copyright (c) 2005, Devicescape Software, Inc. * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright (c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -163,6 +164,9 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) /* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */ #define IEEE80211_MAX_FRAME_LEN 2352 +/* Maximal size of an A-MSDU that can be transported in a HT BA session */ +#define IEEE80211_MAX_MPDU_LEN_HT_BA 4095 + /* Maximal size of an A-MSDU */ #define IEEE80211_MAX_MPDU_LEN_HT_3839 3839 #define IEEE80211_MAX_MPDU_LEN_HT_7935 7935 @@ -637,6 +641,16 @@ static inline bool ieee80211_is_first_frag(__le16 seq_ctrl) return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0; } +/** + * ieee80211_is_frag - check if a frame is a fragment + * @hdr: 802.11 header of the frame + */ +static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr) +{ + return ieee80211_has_morefrags(hdr->frame_control) || + hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG); +} + struct ieee80211s_hdr { u8 flags; u8 ttl; @@ -1011,6 +1025,16 @@ struct ieee80211_mgmt { u8 tpc_elem_length; struct ieee80211_tpc_report_ie tpc; } __packed tpc_report; + struct { + u8 action_code; + u8 dialog_token; + u8 follow_up; + u8 tod[6]; + u8 toa[6]; + __le16 tod_error; + __le16 toa_error; + u8 variable[0]; + } __packed ftm; } u; } __packed action; } u; diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h index d3e415674dac..acedbb68a5a3 100644 --- a/include/linux/ieee802154.h +++ b/include/linux/ieee802154.h @@ -47,6 +47,7 @@ #define IEEE802154_ADDR_SHORT_UNSPEC 0xfffe #define IEEE802154_EXTENDED_ADDR_LEN 8 +#define IEEE802154_SHORT_ADDR_LEN 2 #define IEEE802154_LIFS_PERIOD 40 #define IEEE802154_SIFS_PERIOD 12 @@ -218,6 +219,7 @@ enum { /* frame control handling */ #define IEEE802154_FCTL_FTYPE 0x0003 #define IEEE802154_FCTL_ACKREQ 0x0020 +#define IEEE802154_FCTL_SECEN 0x0004 #define IEEE802154_FCTL_INTRA_PAN 0x0040 #define IEEE802154_FTYPE_DATA 0x0001 @@ -233,6 +235,15 @@ static inline int ieee802154_is_data(__le16 fc) } /** + * ieee802154_is_secen - check if Security bit is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee802154_is_secen(__le16 fc) +{ + return fc & cpu_to_le16(IEEE802154_FCTL_SECEN); +} + +/** * ieee802154_is_ackreq - check if acknowledgment request bit is set * @fc: frame control bytes in little-endian byteorder */ @@ -260,17 +271,17 @@ static inline bool ieee802154_is_intra_pan(__le16 fc) * * @len: psdu len with (MHR + payload + MFR) */ -static inline bool ieee802154_is_valid_psdu_len(const u8 len) +static inline bool ieee802154_is_valid_psdu_len(u8 len) { return (len == IEEE802154_ACK_PSDU_LEN || (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU)); } /** - * ieee802154_is_valid_psdu_len - check if extended addr is valid + * 
ieee802154_is_valid_extended_unicast_addr - check if extended addr is valid * @addr: extended addr to check */ -static inline bool ieee802154_is_valid_extended_unicast_addr(const __le64 addr) +static inline bool ieee802154_is_valid_extended_unicast_addr(__le64 addr) { /* Bail out if the address is all zero, or if the group * address bit is set. @@ -280,6 +291,34 @@ static inline bool ieee802154_is_valid_extended_unicast_addr(const __le64 addr) } /** + * ieee802154_is_broadcast_short_addr - check if short addr is broadcast + * @addr: short addr to check + */ +static inline bool ieee802154_is_broadcast_short_addr(__le16 addr) +{ + return (addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST)); +} + +/** + * ieee802154_is_unspec_short_addr - check if short addr is unspecified + * @addr: short addr to check + */ +static inline bool ieee802154_is_unspec_short_addr(__le16 addr) +{ + return (addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC)); +} + +/** + * ieee802154_is_valid_src_short_addr - check if source short address is valid + * @addr: short addr to check + */ +static inline bool ieee802154_is_valid_src_short_addr(__le16 addr) +{ + return !(ieee802154_is_broadcast_short_addr(addr) || + ieee802154_is_unspec_short_addr(addr)); +} + +/** * ieee802154_random_extended_addr - generates a random extended address * @addr: extended addr pointer to place the random address */ diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h index 2ec3ad58e8a0..70a5164f4728 100644 --- a/include/linux/iio/buffer.h +++ b/include/linux/iio/buffer.h @@ -83,10 +83,12 @@ struct iio_buffer_access_funcs { * @access: [DRIVER] buffer access functions associated with the * implementation. * @scan_el_dev_attr_list:[INTERN] list of scan element related attributes. + * @buffer_group: [INTERN] attributes of the buffer group * @scan_el_group: [DRIVER] attribute group for those attributes not * created from the iio_chan_info array. * @pollq: [INTERN] wait queue to allow for polling on the buffer. * @stufftoread: [INTERN] flag to indicate new data. + * @attrs: [INTERN] standard attributes of the buffer * @demux_list: [INTERN] list of operations required to demux the scan. * @demux_bounce: [INTERN] buffer for doing gather from incoming scan. * @buffer_list: [INTERN] entry in the devices list of current buffers. diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 6670c3d25c58..d029ffac0d69 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -37,6 +37,7 @@ #define ST_SENSORS_DEFAULT_AXIS_ADDR 0x20 #define ST_SENSORS_DEFAULT_AXIS_MASK 0x07 #define ST_SENSORS_DEFAULT_AXIS_N_BIT 3 +#define ST_SENSORS_DEFAULT_STAT_ADDR 0x27 #define ST_SENSORS_MAX_NAME 17 #define ST_SENSORS_MAX_4WAI 7 @@ -121,6 +122,9 @@ struct st_sensor_bdu { * @mask_int2: mask to enable/disable IRQ on INT2 pin. * @addr_ihl: address to enable/disable active low on the INT lines. * @mask_ihl: mask to enable/disable active low on the INT lines. + * @addr_od: address to enable/disable Open Drain on the INT lines. + * @mask_od: mask to enable/disable Open Drain on the INT lines. + * @addr_stat_drdy: address to read status of DRDY (data ready) interrupt * struct ig1 - represents the Interrupt Generator 1 of sensors. * @en_addr: address of the enable ig1 register. * @en_mask: mask to write the on/off value for enable. 
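The new short-address predicates in the ieee802154.h hunk above compose into simple receive-path filters; a small hedged sketch (the acceptance policy itself is made up):

#include <linux/ieee802154.h>

static bool foo_frame_acceptable(__le16 fc, __le16 src_short_addr)
{
	/* Hypothetical policy: require link-layer security to be enabled... */
	if (!ieee802154_is_secen(fc))
		return false;

	/*
	 * ...and a usable unicast source, i.e. neither the broadcast
	 * address (0xffff) nor the unspecified address (0xfffe).
	 */
	return ieee802154_is_valid_src_short_addr(src_short_addr);
}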
@@ -131,6 +135,9 @@ struct st_sensor_data_ready_irq { u8 mask_int2; u8 addr_ihl; u8 mask_ihl; + u8 addr_od; + u8 mask_od; + u8 addr_stat_drdy; struct { u8 en_addr; u8 en_mask; @@ -212,6 +219,7 @@ struct st_sensor_settings { * @odr: Output data rate of the sensor [Hz]. * num_data_channels: Number of data channels used in buffer. * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). + * @int_pin_open_drain: Set the interrupt/DRDY to open drain. * @get_irq_data_ready: Function to get the IRQ used for data ready signal. * @tf: Transfer function structure used by I/O operations. * @tb: Transfer buffers and mutex used by I/O operations. @@ -233,6 +241,7 @@ struct st_sensor_data { unsigned int num_data_channels; u8 drdy_int_pin; + bool int_pin_open_drain; unsigned int (*get_irq_data_ready) (struct iio_dev *indio_dev); diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h index fad58671c49e..3d672f72e7ec 100644 --- a/include/linux/iio/consumer.h +++ b/include/linux/iio/consumer.h @@ -49,6 +49,33 @@ struct iio_channel *iio_channel_get(struct device *dev, void iio_channel_release(struct iio_channel *chan); /** + * devm_iio_channel_get() - Resource managed version of iio_channel_get(). + * @dev: Pointer to consumer device. Device name must match + * the name of the device as provided in the iio_map + * with which the desired provider to consumer mapping + * was registered. + * @consumer_channel: Unique name to identify the channel on the consumer + * side. This typically describes the channels use within + * the consumer. E.g. 'battery_voltage' + * + * Returns a pointer to negative errno if it is not able to get the iio channel + * otherwise returns valid pointer for iio channel. + * + * The allocated iio channel is automatically released when the device is + * unbound. + */ +struct iio_channel *devm_iio_channel_get(struct device *dev, + const char *consumer_channel); +/** + * devm_iio_channel_release() - Resource managed version of + * iio_channel_release(). + * @dev: Pointer to consumer device for which resource + * is allocared. + * @chan: The channel to be released. + */ +void devm_iio_channel_release(struct device *dev, struct iio_channel *chan); + +/** * iio_channel_get_all() - get all channels associated with a client * @dev: Pointer to consumer device. * @@ -65,6 +92,32 @@ struct iio_channel *iio_channel_get_all(struct device *dev); */ void iio_channel_release_all(struct iio_channel *chan); +/** + * devm_iio_channel_get_all() - Resource managed version of + * iio_channel_get_all(). + * @dev: Pointer to consumer device. + * + * Returns a pointer to negative errno if it is not able to get the iio channel + * otherwise returns an array of iio_channel structures terminated with one with + * null iio_dev pointer. + * + * This function is used by fairly generic consumers to get all the + * channels registered as having this consumer. + * + * The allocated iio channels are automatically released when the device is + * unbounded. + */ +struct iio_channel *devm_iio_channel_get_all(struct device *dev); + +/** + * devm_iio_channel_release_all() - Resource managed version of + * iio_channel_release_all(). + * @dev: Pointer to consumer device for which resource + * is allocared. + * @chan: Array channel to be released. 
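devm_iio_channel_get() behaves like iio_channel_get() but ties the channel to the consumer device's lifetime, so neither the error path nor the remove path needs an explicit release. A hedged probe-time sketch; the "battery_voltage" mapping name and the foo_* driver are hypothetical:

#include <linux/err.h>
#include <linux/iio/consumer.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct iio_channel *chan;
	int raw, ret;

	chan = devm_iio_channel_get(&pdev->dev, "battery_voltage");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = iio_read_channel_raw(chan, &raw);
	if (ret < 0)
		return ret;

	dev_info(&pdev->dev, "initial raw reading: %d\n", raw);
	return 0;	/* the channel is released automatically on unbind */
}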
+ */ +void devm_iio_channel_release_all(struct device *dev, struct iio_channel *chan); + struct iio_cb_buffer; /** * iio_channel_get_all_cb() - register callback for triggered capture diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index b2b16772c651..7c29cb0124ae 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -148,6 +148,37 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev, } /** + * struct iio_mount_matrix - iio mounting matrix + * @rotation: 3 dimensional space rotation matrix defining sensor alignment with + * main hardware + */ +struct iio_mount_matrix { + const char *rotation[9]; +}; + +ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv, + const struct iio_chan_spec *chan, char *buf); +int of_iio_read_mount_matrix(const struct device *dev, const char *propname, + struct iio_mount_matrix *matrix); + +typedef const struct iio_mount_matrix * + (iio_get_mount_matrix_t)(const struct iio_dev *indio_dev, + const struct iio_chan_spec *chan); + +/** + * IIO_MOUNT_MATRIX() - Initialize mount matrix extended channel attribute + * @_shared: Whether the attribute is shared between all channels + * @_get: Pointer to an iio_get_mount_matrix_t accessor + */ +#define IIO_MOUNT_MATRIX(_shared, _get) \ +{ \ + .name = "mount_matrix", \ + .shared = (_shared), \ + .read = iio_show_mount_matrix, \ + .private = (uintptr_t)(_get), \ +} + +/** * struct iio_event_spec - specification for a channel event * @type: Type of the event * @dir: Direction of the event @@ -527,6 +558,8 @@ void iio_device_unregister(struct iio_dev *indio_dev); int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev); void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev); int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp); +int iio_device_claim_direct_mode(struct iio_dev *indio_dev); +void iio_device_release_direct_mode(struct iio_dev *indio_dev); extern struct bus_type iio_bus_type; diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index fa2d01ef8f55..360da7d18a3d 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -41,6 +41,7 @@ struct adis_data { unsigned int diag_stat_reg; unsigned int self_test_mask; + bool self_test_no_autoclear; unsigned int startup_delay; const char * const *status_error_msgs; diff --git a/include/linux/iio/magnetometer/ak8975.h b/include/linux/iio/magnetometer/ak8975.h new file mode 100644 index 000000000000..c8400959d197 --- /dev/null +++ b/include/linux/iio/magnetometer/ak8975.h @@ -0,0 +1,16 @@ +#ifndef __IIO_MAGNETOMETER_AK8975_H__ +#define __IIO_MAGNETOMETER_AK8975_H__ + +#include <linux/iio/iio.h> + +/** + * struct ak8975_platform_data - AK8975 magnetometer driver platform data + * @eoc_gpio: data ready event gpio + * @orientation: mounting matrix relative to main hardware + */ +struct ak8975_platform_data { + int eoc_gpio; + struct iio_mount_matrix orientation; +}; + +#endif diff --git a/include/linux/ima.h b/include/linux/ima.h index e6516cbbe9bf..0eb7c2e7f0d6 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -21,6 +21,7 @@ extern int ima_file_mmap(struct file *file, unsigned long prot); extern int ima_read_file(struct file *file, enum kernel_read_file_id id); extern int ima_post_read_file(struct file *file, void *buf, loff_t size, enum kernel_read_file_id id); +extern void ima_post_path_mknod(struct dentry *dentry); #else static inline int ima_bprm_check(struct linux_binprm *bprm) @@ -54,6 +55,11 @@ static inline 
int ima_post_read_file(struct file *file, void *buf, loff_t size, return 0; } +static inline void ima_post_path_mknod(struct dentry *dentry) +{ + return; +} + #endif /* CONFIG_IMA */ #ifdef CONFIG_IMA_APPRAISE diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h index 11d7e840d913..defcc4644ce3 100644 --- a/include/linux/io-64-nonatomic-hi-lo.h +++ b/include/linux/io-64-nonatomic-hi-lo.h @@ -21,6 +21,23 @@ static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr) writel(val, addr); } +static inline __u64 hi_lo_readq_relaxed(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + high = readl_relaxed(p + 1); + low = readl_relaxed(p); + + return low + ((u64)high << 32); +} + +static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr) +{ + writel_relaxed(val >> 32, addr + 4); + writel_relaxed(val, addr); +} + #ifndef readq #define readq hi_lo_readq #endif @@ -29,4 +46,12 @@ static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr) #define writeq hi_lo_writeq #endif +#ifndef readq_relaxed +#define readq_relaxed hi_lo_readq_relaxed +#endif + +#ifndef writeq_relaxed +#define writeq_relaxed hi_lo_writeq_relaxed +#endif + #endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */ diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h index 1a4315f97360..084461a4e5ab 100644 --- a/include/linux/io-64-nonatomic-lo-hi.h +++ b/include/linux/io-64-nonatomic-lo-hi.h @@ -21,6 +21,23 @@ static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr) writel(val >> 32, addr + 4); } +static inline __u64 lo_hi_readq_relaxed(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl_relaxed(p); + high = readl_relaxed(p + 1); + + return low + ((u64)high << 32); +} + +static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr) +{ + writel_relaxed(val, addr); + writel_relaxed(val >> 32, addr + 4); +} + #ifndef readq #define readq lo_hi_readq #endif @@ -29,4 +46,12 @@ static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr) #define writeq lo_hi_writeq #endif +#ifndef readq_relaxed +#define readq_relaxed lo_hi_readq_relaxed +#endif + +#ifndef writeq_relaxed +#define writeq_relaxed lo_hi_writeq_relaxed +#endif + #endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */ diff --git a/include/linux/iommu.h b/include/linux/iommu.h index ef7a6ecd8584..664683aedcce 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -30,6 +30,7 @@ #define IOMMU_WRITE (1 << 1) #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ #define IOMMU_NOEXEC (1 << 3) +#define IOMMU_MMIO (1 << 4) /* e.g. 
things like MSI doorbells */ struct iommu_ops; struct iommu_group; @@ -78,6 +79,7 @@ struct iommu_domain_geometry { struct iommu_domain { unsigned type; const struct iommu_ops *ops; + unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ iommu_fault_handler_t handler; void *handler_token; struct iommu_domain_geometry geometry; @@ -155,8 +157,7 @@ struct iommu_dm_region { * @domain_set_windows: Set the number of windows for a domain * @domain_get_windows: Return the number of windows for a domain * @of_xlate: add OF master IDs to iommu grouping - * @pgsize_bitmap: bitmap of supported page sizes - * @priv: per-instance data private to the iommu driver + * @pgsize_bitmap: bitmap of all possible supported page sizes */ struct iommu_ops { bool (*capable)(enum iommu_cap); @@ -198,7 +199,6 @@ struct iommu_ops { int (*of_xlate)(struct device *dev, struct of_phandle_args *args); unsigned long pgsize_bitmap; - void *priv; }; #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 0b65543dc6cf..6230064d7f95 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -26,6 +26,9 @@ struct resource { /* * IO resources have these defined flags. + * + * PCI devices expose these flags to userspace in the "resource" sysfs file, + * so don't move them. */ #define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */ @@ -110,6 +113,7 @@ struct resource { /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ +#define IORESOURCE_PCI_EA_BEI (1<<5) /* BAR Equivalent Indicator */ /* * I/O Resource Descriptors diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 7edc14fb66b6..5c91b0b055d4 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -63,7 +63,8 @@ struct ipv6_devconf { } stable_secret; __s32 use_oif_addrs_only; __s32 keep_addr_on_down; - void *sysctl; + + struct ctl_table_header *sysctl_header; }; struct ipv6_params { @@ -117,14 +118,29 @@ struct inet6_skb_parm { #define IP6SKB_ROUTERALERT 8 #define IP6SKB_FRAGMENTED 16 #define IP6SKB_HOPBYHOP 32 +#define IP6SKB_L3SLAVE 64 }; +#if defined(CONFIG_NET_L3_MASTER_DEV) +static inline bool skb_l3mdev_slave(__u16 flags) +{ + return flags & IP6SKB_L3SLAVE; +} +#else +static inline bool skb_l3mdev_slave(__u16 flags) +{ + return false; +} +#endif + #define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb)) #define IP6CBMTU(skb) ((struct ip6_mtuinfo *)((skb)->cb)) static inline int inet6_iif(const struct sk_buff *skb) { - return IP6CB(skb)->iif; + bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags); + + return l3_slave ? 
skb->skb_iif : IP6CB(skb)->iif; } struct tcp6_request_sock { diff --git a/include/linux/irq.h b/include/linux/irq.h index c4de62348ff2..4d758a7c604a 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -530,6 +530,10 @@ static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *c } extern int irq_set_percpu_devid(unsigned int irq); +extern int irq_set_percpu_devid_partition(unsigned int irq, + const struct cpumask *affinity); +extern int irq_get_percpu_devid_partition(unsigned int irq, + struct cpumask *affinity); extern void __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h index 1551b5b2f4c2..f0f5d2671509 100644 --- a/include/linux/irqbypass.h +++ b/include/linux/irqbypass.h @@ -34,7 +34,7 @@ struct irq_bypass_consumer; /** * struct irq_bypass_producer - IRQ bypass producer definition * @node: IRQ bypass manager private list management - * @token: opaque token to match between producer and consumer + * @token: opaque token to match between producer and consumer (non-NULL) * @irq: Linux IRQ number for the producer device * @add_consumer: Connect the IRQ producer to an IRQ consumer (optional) * @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional) @@ -60,7 +60,7 @@ struct irq_bypass_producer { /** * struct irq_bypass_consumer - IRQ bypass consumer definition * @node: IRQ bypass manager private list management - * @token: opaque token to match between producer and consumer + * @token: opaque token to match between producer and consumer (non-NULL) * @add_producer: Connect the IRQ consumer to an IRQ producer * @del_producer: Disconnect the IRQ consumer from an IRQ producer * @stop: Perform any quiesce operations necessary prior to add/del (optional) diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h new file mode 100644 index 000000000000..c647b0547bcd --- /dev/null +++ b/include/linux/irqchip/arm-gic-common.h @@ -0,0 +1,34 @@ +/* + * include/linux/irqchip/arm-gic-common.h + * + * Copyright (C) 2016 ARM Limited, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef __LINUX_IRQCHIP_ARM_GIC_COMMON_H +#define __LINUX_IRQCHIP_ARM_GIC_COMMON_H + +#include <linux/types.h> +#include <linux/ioport.h> + +enum gic_type { + GIC_V2, + GIC_V3, +}; + +struct gic_kvm_info { + /* GIC type */ + enum gic_type type; + /* Virtual CPU interface */ + struct resource vcpu; + /* Interrupt number */ + unsigned int maint_irq; + /* Virtual control interface */ + struct resource vctrl; +}; + +const struct gic_kvm_info *gic_get_kvm_info(void); + +#endif /* __LINUX_IRQCHIP_ARM_GIC_COMMON_H */ diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index d5d798b35c1f..9e6fdd33bdb2 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -102,8 +102,6 @@ #define GICR_SYNCR 0x00C0 #define GICR_MOVLPIR 0x0100 #define GICR_MOVALLR 0x0110 -#define GICR_ISACTIVER GICD_ISACTIVER -#define GICR_ICACTIVER GICD_ICACTIVER #define GICR_IDREGS GICD_IDREGS #define GICR_PIDR2 GICD_PIDR2 diff --git a/include/linux/irqchip/irq-partition-percpu.h b/include/linux/irqchip/irq-partition-percpu.h new file mode 100644 index 000000000000..87433a5d1285 --- /dev/null +++ b/include/linux/irqchip/irq-partition-percpu.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2016 ARM Limited, All Rights Reserved. + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
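gic_get_kvm_info() from the arm-gic-common.h hunk above gives the hypervisor side a single call to discover which GIC it runs on and where the virtualization interface lives. A hedged consumer sketch; the NULL check and the prints are illustrative, not the KVM code's actual flow:

#include <linux/errno.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/printk.h>

static int foo_vgic_init(void)
{
	const struct gic_kvm_info *info = gic_get_kvm_info();

	if (!info)
		return -ENODEV;

	if (info->type == GIC_V2)
		pr_info("vGICv2: vcpu interface at %pa, maintenance IRQ %u\n",
			&info->vcpu.start, info->maint_irq);
	else
		pr_info("vGICv3: maintenance IRQ %u\n", info->maint_irq);

	return 0;
}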
+ */ + +#include <linux/fwnode.h> +#include <linux/cpumask.h> +#include <linux/irqdomain.h> + +struct partition_affinity { + cpumask_t mask; + void *partition_id; +}; + +struct partition_desc; + +#ifdef CONFIG_PARTITION_PERCPU +int partition_translate_id(struct partition_desc *desc, void *partition_id); +struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode, + struct partition_affinity *parts, + int nr_parts, + int chained_irq, + const struct irq_domain_ops *ops); +struct irq_domain *partition_get_domain(struct partition_desc *dsc); +#else +static inline int partition_translate_id(struct partition_desc *desc, + void *partition_id) +{ + return -EINVAL; +} + +static inline +struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode, + struct partition_affinity *parts, + int nr_parts, + int chained_irq, + const struct irq_domain_ops *ops) +{ + return NULL; +} + +static inline +struct irq_domain *partition_get_domain(struct partition_desc *dsc) +{ + return NULL; +} +#endif diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h index 80f89e4a29ac..81f930b0bca9 100644 --- a/include/linux/irqchip/mips-gic.h +++ b/include/linux/irqchip/mips-gic.h @@ -103,6 +103,7 @@ #define GIC_VPE_SWINT0_MAP_OFS 0x0054 #define GIC_VPE_SWINT1_MAP_OFS 0x0058 #define GIC_VPE_OTHER_ADDR_OFS 0x0080 +#define GIC_VP_IDENT_OFS 0x0088 #define GIC_VPE_WD_CONFIG0_OFS 0x0090 #define GIC_VPE_WD_COUNT0_OFS 0x0094 #define GIC_VPE_WD_INITIAL0_OFS 0x0098 @@ -211,6 +212,10 @@ #define GIC_VPE_SMASK_FDC_SHF 6 #define GIC_VPE_SMASK_FDC_MSK (MSK(1) << GIC_VPE_SMASK_FDC_SHF) +/* GIC_VP_IDENT fields */ +#define GIC_VP_IDENT_VCNUM_SHF 0 +#define GIC_VP_IDENT_VCNUM_MSK (MSK(6) << GIC_VP_IDENT_VCNUM_SHF) + /* GIC nomenclature for Core Interrupt Pins. */ #define GIC_CPU_INT0 0 /* Core Interrupt 2 */ #define GIC_CPU_INT1 1 /* . */ @@ -278,4 +283,16 @@ static inline int gic_get_usm_range(struct resource *gic_usm_res) #endif /* CONFIG_MIPS_GIC */ +/** + * gic_read_local_vp_id() - read the local VPs VCNUM + * + * Read the VCNUM of the local VP from the GIC_VP_IDENT register and + * return it to the caller. This ID should be used to refer to the VP + * via the GICs VP-other region, or when calculating an offset to a + * bit representing the VP in interrupt masks. + * + * Return: The VCNUM value for the local VP. 
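As the comment notes, the VCNUM doubles as the bit index for the local VP in per-VP interrupt masks; a one-function hedged illustration using the accessor declared just below (the mask itself is hypothetical):

#include <linux/bitops.h>
#include <linux/irqchip/mips-gic.h>

/* Mark the local VP in a hypothetical per-VP bitmap. */
static void foo_mark_local_vp(unsigned long *vp_mask)
{
	set_bit(gic_read_local_vp_id(), vp_mask);
}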
+ */ +extern unsigned gic_read_local_vp_id(void); + #endif /* __LINUX_IRQCHIP_MIPS_GIC_H */ diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index dcca77c4b9d2..b51beebf9804 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -66,6 +66,7 @@ struct irq_desc { int threads_handled_last; raw_spinlock_t lock; struct cpumask *percpu_enabled; + const struct cpumask *percpu_affinity; #ifdef CONFIG_SMP const struct cpumask *affinity_hint; struct irq_affinity_notify *affinity_notify; diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 2aed04396210..f1f36e04d885 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -96,6 +96,8 @@ enum irq_domain_bus_token { struct irq_domain_ops { int (*match)(struct irq_domain *d, struct device_node *node, enum irq_domain_bus_token bus_token); + int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token); int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw); void (*unmap)(struct irq_domain *d, unsigned int virq); int (*xlate)(struct irq_domain *d, struct device_node *node, @@ -211,7 +213,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, irq_hw_number_t first_hwirq, const struct irq_domain_ops *ops, void *host_data); -extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, +extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, enum irq_domain_bus_token bus_token); extern void irq_set_default_host(struct irq_domain *host); extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, @@ -227,6 +229,17 @@ static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode) return fwnode && fwnode->type == FWNODE_IRQCHIP; } +static inline +struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, + enum irq_domain_bus_token bus_token) +{ + struct irq_fwspec fwspec = { + .fwnode = fwnode, + }; + + return irq_find_matching_fwspec(&fwspec, bus_token); +} + static inline struct irq_domain *irq_find_matching_host(struct device_node *node, enum irq_domain_bus_token bus_token) { @@ -346,9 +359,8 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, irq_hw_number_t *out_hwirq, unsigned int *out_type); /* IPI functions */ -unsigned int irq_reserve_ipi(struct irq_domain *domain, - const struct cpumask *dest); -void irq_destroy_ipi(unsigned int irq); +int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest); +int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest); /* V2 interfaces to support hierarchy IRQ domains. */ extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, diff --git a/include/linux/isa.h b/include/linux/isa.h index b0270e3814c8..5ab85281230b 100644 --- a/include/linux/isa.h +++ b/include/linux/isa.h @@ -36,4 +36,36 @@ static inline void isa_unregister_driver(struct isa_driver *d) } #endif +/** + * module_isa_driver() - Helper macro for registering a ISA driver + * @__isa_driver: isa_driver struct + * @__num_isa_dev: number of devices to register + * + * Helper macro for ISA drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate code. Each module may only + * use this macro once, and calling it replaces module_init and module_exit. 
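With module_isa_driver() (the macro body follows just below), an ISA driver whose module init does nothing beyond registration collapses to a structure definition plus one macro invocation. A hedged sketch with a hypothetical foo driver supporting up to 8 devices:

#include <linux/isa.h>
#include <linux/module.h>

static int foo_isa_probe(struct device *dev, unsigned int id)
{
	/* hypothetical per-device setup */
	return 0;
}

static struct isa_driver foo_isa_driver = {
	.probe	= foo_isa_probe,
	.driver	= {
		.name	= "foo_isa",
	},
};

module_isa_driver(foo_isa_driver, 8);

MODULE_LICENSE("GPL");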
+ */ +#define module_isa_driver(__isa_driver, __num_isa_dev) \ +static int __init __isa_driver##_init(void) \ +{ \ + return isa_register_driver(&(__isa_driver), __num_isa_dev); \ +} \ +module_init(__isa_driver##_init); \ +static void __exit __isa_driver##_exit(void) \ +{ \ + isa_unregister_driver(&(__isa_driver)); \ +} \ +module_exit(__isa_driver##_exit); + +/** + * max_num_isa_dev() - Maximum possible number registered of an ISA device + * @__ida_dev_ext: ISA device address extent + * + * The highest base address possible for an ISA device is 0x3FF; this results in + * 1024 possible base addresses. Dividing the number of possible base addresses + * by the address extent taken by each device results in the maximum number of + * devices on a system. + */ +#define max_num_isa_dev(__isa_dev_ext) (1024 / __isa_dev_ext) + #endif /* __LINUX_ISA_H */ diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h index 548d55395488..10923d730486 100644 --- a/include/linux/iscsi_boot_sysfs.h +++ b/include/linux/iscsi_boot_sysfs.h @@ -64,6 +64,12 @@ enum iscsi_boot_initiator_properties_enum { ISCSI_BOOT_INI_END_MARKER, }; +enum iscsi_boot_acpitbl_properties_enum { + ISCSI_BOOT_ACPITBL_SIGNATURE, + ISCSI_BOOT_ACPITBL_OEM_ID, + ISCSI_BOOT_ACPITBL_OEM_TABLE_ID, +}; + struct attribute_group; struct iscsi_boot_kobj { @@ -127,6 +133,13 @@ iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index, umode_t (*is_visible) (void *data, int type), void (*release) (void *data)); +struct iscsi_boot_kobj * +iscsi_boot_create_acpitbl(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show)(void *data, int type, char *buf), + umode_t (*is_visible)(void *data, int type), + void (*release)(void *data)); + struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name); struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno); void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset); diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index fd1083c46c61..efb232c5f668 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -403,11 +403,19 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) /* Flags in jbd_inode->i_flags */ #define __JI_COMMIT_RUNNING 0 -/* Commit of the inode data in progress. We use this flag to protect us from +#define __JI_WRITE_DATA 1 +#define __JI_WAIT_DATA 2 + +/* + * Commit of the inode data in progress. We use this flag to protect us from * concurrent deletion of inode. 
We cannot use reference to inode for this * since we cannot afford doing last iput() on behalf of kjournald */ #define JI_COMMIT_RUNNING (1 << __JI_COMMIT_RUNNING) +/* Write allocated dirty buffers in this inode before commit */ +#define JI_WRITE_DATA (1 << __JI_WRITE_DATA) +/* Wait for outstanding data writes for this inode before commit */ +#define JI_WAIT_DATA (1 << __JI_WAIT_DATA) /** * struct jbd_inode is the structure linking inodes in ordered mode @@ -781,9 +789,6 @@ jbd2_time_diff(unsigned long start, unsigned long end) * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the * number that will fit in j_blocksize * @j_last_sync_writer: most recent pid which did a synchronous write - * @j_history: Buffer storing the transactions statistics history - * @j_history_max: Maximum number of transactions in the statistics history - * @j_history_cur: Current number of transactions in the statistics history * @j_history_lock: Protect the transactions statistics history * @j_proc_entry: procfs entry for the jbd statistics directory * @j_stats: Overall statistics @@ -1270,7 +1275,8 @@ extern int jbd2_journal_clear_err (journal_t *); extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *); extern int jbd2_journal_force_commit(journal_t *); extern int jbd2_journal_force_commit_nested(journal_t *); -extern int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *inode); +extern int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode); +extern int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode); extern int jbd2_journal_begin_ordered_truncate(journal_t *journal, struct jbd2_inode *inode, loff_t new_size); extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode); diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h new file mode 100644 index 000000000000..b7f8aced7870 --- /dev/null +++ b/include/linux/kasan-checks.h @@ -0,0 +1,12 @@ +#ifndef _LINUX_KASAN_CHECKS_H +#define _LINUX_KASAN_CHECKS_H + +#ifdef CONFIG_KASAN +void kasan_check_read(const void *p, unsigned int size); +void kasan_check_write(const void *p, unsigned int size); +#else +static inline void kasan_check_read(const void *p, unsigned int size) { } +static inline void kasan_check_write(const void *p, unsigned int size) { } +#endif + +#endif diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 737371b56044..611927f5870d 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -50,6 +50,8 @@ void kasan_free_pages(struct page *page, unsigned int order); void kasan_cache_create(struct kmem_cache *cache, size_t *size, unsigned long *flags); +void kasan_cache_shrink(struct kmem_cache *cache); +void kasan_cache_destroy(struct kmem_cache *cache); void kasan_poison_slab(struct page *page); void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); @@ -63,7 +65,8 @@ void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); -void kasan_slab_free(struct kmem_cache *s, void *object); +bool kasan_slab_free(struct kmem_cache *s, void *object); +void kasan_poison_slab_free(struct kmem_cache *s, void *object); struct kasan_cache { int alloc_meta_offset; @@ -88,6 +91,8 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {} static inline void kasan_cache_create(struct kmem_cache *cache, size_t *size, 
unsigned long *flags) {} +static inline void kasan_cache_shrink(struct kmem_cache *cache) {} +static inline void kasan_cache_destroy(struct kmem_cache *cache) {} static inline void kasan_poison_slab(struct page *page) {} static inline void kasan_unpoison_object_data(struct kmem_cache *cache, @@ -105,7 +110,11 @@ static inline void kasan_krealloc(const void *object, size_t new_size, static inline void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags) {} -static inline void kasan_slab_free(struct kmem_cache *s, void *object) {} +static inline bool kasan_slab_free(struct kmem_cache *s, void *object) +{ + return false; +} +static inline void kasan_poison_slab_free(struct kmem_cache *s, void *object) {} static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } static inline void kasan_free_shadow(const struct vm_struct *vm) {} diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 2f7775e229b0..94aa10ffe156 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -53,6 +53,13 @@ #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) +#define u64_to_user_ptr(x) ( \ +{ \ + typecheck(u64, x); \ + (void __user *)(uintptr_t)x; \ +} \ +) + /* * This looks more complex than it should be. But we need to * get the type for the ~ right in round_down (it needs to be @@ -412,9 +419,9 @@ extern __printf(3, 4) int scnprintf(char *buf, size_t size, const char *fmt, ...); extern __printf(3, 0) int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); -extern __printf(2, 3) +extern __printf(2, 3) __malloc char *kasprintf(gfp_t gfp, const char *fmt, ...); -extern __printf(2, 0) +extern __printf(2, 0) __malloc char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); extern __printf(2, 0) const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args); diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 30f089ebe0a4..96356ef012de 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -179,6 +179,7 @@ struct kernfs_open_file { /* private fields, do not use outside kernfs proper */ struct mutex mutex; + struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 2cc643c6e870..e8acb2b43dd9 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -230,8 +230,6 @@ extern void crash_kexec(struct pt_regs *); int kexec_should_crash(struct task_struct *); void crash_save_cpu(struct pt_regs *regs, int cpu); void crash_save_vmcoreinfo(void); -void crash_map_reserved_pages(void); -void crash_unmap_reserved_pages(void); void arch_crash_save_vmcoreinfo(void); __printf(1, 2) void vmcoreinfo_append_str(const char *fmt, ...); @@ -317,6 +315,8 @@ int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, unsigned int relsec); int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, unsigned int relsec); +void arch_kexec_protect_crashkres(void); +void arch_kexec_unprotect_crashkres(void); #else /* !CONFIG_KEXEC_CORE */ struct pt_regs; diff --git a/include/linux/key-type.h b/include/linux/key-type.h index 7463355a198b..eaee981c5558 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -45,7 +45,6 @@ struct key_preparsed_payload { size_t datalen; /* Raw datalen */ size_t quotalen; /* Quota length for proposed payload */ time_t expiry; /* Expiry time of key */ - bool trusted; /* True if key is trusted */ }; typedef int 
(*request_key_actor_t)(struct key_construction *key, diff --git a/include/linux/key.h b/include/linux/key.h index 5f5b1129dc92..722914798f37 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -173,11 +173,9 @@ struct key { #define KEY_FLAG_NEGATIVE 5 /* set if key is negative */ #define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */ #define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */ -#define KEY_FLAG_TRUSTED 8 /* set if key is trusted */ -#define KEY_FLAG_TRUSTED_ONLY 9 /* set if keyring only accepts links to trusted keys */ -#define KEY_FLAG_BUILTIN 10 /* set if key is builtin */ -#define KEY_FLAG_ROOT_CAN_INVAL 11 /* set if key can be invalidated by root without permission */ -#define KEY_FLAG_KEEP 12 /* set if key should not be removed */ +#define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */ +#define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */ +#define KEY_FLAG_KEEP 10 /* set if key should not be removed */ /* the key type and key description string * - the desc is used to match a key against search criteria @@ -205,6 +203,20 @@ struct key { }; int reject_error; }; + + /* This is set on a keyring to restrict the addition of a link to a key + * to it. If this method isn't provided then it is assumed that the + * keyring is open to any addition. It is ignored for non-keyring + * keys. + * + * This is intended for use with rings of trusted keys whereby addition + * to the keyring needs to be controlled. KEY_ALLOC_BYPASS_RESTRICTION + * overrides this, allowing the kernel to add extra keys without + * restriction. + */ + int (*restrict_link)(struct key *keyring, + const struct key_type *type, + const union key_payload *payload); }; extern struct key *key_alloc(struct key_type *type, @@ -212,14 +224,17 @@ extern struct key *key_alloc(struct key_type *type, kuid_t uid, kgid_t gid, const struct cred *cred, key_perm_t perm, - unsigned long flags); + unsigned long flags, + int (*restrict_link)(struct key *, + const struct key_type *, + const union key_payload *)); -#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */ -#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */ -#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ -#define KEY_ALLOC_TRUSTED 0x0004 /* Key should be flagged as trusted */ -#define KEY_ALLOC_BUILT_IN 0x0008 /* Key is built into kernel */ +#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */ +#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */ +#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ +#define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */ +#define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */ extern void key_revoke(struct key *key); extern void key_invalidate(struct key *key); @@ -288,8 +303,15 @@ extern struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid const struct cred *cred, key_perm_t perm, unsigned long flags, + int (*restrict_link)(struct key *, + const struct key_type *, + const union key_payload *), struct key *dest); +extern int restrict_link_reject(struct key *keyring, + const struct key_type *type, + const union key_payload *payload); + extern int keyring_clear(struct key *keyring); extern key_ref_t keyring_search(key_ref_t keyring, diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 5276fe0916fc..b1fa8f11c95b 100644 --- 
a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -35,6 +35,10 @@ #include <asm/kvm_host.h> +#ifndef KVM_MAX_VCPU_ID +#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS +#endif + /* * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used * in kvm, other bits are visible for userspace which are defined in @@ -225,6 +229,7 @@ struct kvm_vcpu { sigset_t sigset; struct kvm_vcpu_stat stat; unsigned int halt_poll_ns; + bool valid_wakeup; #ifdef CONFIG_HAS_IOMEM int mmio_needed; @@ -447,12 +452,13 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) { - struct kvm_vcpu *vcpu; + struct kvm_vcpu *vcpu = NULL; int i; - if (id < 0 || id >= KVM_MAX_VCPUS) + if (id < 0) return NULL; - vcpu = kvm_get_vcpu(kvm, id); + if (id < KVM_MAX_VCPUS) + vcpu = kvm_get_vcpu(kvm, id); if (vcpu && vcpu->vcpu_id == id) return vcpu; kvm_for_each_vcpu(i, vcpu, kvm) @@ -651,6 +657,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); void kvm_vcpu_block(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); +void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); void kvm_vcpu_kick(struct kvm_vcpu *vcpu); int kvm_vcpu_yield_to(struct kvm_vcpu *target); void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu); @@ -1091,6 +1098,11 @@ static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; } static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) { + /* + * Ensure the rest of the request is published to kvm_check_request's + * caller. Paired with the smp_mb__after_atomic in kvm_check_request. + */ + smp_wmb(); set_bit(req, &vcpu->requests); } @@ -1098,6 +1110,12 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) { if (test_bit(req, &vcpu->requests)) { clear_bit(req, &vcpu->requests); + + /* + * Ensure the rest of the request is visible to kvm_check_request's + * caller. Paired with the smp_wmb in kvm_make_request. + */ + smp_mb__after_atomic(); return true; } else { return false; @@ -1169,6 +1187,7 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS +bool kvm_arch_has_irq_bypass(void); int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, struct irq_bypass_producer *); void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *, @@ -1179,4 +1198,18 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set); #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ +#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS +/* If we wakeup during the poll time, was it a sucessful poll? */ +static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) +{ + return vcpu->valid_wakeup; +} + +#else +static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) +{ + return true; +} +#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */ + #endif diff --git a/include/linux/leds.h b/include/linux/leds.h index f203a8f89d30..d2b13066e781 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -50,6 +50,7 @@ struct led_classdev { #define LED_SYSFS_DISABLE (1 << 22) #define LED_DEV_CAP_FLASH (1 << 23) #define LED_HW_PLUGGABLE (1 << 24) +#define LED_PANIC_INDICATOR (1 << 25) /* Set LED brightness level * Must not sleep. 
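The new barriers in kvm_make_request()/kvm_check_request() above pair across CPUs: the requester publishes its payload before setting the request bit, and the vCPU side orders the bit test before reading that payload. A hedged sketch of the intended pattern; FOO_REQ_EVENT and the arch.pending_event field are hypothetical stand-ins for an architecture's real request number and payload:

#include <linux/kvm_host.h>

/* Requesting side, possibly running on another CPU. */
static void foo_post_event(struct kvm_vcpu *vcpu, u32 event)
{
	vcpu->arch.pending_event = event;	/* payload first... */
	kvm_make_request(FOO_REQ_EVENT, vcpu);	/* ...smp_wmb(), then set_bit() */
	kvm_vcpu_kick(vcpu);
}

/* Called from the vCPU run loop. */
static u32 foo_handle_requests(struct kvm_vcpu *vcpu)
{
	/* clear_bit() then smp_mb__after_atomic(), so the read below is ordered. */
	if (kvm_check_request(FOO_REQ_EVENT, vcpu))
		return vcpu->arch.pending_event;

	return 0;
}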
Use brightness_set_blocking for drivers @@ -329,6 +330,12 @@ extern void ledtrig_ide_activity(void); static inline void ledtrig_ide_activity(void) {} #endif +#ifdef CONFIG_LEDS_TRIGGER_MTD +extern void ledtrig_mtd_activity(void); +#else +static inline void ledtrig_mtd_activity(void) {} +#endif + #if defined(CONFIG_LEDS_TRIGGER_CAMERA) || defined(CONFIG_LEDS_TRIGGER_CAMERA_MODULE) extern void ledtrig_flash_ctrl(bool on); extern void ledtrig_torch_ctrl(bool on); @@ -358,6 +365,7 @@ struct gpio_led { unsigned gpio; unsigned active_low : 1; unsigned retain_state_suspended : 1; + unsigned panic_indicator : 1; unsigned default_state : 2; /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ struct gpio_desc *gpiod; diff --git a/include/linux/libata.h b/include/linux/libata.h index 2c4ebef79d0c..d15c19e331d1 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -180,6 +180,8 @@ enum { ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */ ATA_DFLAG_DEVSLP = (1 << 27), /* device supports Device Sleep */ ATA_DFLAG_ACPI_DISABLED = (1 << 28), /* ACPI for the device is disabled */ + ATA_DFLAG_D_SENSE = (1 << 29), /* Descriptor sense requested */ + ATA_DFLAG_ZAC = (1 << 30), /* ZAC device */ ATA_DEV_UNKNOWN = 0, /* unknown device */ ATA_DEV_ATA = 1, /* ATA device */ @@ -191,7 +193,8 @@ enum { ATA_DEV_SEMB = 7, /* SEMB */ ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */ ATA_DEV_ZAC = 9, /* ZAC device */ - ATA_DEV_NONE = 10, /* no device */ + ATA_DEV_ZAC_UNSUP = 10, /* ZAC device (unsupported) */ + ATA_DEV_NONE = 11, /* no device */ /* struct ata_link flags */ ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ @@ -727,6 +730,13 @@ struct ata_device { /* NCQ send and receive log subcommand support */ u8 ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_SIZE]; + u8 ncq_non_data_cmds[ATA_LOG_NCQ_NON_DATA_SIZE]; + + /* ZAC zone configuration */ + u32 zac_zoned_cap; + u32 zac_zones_optimal_open; + u32 zac_zones_optimal_nonseq; + u32 zac_zones_max_open; /* error history */ int spdn_cnt; @@ -1523,7 +1533,8 @@ static inline unsigned int ata_class_enabled(unsigned int class) static inline unsigned int ata_class_disabled(unsigned int class) { return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP || - class == ATA_DEV_PMP_UNSUP || class == ATA_DEV_SEMB_UNSUP; + class == ATA_DEV_PMP_UNSUP || class == ATA_DEV_SEMB_UNSUP || + class == ATA_DEV_ZAC_UNSUP; } static inline unsigned int ata_class_absent(unsigned int class) @@ -1641,6 +1652,26 @@ static inline bool ata_fpdma_dsm_supported(struct ata_device *dev) ATA_LOG_NCQ_SEND_RECV_DSM_TRIM); } +static inline bool ata_fpdma_read_log_supported(struct ata_device *dev) +{ + return (dev->flags & ATA_DFLAG_NCQ_SEND_RECV) && + (dev->ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET] & + ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED); +} + +static inline bool ata_fpdma_zac_mgmt_in_supported(struct ata_device *dev) +{ + return (dev->flags & ATA_DFLAG_NCQ_SEND_RECV) && + (dev->ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET] & + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED); +} + +static inline bool ata_fpdma_zac_mgmt_out_supported(struct ata_device *dev) +{ + return (dev->ncq_non_data_cmds[ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET] & + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT); +} + static inline void ata_qc_set_polling(struct ata_queued_cmd *qc) { qc->tf.ctl |= ATA_NIEN; diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 833867b9ddc2..0c3c30cbbea5 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ 
-27,7 +27,7 @@ enum { /* need to set a limit somewhere, but yes, this is likely overkill */ ND_IOCTL_MAX_BUFLEN = SZ_4M, ND_CMD_MAX_ELEM = 5, - ND_CMD_MAX_ENVELOPE = 16, + ND_CMD_MAX_ENVELOPE = 256, ND_MAX_MAPPINGS = 32, /* region flag indicating to direct-map persistent memory by default */ @@ -68,7 +68,7 @@ struct nd_mapping { struct nvdimm_bus_descriptor { const struct attribute_group **attr_groups; - unsigned long dsm_mask; + unsigned long cmd_mask; char *provider_name; ndctl_fn ndctl; int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc); @@ -130,10 +130,11 @@ struct nd_region *to_nd_region(struct device *dev); struct nd_blk_region *to_nd_blk_region(struct device *dev); struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus); const char *nvdimm_name(struct nvdimm *nvdimm); +unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm); void *nvdimm_provider_data(struct nvdimm *nvdimm); struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, const struct attribute_group **groups, unsigned long flags, - unsigned long *dsm_mask); + unsigned long cmd_mask); const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd); u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd, diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index cdcb2ccbefa8..ef2c7d2e76c4 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -18,7 +18,7 @@ enum { #define NVM_SEC_BITS (8) #define NVM_PL_BITS (8) #define NVM_LUN_BITS (8) -#define NVM_CH_BITS (8) +#define NVM_CH_BITS (7) struct ppa_addr { /* Generic structure for all addresses */ @@ -30,8 +30,14 @@ struct ppa_addr { u64 pl : NVM_PL_BITS; u64 lun : NVM_LUN_BITS; u64 ch : NVM_CH_BITS; + u64 reserved : 1; } g; + struct { + u64 line : 63; + u64 is_cached : 1; + } c; + u64 ppa; }; }; @@ -41,13 +47,11 @@ struct nvm_id; struct nvm_dev; typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); -typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *); typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32, nvm_l2p_update_fn *, void *); -typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int, - nvm_bb_update_fn *, void *); -typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int); +typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *); +typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *); typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *); @@ -202,6 +206,7 @@ struct nvm_id { struct nvm_target { struct list_head list; + struct nvm_dev *dev; struct nvm_tgt_type *type; struct gendisk *disk; }; @@ -232,14 +237,14 @@ struct nvm_rq { struct ppa_addr *ppa_list; - void *metadata; - dma_addr_t dma_metadata; + void *meta_list; + dma_addr_t dma_meta_list; struct completion *wait; nvm_end_io_fn *end_io; uint8_t opcode; - uint16_t nr_pages; + uint16_t nr_ppas; uint16_t flags; u64 ppa_status; /* ppa media status */ @@ -307,7 +312,6 @@ struct nvm_dev { struct nvm_dev_ops *ops; struct list_head devices; - struct list_head online_targets; /* Media manager */ struct nvmm_type *mt; @@ -323,6 +327,8 @@ struct nvm_dev { int sec_per_pg; /* only sectors for a single page */ int pgs_per_blk; int blks_per_lun; + int fpg_size; + int pfpg_size; /* size of buffer if all pages are to be read */ int 
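The libnvdimm change above renames the provider's dsm_mask to a generic cmd_mask, both in nvdimm_bus_descriptor and in nvdimm_create(). A hedged sketch of a provider registering a DIMM with the renamed parameter; my_register_dimm() and the chosen label-space commands are illustrative.

#include <linux/bitops.h>
#include <linux/libnvdimm.h>
#include <linux/ndctl.h>

static struct nvdimm *my_register_dimm(struct nvdimm_bus *bus, void *data)
{
        unsigned long cmd_mask = 0;

        /* commands this hypothetical DIMM implements */
        set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
        set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
        set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);

        return nvdimm_create(bus, data, NULL /* groups */, 0 /* flags */,
                             cmd_mask);
}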
sec_size; int oob_size; int mccap; @@ -345,10 +351,9 @@ struct nvm_dev { unsigned long total_blocks; unsigned long total_secs; int nr_luns; - unsigned max_pages_per_blk; unsigned long *lun_map; - void *ppalist_pool; + void *dma_pool; struct nvm_id identity; @@ -450,8 +455,8 @@ struct nvm_tgt_type { struct list_head list; }; -extern int nvm_register_target(struct nvm_tgt_type *); -extern void nvm_unregister_target(struct nvm_tgt_type *); +extern int nvm_register_tgt_type(struct nvm_tgt_type *); +extern void nvm_unregister_tgt_type(struct nvm_tgt_type *); extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *); extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t); @@ -467,6 +472,7 @@ typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *); typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, unsigned long); +typedef void (nvmm_mark_blk_fn)(struct nvm_dev *, struct ppa_addr, int); typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); typedef int (nvmm_reserve_lun)(struct nvm_dev *, int); typedef void (nvmm_release_lun)(struct nvm_dev *, int); @@ -494,6 +500,9 @@ struct nvmm_type { nvmm_submit_io_fn *submit_io; nvmm_erase_blk_fn *erase_blk; + /* Bad block mgmt */ + nvmm_mark_blk_fn *mark_blk; + /* Configuration management */ nvmm_get_lun_fn *get_lun; nvmm_reserve_lun *reserve_lun; @@ -527,13 +536,17 @@ extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *); extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *); extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *); extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *, - struct ppa_addr *, int); + struct ppa_addr *, int, int); extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int); extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); extern void nvm_end_io(struct nvm_rq *, int); extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int, void *, int); +extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int, + int, void *, int); +extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int); +extern int nvm_get_bb_tbl(struct nvm_dev *, struct ppa_addr, u8 *); /* sysblk.c */ #define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */ @@ -554,6 +567,13 @@ extern int nvm_update_sysblock(struct nvm_dev *, struct nvm_sb_info *); extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *); extern int nvm_dev_factory(struct nvm_dev *, int flags); + +#define nvm_for_each_lun_ppa(dev, ppa, chid, lunid) \ + for ((chid) = 0, (ppa).ppa = 0; (chid) < (dev)->nr_chnls; \ + (chid)++, (ppa).g.ch = (chid)) \ + for ((lunid) = 0; (lunid) < (dev)->luns_per_chnl; \ + (lunid)++, (ppa).g.lun = (lunid)) + #else /* CONFIG_NVM */ struct nvm_dev_ops; diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index bd830d590465..a93a0b23dc8d 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -65,27 +65,8 @@ struct klp_func { }; /** - * struct klp_reloc - relocation structure for live patching - * @loc: address where the relocation will be written - * @sympos: position in kallsyms to disambiguate symbols (optional) - * @type: ELF relocation type - * @name: name of the referenced symbol (for lookup/verification) - * @addend: offset from the referenced symbol - * @external: symbol is either exported or within the live patch module 
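The lightnvm hunks above replace the callback-based bad-block reporting with nvm_get_bb_tbl()/nvm_bb_tbl_fold() and add the nvm_for_each_lun_ppa() iterator. A sketch of a target scanning every LUN, assuming the caller provides a buffer large enough for one status byte per block and plane; my_scan_bb_tbls() is illustrative.

#include <linux/lightnvm.h>

static int my_scan_bb_tbls(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
        struct ppa_addr ppa;
        int ch, lun, ret;

        nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
                ret = nvm_get_bb_tbl(dev, ppa, blks);
                if (ret)
                        return ret;

                /* collapse per-plane entries into one state per block */
                ret = nvm_bb_tbl_fold(dev, blks, nr_blks);
                if (ret < 0)
                        return ret;
        }

        return 0;
}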
itself - */ -struct klp_reloc { - unsigned long loc; - unsigned long sympos; - unsigned long type; - const char *name; - int addend; - int external; -}; - -/** * struct klp_object - kernel object structure for live patching * @name: module name (or NULL for vmlinux) - * @relocs: relocation entries to be applied at load time * @funcs: function entries for functions to be patched in the object * @kobj: kobject for sysfs resources * @mod: kernel module associated with the patched object @@ -95,7 +76,6 @@ struct klp_reloc { struct klp_object { /* external */ const char *name; - struct klp_reloc *relocs; struct klp_func *funcs; /* internal */ @@ -124,10 +104,12 @@ struct klp_patch { }; #define klp_for_each_object(patch, obj) \ - for (obj = patch->objs; obj->funcs; obj++) + for (obj = patch->objs; obj->funcs || obj->name; obj++) #define klp_for_each_func(obj, func) \ - for (func = obj->funcs; func->old_name; func++) + for (func = obj->funcs; \ + func->old_name || func->new_func || func->old_sympos; \ + func++) int klp_register_patch(struct klp_patch *); int klp_unregister_patch(struct klp_patch *); diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index cdee11cbcdf1..7ae397669d8b 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1190,7 +1190,8 @@ * Return 0 if permission is granted. * @settime: * Check permission to change the system time. - * struct timespec and timezone are defined in include/linux/time.h + * struct timespec64 is defined in include/linux/time64.h and timezone + * is defined in include/linux/time.h * @ts contains new time * @tz contains new timezone * Return 0 if permission is granted. @@ -1327,7 +1328,7 @@ union security_list_options { int (*quotactl)(int cmds, int type, int id, struct super_block *sb); int (*quota_on)(struct dentry *dentry); int (*syslog)(int type); - int (*settime)(const struct timespec *ts, const struct timezone *tz); + int (*settime)(const struct timespec64 *ts, const struct timezone *tz); int (*vm_enough_memory)(struct mm_struct *mm, long pages); int (*bprm_set_creds)(struct linux_binprm *bprm); @@ -1343,10 +1344,10 @@ union security_list_options { int (*sb_kern_mount)(struct super_block *sb, int flags, void *data); int (*sb_show_options)(struct seq_file *m, struct super_block *sb); int (*sb_statfs)(struct dentry *dentry); - int (*sb_mount)(const char *dev_name, struct path *path, + int (*sb_mount)(const char *dev_name, const struct path *path, const char *type, unsigned long flags, void *data); int (*sb_umount)(struct vfsmount *mnt, int flags); - int (*sb_pivotroot)(struct path *old_path, struct path *new_path); + int (*sb_pivotroot)(const struct path *old_path, const struct path *new_path); int (*sb_set_mnt_opts)(struct super_block *sb, struct security_mnt_opts *opts, unsigned long kern_flags, @@ -1360,23 +1361,23 @@ union security_list_options { #ifdef CONFIG_SECURITY_PATH - int (*path_unlink)(struct path *dir, struct dentry *dentry); - int (*path_mkdir)(struct path *dir, struct dentry *dentry, + int (*path_unlink)(const struct path *dir, struct dentry *dentry); + int (*path_mkdir)(const struct path *dir, struct dentry *dentry, umode_t mode); - int (*path_rmdir)(struct path *dir, struct dentry *dentry); - int (*path_mknod)(struct path *dir, struct dentry *dentry, + int (*path_rmdir)(const struct path *dir, struct dentry *dentry); + int (*path_mknod)(const struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev); - int (*path_truncate)(struct path *path); - int (*path_symlink)(struct path *dir, 
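With klp_reloc gone, the iteration macros above are the only per-object/per-function walk, and their termination conditions now tolerate entries without a name. A small sketch of walking a patch with the updated macros; my_dump_patch() is illustrative.

#include <linux/livepatch.h>
#include <linux/printk.h>

static void my_dump_patch(struct klp_patch *patch)
{
        struct klp_object *obj;
        struct klp_func *func;

        klp_for_each_object(patch, obj)
                klp_for_each_func(obj, func)
                        pr_info("%s: patching %s\n",
                                obj->name ? obj->name : "vmlinux",
                                func->old_name);
}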
struct dentry *dentry, + int (*path_truncate)(const struct path *path); + int (*path_symlink)(const struct path *dir, struct dentry *dentry, const char *old_name); - int (*path_link)(struct dentry *old_dentry, struct path *new_dir, + int (*path_link)(struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry); - int (*path_rename)(struct path *old_dir, struct dentry *old_dentry, - struct path *new_dir, + int (*path_rename)(const struct path *old_dir, struct dentry *old_dentry, + const struct path *new_dir, struct dentry *new_dentry); - int (*path_chmod)(struct path *path, umode_t mode); - int (*path_chown)(struct path *path, kuid_t uid, kgid_t gid); - int (*path_chroot)(struct path *path); + int (*path_chmod)(const struct path *path, umode_t mode); + int (*path_chown)(const struct path *path, kuid_t uid, kgid_t gid); + int (*path_chroot)(const struct path *path); #endif int (*inode_alloc_security)(struct inode *inode); @@ -1804,7 +1805,6 @@ struct security_hook_heads { struct list_head tun_dev_attach_queue; struct list_head tun_dev_attach; struct list_head tun_dev_open; - struct list_head skb_owned_by; #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM struct list_head xfrm_policy_alloc_security; @@ -1893,5 +1893,10 @@ extern void __init yama_add_hooks(void); #else static inline void __init yama_add_hooks(void) { } #endif +#ifdef CONFIG_SECURITY_LOADPIN +void __init loadpin_add_hooks(void); +#else +static inline void loadpin_add_hooks(void) { }; +#endif #endif /* ! __LINUX_LSM_HOOKS_H */ diff --git a/include/linux/mcb.h b/include/linux/mcb.h index ed06e15a36aa..ead13d233a97 100644 --- a/include/linux/mcb.h +++ b/include/linux/mcb.h @@ -15,22 +15,30 @@ #include <linux/device.h> #include <linux/irqreturn.h> +#define CHAMELEON_FILENAME_LEN 12 + struct mcb_driver; struct mcb_device; /** * struct mcb_bus - MEN Chameleon Bus * - * @dev: pointer to carrier device - * @children: the child busses + * @dev: bus device + * @carrier: pointer to carrier device * @bus_nr: mcb bus number * @get_irq: callback to get IRQ number + * @revision: the FPGA's revision number + * @model: the FPGA's model number + * @filename: the FPGA's name */ struct mcb_bus { - struct list_head children; struct device dev; struct device *carrier; int bus_nr; + u8 revision; + char model; + u8 minor; + char name[CHAMELEON_FILENAME_LEN + 1]; int (*get_irq)(struct mcb_device *dev); }; #define to_mcb_bus(b) container_of((b), struct mcb_bus, dev) diff --git a/include/linux/mdio.h b/include/linux/mdio.h index 5bfd99d1a40a..bf9d1d750693 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -13,6 +13,17 @@ struct mii_bus; +/* Multiple levels of nesting are possible. However typically this is + * limited to nested DSA like layer, a MUX layer, and the normal + * user. Instead of trying to handle the general case, just define + * these cases. 
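The hook prototypes above gain const struct path arguments, so a security module can inspect but not rewrite the path it is handed. A hedged sketch of a hook matching the new prototype, registered the same way the yama/loadpin hooks above are; the my_lsm_* names are illustrative and CONFIG_SECURITY_PATH is assumed.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/lsm_hooks.h>
#include <linux/path.h>

static int my_lsm_path_chmod(const struct path *path, umode_t mode)
{
        return 0;       /* allow; the path is read-only to the hook */
}

static struct security_hook_list my_lsm_hooks[] = {
        LSM_HOOK_INIT(path_chmod, my_lsm_path_chmod),
};

static void __init my_lsm_add_hooks(void)
{
        security_add_hooks(my_lsm_hooks, ARRAY_SIZE(my_lsm_hooks));
}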
+ */ +enum mdio_mutex_lock_class { + MDIO_MUTEX_NORMAL, + MDIO_MUTEX_MUX, + MDIO_MUTEX_NESTED, +}; + struct mdio_device { struct device dev; diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 1191d79aa495..a805474df4ab 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -415,25 +415,6 @@ unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) return mz->lru_size[lru]; } -static inline bool mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) -{ - unsigned long inactive_ratio; - unsigned long inactive; - unsigned long active; - unsigned long gb; - - inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON); - active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON); - - gb = (inactive + active) >> (30 - PAGE_SHIFT); - if (gb) - inactive_ratio = int_sqrt(10 * gb); - else - inactive_ratio = 1; - - return inactive * inactive_ratio < active; -} - void mem_cgroup_handle_over_high(void); void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, @@ -646,24 +627,12 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg) return true; } -static inline bool -mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) -{ - return true; -} - static inline unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) { return 0; } -static inline void -mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, - int increment) -{ -} - static inline unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, int nid, unsigned int lru_mask) diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index adbef586e696..20d8a5d4d133 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -247,16 +247,16 @@ static inline void mem_hotplug_done(void) {} #ifdef CONFIG_MEMORY_HOTREMOVE -extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); +extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); extern void try_offline_node(int nid); extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); extern void remove_memory(int nid, u64 start, u64 size); #else -static inline int is_mem_section_removable(unsigned long pfn, +static inline bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages) { - return 0; + return false; } static inline void try_offline_node(int nid) {} diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 2696c1f05ed1..4429d255c8ab 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -172,14 +172,14 @@ extern int mpol_parse_str(char *str, struct mempolicy **mpol); extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol); /* Check if a vma is migratable */ -static inline int vma_migratable(struct vm_area_struct *vma) +static inline bool vma_migratable(struct vm_area_struct *vma) { if (vma->vm_flags & (VM_IO | VM_PFNMAP)) - return 0; + return false; #ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION if (vma->vm_flags & VM_HUGETLB) - return 0; + return false; #endif /* @@ -190,8 +190,8 @@ static inline int vma_migratable(struct vm_area_struct *vma) if (vma->vm_file && gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) < policy_zone) - return 0; - return 1; + return false; + return true; } extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long); @@ -228,6 +228,12 @@ static inline void mpol_free_shared_policy(struct shared_policy *p) { } +static inline struct mempolicy * 
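The mdio_mutex_lock_class values above give nested MDIO users (muxes, DSA) distinct lockdep subclasses. A sketch of what a nested accessor might look like, assuming the usual mii_bus read callback and mdio_lock mutex; my_nested_mdio_read() is illustrative.

#include <linux/mdio.h>
#include <linux/mutex.h>
#include <linux/phy.h>

static int my_nested_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
        int ret;

        /* inner bus of a mux: a distinct subclass keeps lockdep quiet */
        mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
        ret = bus->read(bus, addr, regnum);
        mutex_unlock(&bus->mdio_lock);

        return ret;
}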
+mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) +{ + return NULL; +} + #define vma_policy(vma) NULL static inline int diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 69b6951e8fd2..b1086c936507 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -5,6 +5,7 @@ #define _LINUX_MEMPOOL_H #include <linux/wait.h> +#include <linux/compiler.h> struct kmem_cache; @@ -31,7 +32,7 @@ extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, extern int mempool_resize(mempool_t *pool, int new_min_nr); extern void mempool_destroy(mempool_t *pool); -extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask); +extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc; extern void mempool_free(void *element, mempool_t *pool); /* diff --git a/include/linux/mfd/as3722.h b/include/linux/mfd/as3722.h index 8d43e9f2a842..51e6f9414575 100644 --- a/include/linux/mfd/as3722.h +++ b/include/linux/mfd/as3722.h @@ -196,6 +196,7 @@ #define AS3722_LDO3_VSEL_MIN 0x01 #define AS3722_LDO3_VSEL_MAX 0x2D #define AS3722_LDO3_NUM_VOLT 0x2D +#define AS3722_LDO6_VSEL_BYPASS 0x3F #define AS3722_LDO_VSEL_MASK 0x7F #define AS3722_LDO_VSEL_MIN 0x01 #define AS3722_LDO_VSEL_MAX 0x7F diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index d82e7d51372b..0be4982f08fe 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -20,6 +20,7 @@ enum { AXP221_ID, AXP223_ID, AXP288_ID, + AXP809_ID, NR_AXP20X_VARIANTS, }; @@ -264,6 +265,29 @@ enum { AXP22X_REG_ID_MAX, }; +enum { + AXP809_DCDC1 = 0, + AXP809_DCDC2, + AXP809_DCDC3, + AXP809_DCDC4, + AXP809_DCDC5, + AXP809_DC1SW, + AXP809_DC5LDO, + AXP809_ALDO1, + AXP809_ALDO2, + AXP809_ALDO3, + AXP809_ELDO1, + AXP809_ELDO2, + AXP809_ELDO3, + AXP809_DLDO1, + AXP809_DLDO2, + AXP809_RTC_LDO, + AXP809_LDO_IO0, + AXP809_LDO_IO1, + AXP809_SW, + AXP809_REG_ID_MAX, +}; + /* IRQs */ enum { AXP152_IRQ_LDO0IN_CONNECT = 1, @@ -390,6 +414,41 @@ enum axp288_irqs { AXP288_IRQ_BC_USB_CHNG, }; +enum axp809_irqs { + AXP809_IRQ_ACIN_OVER_V = 1, + AXP809_IRQ_ACIN_PLUGIN, + AXP809_IRQ_ACIN_REMOVAL, + AXP809_IRQ_VBUS_OVER_V, + AXP809_IRQ_VBUS_PLUGIN, + AXP809_IRQ_VBUS_REMOVAL, + AXP809_IRQ_VBUS_V_LOW, + AXP809_IRQ_BATT_PLUGIN, + AXP809_IRQ_BATT_REMOVAL, + AXP809_IRQ_BATT_ENT_ACT_MODE, + AXP809_IRQ_BATT_EXIT_ACT_MODE, + AXP809_IRQ_CHARG, + AXP809_IRQ_CHARG_DONE, + AXP809_IRQ_BATT_CHG_TEMP_HIGH, + AXP809_IRQ_BATT_CHG_TEMP_HIGH_END, + AXP809_IRQ_BATT_CHG_TEMP_LOW, + AXP809_IRQ_BATT_CHG_TEMP_LOW_END, + AXP809_IRQ_BATT_ACT_TEMP_HIGH, + AXP809_IRQ_BATT_ACT_TEMP_HIGH_END, + AXP809_IRQ_BATT_ACT_TEMP_LOW, + AXP809_IRQ_BATT_ACT_TEMP_LOW_END, + AXP809_IRQ_DIE_TEMP_HIGH, + AXP809_IRQ_LOW_PWR_LVL1, + AXP809_IRQ_LOW_PWR_LVL2, + AXP809_IRQ_TIMER, + AXP809_IRQ_PEK_RIS_EDGE, + AXP809_IRQ_PEK_FAL_EDGE, + AXP809_IRQ_PEK_SHORT, + AXP809_IRQ_PEK_LONG, + AXP809_IRQ_PEK_OVER_OFF, + AXP809_IRQ_GPIO1_INPUT, + AXP809_IRQ_GPIO0_INPUT, +}; + #define AXP288_TS_ADC_H 0x58 #define AXP288_TS_ADC_L 0x59 #define AXP288_GP_ADC_H 0x5a diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index bc6f7e00fb3d..99c0395fe1f9 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -17,7 +17,7 @@ #include <linux/platform_device.h> struct irq_domain; -struct property_set; +struct property_entry; /* Matches ACPI PNP id, either _HID or _CID, or ACPI _ADR */ struct mfd_cell_acpi_match { @@ -47,7 +47,7 @@ struct mfd_cell { size_t pdata_size; /* device properties passed to the sub devices drivers */ - const struct 
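mempool_alloc() above only gains the __malloc attribute, which strengthens the compiler's aliasing assumptions; callers are unchanged. A minimal usage sketch; my_grab_buffer() is illustrative.

#include <linux/gfp.h>
#include <linux/mempool.h>

static void *my_grab_buffer(mempool_t *pool)
{
        void *buf = mempool_alloc(pool, GFP_KERNEL);

        if (!buf)
                return NULL;
        /* ... fill buf ... */
        return buf;     /* released later with mempool_free(buf, pool) */
}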
property_set *pset; + struct property_entry *properties; /* * Device Tree compatible string @@ -131,4 +131,8 @@ static inline int mfd_add_hotplug_devices(struct device *parent, extern void mfd_remove_devices(struct device *parent); +extern int devm_mfd_add_devices(struct device *dev, int id, + const struct mfd_cell *cells, int n_devs, + struct resource *mem_base, + int irq_base, struct irq_domain *irq_domain); #endif diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h new file mode 100644 index 000000000000..dbbe9a644622 --- /dev/null +++ b/include/linux/mfd/hi655x-pmic.h @@ -0,0 +1,55 @@ +/* + * Device driver for regulators in hi655x IC + * + * Copyright (c) 2016 Hisilicon. + * + * Authors: + * Chen Feng <puck.chen@hisilicon.com> + * Fei Wang <w.f@huawei.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __HI655X_PMIC_H +#define __HI655X_PMIC_H + +/* Hi655x registers are mapped to memory bus in 4 bytes stride */ +#define HI655X_STRIDE 4 +#define HI655X_BUS_ADDR(x) ((x) << 2) + +#define HI655X_BITS 8 + +#define HI655X_NR_IRQ 32 + +#define HI655X_IRQ_STAT_BASE (0x003 << 2) +#define HI655X_IRQ_MASK_BASE (0x007 << 2) +#define HI655X_ANA_IRQM_BASE (0x1b5 << 2) +#define HI655X_IRQ_ARRAY 4 +#define HI655X_IRQ_MASK 0xFF +#define HI655X_IRQ_CLR 0xFF +#define HI655X_VER_REG 0x00 + +#define PMU_VER_START 0x10 +#define PMU_VER_END 0x38 + +#define RESERVE_INT BIT(7) +#define PWRON_D20R_INT BIT(6) +#define PWRON_D20F_INT BIT(5) +#define PWRON_D4SR_INT BIT(4) +#define VSYS_6P0_D200UR_INT BIT(3) +#define VSYS_UV_D3R_INT BIT(2) +#define VSYS_2P5_R_INT BIT(1) +#define OTMP_D1R_INT BIT(0) + +struct hi655x_pmic { + struct resource *res; + struct device *dev; + struct regmap *regmap; + int gpio; + unsigned int ver; + struct regmap_irq_chip_data *irq_data; +}; + +#endif diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h new file mode 100644 index 000000000000..3ca0af07fc78 --- /dev/null +++ b/include/linux/mfd/max77620.h @@ -0,0 +1,346 @@ +/* + * Defining registers address and its bit definitions of MAX77620 and MAX20024 + * + * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
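The new devm_mfd_add_devices() declared above lets a parent driver drop its explicit mfd_remove_devices() call. A hedged probe sketch; the "my-chip" cell names are illustrative.

#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>

static const struct mfd_cell my_chip_cells[] = {
        { .name = "my-chip-rtc" },
        { .name = "my-chip-gpio" },
};

static int my_chip_probe(struct platform_device *pdev)
{
        /* sub-devices are torn down automatically when the parent unbinds */
        return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
                                    my_chip_cells, ARRAY_SIZE(my_chip_cells),
                                    NULL, 0, NULL);
}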
+ */ + +#ifndef _MFD_MAX77620_H_ +#define _MFD_MAX77620_H_ + +#include <linux/types.h> + +/* GLOBAL, PMIC, GPIO, FPS, ONOFFC, CID Registers */ +#define MAX77620_REG_CNFGGLBL1 0x00 +#define MAX77620_REG_CNFGGLBL2 0x01 +#define MAX77620_REG_CNFGGLBL3 0x02 +#define MAX77620_REG_CNFG1_32K 0x03 +#define MAX77620_REG_CNFGBBC 0x04 +#define MAX77620_REG_IRQTOP 0x05 +#define MAX77620_REG_INTLBT 0x06 +#define MAX77620_REG_IRQSD 0x07 +#define MAX77620_REG_IRQ_LVL2_L0_7 0x08 +#define MAX77620_REG_IRQ_LVL2_L8 0x09 +#define MAX77620_REG_IRQ_LVL2_GPIO 0x0A +#define MAX77620_REG_ONOFFIRQ 0x0B +#define MAX77620_REG_NVERC 0x0C +#define MAX77620_REG_IRQTOPM 0x0D +#define MAX77620_REG_INTENLBT 0x0E +#define MAX77620_REG_IRQMASKSD 0x0F +#define MAX77620_REG_IRQ_MSK_L0_7 0x10 +#define MAX77620_REG_IRQ_MSK_L8 0x11 +#define MAX77620_REG_ONOFFIRQM 0x12 +#define MAX77620_REG_STATLBT 0x13 +#define MAX77620_REG_STATSD 0x14 +#define MAX77620_REG_ONOFFSTAT 0x15 + +/* SD and LDO Registers */ +#define MAX77620_REG_SD0 0x16 +#define MAX77620_REG_SD1 0x17 +#define MAX77620_REG_SD2 0x18 +#define MAX77620_REG_SD3 0x19 +#define MAX77620_REG_SD4 0x1A +#define MAX77620_REG_DVSSD0 0x1B +#define MAX77620_REG_DVSSD1 0x1C +#define MAX77620_REG_SD0_CFG 0x1D +#define MAX77620_REG_SD1_CFG 0x1E +#define MAX77620_REG_SD2_CFG 0x1F +#define MAX77620_REG_SD3_CFG 0x20 +#define MAX77620_REG_SD4_CFG 0x21 +#define MAX77620_REG_SD_CFG2 0x22 +#define MAX77620_REG_LDO0_CFG 0x23 +#define MAX77620_REG_LDO0_CFG2 0x24 +#define MAX77620_REG_LDO1_CFG 0x25 +#define MAX77620_REG_LDO1_CFG2 0x26 +#define MAX77620_REG_LDO2_CFG 0x27 +#define MAX77620_REG_LDO2_CFG2 0x28 +#define MAX77620_REG_LDO3_CFG 0x29 +#define MAX77620_REG_LDO3_CFG2 0x2A +#define MAX77620_REG_LDO4_CFG 0x2B +#define MAX77620_REG_LDO4_CFG2 0x2C +#define MAX77620_REG_LDO5_CFG 0x2D +#define MAX77620_REG_LDO5_CFG2 0x2E +#define MAX77620_REG_LDO6_CFG 0x2F +#define MAX77620_REG_LDO6_CFG2 0x30 +#define MAX77620_REG_LDO7_CFG 0x31 +#define MAX77620_REG_LDO7_CFG2 0x32 +#define MAX77620_REG_LDO8_CFG 0x33 +#define MAX77620_REG_LDO8_CFG2 0x34 +#define MAX77620_REG_LDO_CFG3 0x35 + +#define MAX77620_LDO_SLEW_RATE_MASK 0x1 + +/* LDO Configuration 3 */ +#define MAX77620_TRACK4_MASK BIT(5) +#define MAX77620_TRACK4_SHIFT 5 + +/* Voltage */ +#define MAX77620_SDX_VOLT_MASK 0xFF +#define MAX77620_SD0_VOLT_MASK 0x3F +#define MAX77620_SD1_VOLT_MASK 0x7F +#define MAX77620_LDO_VOLT_MASK 0x3F + +#define MAX77620_REG_GPIO0 0x36 +#define MAX77620_REG_GPIO1 0x37 +#define MAX77620_REG_GPIO2 0x38 +#define MAX77620_REG_GPIO3 0x39 +#define MAX77620_REG_GPIO4 0x3A +#define MAX77620_REG_GPIO5 0x3B +#define MAX77620_REG_GPIO6 0x3C +#define MAX77620_REG_GPIO7 0x3D +#define MAX77620_REG_PUE_GPIO 0x3E +#define MAX77620_REG_PDE_GPIO 0x3F +#define MAX77620_REG_AME_GPIO 0x40 +#define MAX77620_REG_ONOFFCNFG1 0x41 +#define MAX77620_REG_ONOFFCNFG2 0x42 + +/* FPS Registers */ +#define MAX77620_REG_FPS_CFG0 0x43 +#define MAX77620_REG_FPS_CFG1 0x44 +#define MAX77620_REG_FPS_CFG2 0x45 +#define MAX77620_REG_FPS_LDO0 0x46 +#define MAX77620_REG_FPS_LDO1 0x47 +#define MAX77620_REG_FPS_LDO2 0x48 +#define MAX77620_REG_FPS_LDO3 0x49 +#define MAX77620_REG_FPS_LDO4 0x4A +#define MAX77620_REG_FPS_LDO5 0x4B +#define MAX77620_REG_FPS_LDO6 0x4C +#define MAX77620_REG_FPS_LDO7 0x4D +#define MAX77620_REG_FPS_LDO8 0x4E +#define MAX77620_REG_FPS_SD0 0x4F +#define MAX77620_REG_FPS_SD1 0x50 +#define MAX77620_REG_FPS_SD2 0x51 +#define MAX77620_REG_FPS_SD3 0x52 +#define MAX77620_REG_FPS_SD4 0x53 +#define MAX77620_REG_FPS_NONE 0 + +#define MAX77620_FPS_SRC_MASK 
0xC0 +#define MAX77620_FPS_SRC_SHIFT 6 +#define MAX77620_FPS_PU_PERIOD_MASK 0x38 +#define MAX77620_FPS_PU_PERIOD_SHIFT 3 +#define MAX77620_FPS_PD_PERIOD_MASK 0x07 +#define MAX77620_FPS_PD_PERIOD_SHIFT 0 +#define MAX77620_FPS_TIME_PERIOD_MASK 0x38 +#define MAX77620_FPS_TIME_PERIOD_SHIFT 3 +#define MAX77620_FPS_EN_SRC_MASK 0x06 +#define MAX77620_FPS_EN_SRC_SHIFT 1 +#define MAX77620_FPS_ENFPS_SW_MASK 0x01 +#define MAX77620_FPS_ENFPS_SW 0x01 + +/* Minimum and maximum FPS period time (in microseconds) are + * different for MAX77620 and Max20024. + */ +#define MAX77620_FPS_PERIOD_MIN_US 40 +#define MAX20024_FPS_PERIOD_MIN_US 20 + +#define MAX77620_FPS_PERIOD_MAX_US 2560 +#define MAX20024_FPS_PERIOD_MAX_US 5120 + +#define MAX77620_REG_FPS_GPIO1 0x54 +#define MAX77620_REG_FPS_GPIO2 0x55 +#define MAX77620_REG_FPS_GPIO3 0x56 +#define MAX77620_REG_FPS_RSO 0x57 +#define MAX77620_REG_CID0 0x58 +#define MAX77620_REG_CID1 0x59 +#define MAX77620_REG_CID2 0x5A +#define MAX77620_REG_CID3 0x5B +#define MAX77620_REG_CID4 0x5C +#define MAX77620_REG_CID5 0x5D + +#define MAX77620_REG_DVSSD4 0x5E +#define MAX20024_REG_MAX_ADD 0x70 + +#define MAX77620_CID_DIDM_MASK 0xF0 +#define MAX77620_CID_DIDM_SHIFT 4 + +/* CNCG2SD */ +#define MAX77620_SD_CNF2_ROVS_EN_SD1 BIT(1) +#define MAX77620_SD_CNF2_ROVS_EN_SD0 BIT(2) + +/* Device Identification Metal */ +#define MAX77620_CID5_DIDM(n) (((n) >> 4) & 0xF) +/* Device Indentification OTP */ +#define MAX77620_CID5_DIDO(n) ((n) & 0xF) + +/* SD CNFG1 */ +#define MAX77620_SD_SR_MASK 0xC0 +#define MAX77620_SD_SR_SHIFT 6 +#define MAX77620_SD_POWER_MODE_MASK 0x30 +#define MAX77620_SD_POWER_MODE_SHIFT 4 +#define MAX77620_SD_CFG1_ADE_MASK BIT(3) +#define MAX77620_SD_CFG1_ADE_DISABLE 0 +#define MAX77620_SD_CFG1_ADE_ENABLE BIT(3) +#define MAX77620_SD_FPWM_MASK 0x04 +#define MAX77620_SD_FPWM_SHIFT 2 +#define MAX77620_SD_FSRADE_MASK 0x01 +#define MAX77620_SD_FSRADE_SHIFT 0 +#define MAX77620_SD_CFG1_FPWM_SD_MASK BIT(2) +#define MAX77620_SD_CFG1_FPWM_SD_SKIP 0 +#define MAX77620_SD_CFG1_FPWM_SD_FPWM BIT(2) +#define MAX77620_SD_CFG1_FSRADE_SD_MASK BIT(0) +#define MAX77620_SD_CFG1_FSRADE_SD_DISABLE 0 +#define MAX77620_SD_CFG1_FSRADE_SD_ENABLE BIT(0) + +/* LDO_CNFG2 */ +#define MAX77620_LDO_POWER_MODE_MASK 0xC0 +#define MAX77620_LDO_POWER_MODE_SHIFT 6 +#define MAX77620_LDO_CFG2_ADE_MASK BIT(1) +#define MAX77620_LDO_CFG2_ADE_DISABLE 0 +#define MAX77620_LDO_CFG2_ADE_ENABLE BIT(1) +#define MAX77620_LDO_CFG2_SS_MASK BIT(0) +#define MAX77620_LDO_CFG2_SS_FAST BIT(0) +#define MAX77620_LDO_CFG2_SS_SLOW 0 + +#define MAX77620_IRQ_TOP_GLBL_MASK BIT(7) +#define MAX77620_IRQ_TOP_SD_MASK BIT(6) +#define MAX77620_IRQ_TOP_LDO_MASK BIT(5) +#define MAX77620_IRQ_TOP_GPIO_MASK BIT(4) +#define MAX77620_IRQ_TOP_RTC_MASK BIT(3) +#define MAX77620_IRQ_TOP_32K_MASK BIT(2) +#define MAX77620_IRQ_TOP_ONOFF_MASK BIT(1) + +#define MAX77620_IRQ_LBM_MASK BIT(3) +#define MAX77620_IRQ_TJALRM1_MASK BIT(2) +#define MAX77620_IRQ_TJALRM2_MASK BIT(1) + +#define MAX77620_PWR_I2C_ADDR 0x3c +#define MAX77620_RTC_I2C_ADDR 0x68 + +#define MAX77620_CNFG_GPIO_DRV_MASK BIT(0) +#define MAX77620_CNFG_GPIO_DRV_PUSHPULL BIT(0) +#define MAX77620_CNFG_GPIO_DRV_OPENDRAIN 0 +#define MAX77620_CNFG_GPIO_DIR_MASK BIT(1) +#define MAX77620_CNFG_GPIO_DIR_INPUT BIT(1) +#define MAX77620_CNFG_GPIO_DIR_OUTPUT 0 +#define MAX77620_CNFG_GPIO_INPUT_VAL_MASK BIT(2) +#define MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK BIT(3) +#define MAX77620_CNFG_GPIO_OUTPUT_VAL_HIGH BIT(3) +#define MAX77620_CNFG_GPIO_OUTPUT_VAL_LOW 0 +#define MAX77620_CNFG_GPIO_INT_MASK (0x3 << 4) +#define 
MAX77620_CNFG_GPIO_INT_FALLING BIT(4) +#define MAX77620_CNFG_GPIO_INT_RISING BIT(5) +#define MAX77620_CNFG_GPIO_DBNC_MASK (0x3 << 6) +#define MAX77620_CNFG_GPIO_DBNC_None (0x0 << 6) +#define MAX77620_CNFG_GPIO_DBNC_8ms (0x1 << 6) +#define MAX77620_CNFG_GPIO_DBNC_16ms (0x2 << 6) +#define MAX77620_CNFG_GPIO_DBNC_32ms (0x3 << 6) + +#define MAX77620_IRQ_LVL2_GPIO_EDGE0 BIT(0) +#define MAX77620_IRQ_LVL2_GPIO_EDGE1 BIT(1) +#define MAX77620_IRQ_LVL2_GPIO_EDGE2 BIT(2) +#define MAX77620_IRQ_LVL2_GPIO_EDGE3 BIT(3) +#define MAX77620_IRQ_LVL2_GPIO_EDGE4 BIT(4) +#define MAX77620_IRQ_LVL2_GPIO_EDGE5 BIT(5) +#define MAX77620_IRQ_LVL2_GPIO_EDGE6 BIT(6) +#define MAX77620_IRQ_LVL2_GPIO_EDGE7 BIT(7) + +#define MAX77620_CNFG1_32K_OUT0_EN BIT(2) + +#define MAX77620_ONOFFCNFG1_SFT_RST BIT(7) +#define MAX77620_ONOFFCNFG1_MRT_MASK 0x38 +#define MAX77620_ONOFFCNFG1_MRT_SHIFT 0x3 +#define MAX77620_ONOFFCNFG1_SLPEN BIT(2) +#define MAX77620_ONOFFCNFG1_PWR_OFF BIT(1) +#define MAX20024_ONOFFCNFG1_CLRSE 0x18 + +#define MAX77620_ONOFFCNFG2_SFT_RST_WK BIT(7) +#define MAX77620_ONOFFCNFG2_WD_RST_WK BIT(6) +#define MAX77620_ONOFFCNFG2_SLP_LPM_MSK BIT(5) +#define MAX77620_ONOFFCNFG2_WK_ALARM1 BIT(2) +#define MAX77620_ONOFFCNFG2_WK_EN0 BIT(0) + +#define MAX77620_GLBLM_MASK BIT(0) + +#define MAX77620_WDTC_MASK 0x3 +#define MAX77620_WDTOFFC BIT(4) +#define MAX77620_WDTSLPC BIT(3) +#define MAX77620_WDTEN BIT(2) + +#define MAX77620_TWD_MASK 0x3 +#define MAX77620_TWD_2s 0x0 +#define MAX77620_TWD_16s 0x1 +#define MAX77620_TWD_64s 0x2 +#define MAX77620_TWD_128s 0x3 + +#define MAX77620_CNFGGLBL1_LBDAC_EN BIT(7) +#define MAX77620_CNFGGLBL1_MPPLD BIT(6) +#define MAX77620_CNFGGLBL1_LBHYST (BIT(5) | BIT(4)) +#define MAX77620_CNFGGLBL1_LBDAC 0x0E +#define MAX77620_CNFGGLBL1_LBRSTEN BIT(0) + +/* CNFG BBC registers */ +#define MAX77620_CNFGBBC_ENABLE BIT(0) +#define MAX77620_CNFGBBC_CURRENT_MASK 0x06 +#define MAX77620_CNFGBBC_CURRENT_SHIFT 1 +#define MAX77620_CNFGBBC_VOLTAGE_MASK 0x18 +#define MAX77620_CNFGBBC_VOLTAGE_SHIFT 3 +#define MAX77620_CNFGBBC_LOW_CURRENT_DISABLE BIT(5) +#define MAX77620_CNFGBBC_RESISTOR_MASK 0xC0 +#define MAX77620_CNFGBBC_RESISTOR_SHIFT 6 + +#define MAX77620_FPS_COUNT 3 + +/* Interrupts */ +enum { + MAX77620_IRQ_TOP_GLBL, /* Low-Battery */ + MAX77620_IRQ_TOP_SD, /* SD power fail */ + MAX77620_IRQ_TOP_LDO, /* LDO power fail */ + MAX77620_IRQ_TOP_GPIO, /* TOP GPIO internal int to MAX77620 */ + MAX77620_IRQ_TOP_RTC, /* RTC */ + MAX77620_IRQ_TOP_32K, /* 32kHz oscillator */ + MAX77620_IRQ_TOP_ONOFF, /* ON/OFF oscillator */ + MAX77620_IRQ_LBT_MBATLOW, /* Thermal alarm status, > 120C */ + MAX77620_IRQ_LBT_TJALRM1, /* Thermal alarm status, > 120C */ + MAX77620_IRQ_LBT_TJALRM2, /* Thermal alarm status, > 140C */ +}; + +/* GPIOs */ +enum { + MAX77620_GPIO0, + MAX77620_GPIO1, + MAX77620_GPIO2, + MAX77620_GPIO3, + MAX77620_GPIO4, + MAX77620_GPIO5, + MAX77620_GPIO6, + MAX77620_GPIO7, + MAX77620_GPIO_NR, +}; + +/* FPS Source */ +enum max77620_fps_src { + MAX77620_FPS_SRC_0, + MAX77620_FPS_SRC_1, + MAX77620_FPS_SRC_2, + MAX77620_FPS_SRC_NONE, + MAX77620_FPS_SRC_DEF, +}; + +enum max77620_chip_id { + MAX77620, + MAX20024, +}; + +struct max77620_chip { + struct device *dev; + struct regmap *rmap; + + int chip_irq; + int irq_base; + + /* chip id */ + enum max77620_chip_id chip_id; + + bool sleep_enable; + bool enable_global_lpm; + int shutdown_fps_period[MAX77620_FPS_COUNT]; + int suspend_fps_period[MAX77620_FPS_COUNT]; + + struct regmap_irq_chip_data *top_irq_data; + struct regmap_irq_chip_data *gpio_irq_data; +}; + +#endif /* 
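Among the MAX77620 definitions above, the CID registers and MAX77620_CID5_DIDM() decode the silicon revision. A regmap-based read sketch, assuming the chip's regmap handle; my_max77620_metal_rev() is illustrative.

#include <linux/mfd/max77620.h>
#include <linux/regmap.h>

static int my_max77620_metal_rev(struct regmap *rmap)
{
        unsigned int val;
        int ret;

        ret = regmap_read(rmap, MAX77620_REG_CID5, &val);
        if (ret < 0)
                return ret;

        return MAX77620_CID5_DIDM(val);         /* metal revision nibble */
}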
_MFD_MAX77620_H_ */ diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index 6bc4bcd488ac..5a23dd4df432 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -30,6 +30,9 @@ #define MIN_600_MV 600000 #define MIN_500_MV 500000 +/* Ramp delay in uV/us */ +#define RAMP_DELAY_12_MVUS 12000 + /* Macros to represent steps for LDO/BUCK */ #define STEP_50_MV 50000 #define STEP_25_MV 25000 diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h index 1088149be0c9..40a76b97b7ab 100644 --- a/include/linux/mfd/syscon.h +++ b/include/linux/mfd/syscon.h @@ -16,6 +16,7 @@ #define __LINUX_MFD_SYSCON_H__ #include <linux/err.h> +#include <linux/errno.h> struct device_node; diff --git a/include/linux/mfd/syscon/exynos5-pmu.h b/include/linux/mfd/syscon/exynos5-pmu.h index 9352adc95de6..76f30f940c70 100644 --- a/include/linux/mfd/syscon/exynos5-pmu.h +++ b/include/linux/mfd/syscon/exynos5-pmu.h @@ -38,6 +38,9 @@ /* Exynos5433 specific register definitions */ #define EXYNOS5433_USBHOST30_PHY_CONTROL (0x728) +#define EXYNOS5433_MIPI_PHY0_CONTROL (0x710) +#define EXYNOS5433_MIPI_PHY1_CONTROL (0x714) +#define EXYNOS5433_MIPI_PHY2_CONTROL (0x718) #define EXYNOS5_PHY_ENABLE BIT(0) diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h index 238c8db953eb..c8e0164c5423 100644 --- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h +++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h @@ -95,6 +95,7 @@ #define IMX6Q_GPR0_DMAREQ_MUX_SEL0_IOMUX BIT(0) #define IMX6Q_GPR1_PCIE_REQ_MASK (0x3 << 30) +#define IMX6Q_GPR1_PCIE_SW_RST BIT(29) #define IMX6Q_GPR1_PCIE_EXIT_L1 BIT(28) #define IMX6Q_GPR1_PCIE_RDY_L23 BIT(27) #define IMX6Q_GPR1_PCIE_ENTER_L1 BIT(26) @@ -447,5 +448,11 @@ #define IMX6UL_GPR1_ENET2_CLK_OUTPUT (0x1 << 18) #define IMX6UL_GPR1_ENET_CLK_DIR (0x3 << 17) #define IMX6UL_GPR1_ENET_CLK_OUTPUT (0x3 << 17) +#define IMX6UL_GPR1_SAI1_MCLK_DIR (0x1 << 19) +#define IMX6UL_GPR1_SAI2_MCLK_DIR (0x1 << 20) +#define IMX6UL_GPR1_SAI3_MCLK_DIR (0x1 << 21) +#define IMX6UL_GPR1_SAI_MCLK_MASK (0x7 << 19) +#define MCLK_DIR(x) (x == 1 ? IMX6UL_GPR1_SAI1_MCLK_DIR : x == 2 ? 
\ + IMX6UL_GPR1_SAI2_MCLK_DIR : IMX6UL_GPR1_SAI3_MCLK_DIR) #endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */ diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 05d58ee5e6a7..7a26286db895 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -66,8 +66,8 @@ */ #define TMIO_MMC_SDIO_IRQ (1 << 2) -/* Some controllers don't need to wait 10ms for clock changes */ -#define TMIO_MMC_FAST_CLK_CHG (1 << 3) +/* Some features are only available or tested on RCar Gen2 or later */ +#define TMIO_MMC_MIN_RCAR2 (1 << 3) /* * Some controllers require waiting for the SD bus to become diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h index 2de565b94d0c..4ee908f5b834 100644 --- a/include/linux/mfd/wm8400-private.h +++ b/include/linux/mfd/wm8400-private.h @@ -923,7 +923,6 @@ struct wm8400 { #define WM8400_LINE_CMP_VTHD_SHIFT 0 /* LINE_CMP_VTHD - [3:0] */ #define WM8400_LINE_CMP_VTHD_WIDTH 4 /* LINE_CMP_VTHD - [3:0] */ -u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg); int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data); static inline int wm8400_set_bits(struct wm8400 *wm8400, u8 reg, diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index d1f904c8b2cb..80dec87a94f8 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -1058,7 +1058,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset) { - if (BITS_PER_LONG == 64 || buf->nbufs == 1) + if (buf->nbufs == 1) return buf->direct.buf + offset; else return buf->page_list[offset >> PAGE_SHIFT].buf + @@ -1098,7 +1098,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db); int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, - int size, int max_direct); + int size); void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres, int size); diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index b2c9fada8eac..2be976dd4966 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -53,6 +53,11 @@ struct mlx5_core_cq { unsigned arm_sn; struct mlx5_rsc_debug *dbg; int pid; + struct { + struct list_head list; + void (*comp)(struct mlx5_core_cq *); + void *priv; + } tasklet_ctx; }; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index b3575f392492..035abdf62cfe 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -59,6 +59,7 @@ #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8) #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8) #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32) +#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64) #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8) #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32) #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) @@ -655,7 +656,9 @@ struct mlx5_err_cqe { }; struct mlx5_cqe64 { - u8 rsvd0[4]; + u8 outer_l3_tunneled; + u8 rsvd0; + __be16 wqe_id; u8 lro_tcppsh_abort_dupack; u8 lro_min_ttl; __be16 lro_tcp_win; @@ -668,7 +671,7 @@ struct mlx5_cqe64 { __be16 slid; __be32 flags_rqpn; u8 hds_ip_ext; - u8 l4_hdr_type_etc; + u8 l4_l3_hdr_type; __be16 vlan_info; __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */ 
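The IMX6UL_GPR1_SAI*_MCLK_DIR bits and the MCLK_DIR() helper above select whether a SAI master clock pin is driven as an output. A sketch of setting it through the GPR syscon regmap; my_set_sai_mclk_out() and the way the gpr handle is obtained (e.g. syscon_regmap_lookup_by_compatible()) are illustrative, and IOMUXC_GPR1 is assumed to be the GPR1 offset from the same header.

#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/regmap.h>

static int my_set_sai_mclk_out(struct regmap *gpr, int sai_id)
{
        /* clear all three direction bits, then set the one for this SAI */
        return regmap_update_bits(gpr, IOMUXC_GPR1,
                                  IMX6UL_GPR1_SAI_MCLK_MASK,
                                  MCLK_DIR(sai_id));
}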
__be32 imm_inval_pkey; @@ -682,6 +685,40 @@ struct mlx5_cqe64 { u8 op_own; }; +struct mlx5_mini_cqe8 { + union { + __be32 rx_hash_result; + struct { + __be16 checksum; + __be16 rsvd; + }; + struct { + __be16 wqe_counter; + u8 s_wqe_opcode; + u8 reserved; + } s_wqe_info; + }; + __be32 byte_cnt; +}; + +enum { + MLX5_NO_INLINE_DATA, + MLX5_INLINE_DATA32_SEG, + MLX5_INLINE_DATA64_SEG, + MLX5_COMPRESSED, +}; + +enum { + MLX5_CQE_FORMAT_CSUM = 0x1, +}; + +#define MLX5_MINI_CQE_ARRAY_SIZE 8 + +static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe) +{ + return (cqe->op_own >> 2) & 0x3; +} + static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) { return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; @@ -689,12 +726,22 @@ static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe) { - return (cqe->l4_hdr_type_etc >> 4) & 0x7; + return (cqe->l4_l3_hdr_type >> 4) & 0x7; +} + +static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe) +{ + return (cqe->l4_l3_hdr_type >> 2) & 0x3; +} + +static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe) +{ + return cqe->outer_l3_tunneled & 0x1; } static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe) { - return !!(cqe->l4_hdr_type_etc & 0x1); + return !!(cqe->l4_l3_hdr_type & 0x1); } static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe) @@ -707,6 +754,42 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe) return (u64)lo | ((u64)hi << 32); } +struct mpwrq_cqe_bc { + __be16 filler_consumed_strides; + __be16 byte_cnt; +}; + +static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe) +{ + struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt; + + return be16_to_cpu(bc->byte_cnt); +} + +static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc) +{ + return 0x7fff & be16_to_cpu(bc->filler_consumed_strides); +} + +static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe) +{ + struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt; + + return mpwrq_get_cqe_bc_consumed_strides(bc); +} + +static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe) +{ + struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt; + + return 0x8000 & be16_to_cpu(bc->filler_consumed_strides); +} + +static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe) +{ + return be16_to_cpu(cqe->wqe_counter); +} + enum { CQE_L4_HDR_TYPE_NONE = 0x0, CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1, @@ -1300,6 +1383,18 @@ enum mlx5_cap_type { #define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap) +#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap) + +#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap) + +#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap) + +#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap) + #define MLX5_CAP_ESW(mdev, cap) \ MLX5_GET(e_switch_cap, \ mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap) @@ -1342,6 +1437,7 @@ enum { MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5, MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11, + MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12, MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, }; diff --git a/include/linux/mlx5/driver.h 
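The mlx5 CQE rework above (compressed/mini CQEs, the tunneling bit, multi-packet WQE byte counts) is exposed through small inline helpers. A sketch of a receive-path check using them; my_inspect_rx_cqe() and the pr_debug() output are illustrative.

#include <linux/mlx5/device.h>
#include <linux/printk.h>

static void my_inspect_rx_cqe(struct mlx5_cqe64 *cqe)
{
        if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
                /* carries an array of mini CQEs; expanded elsewhere */
                return;
        }

        if (cqe_is_tunneled(cqe))
                pr_debug("outer L3 tunneled packet\n");

        pr_debug("l4 hdr type %d, %u bytes\n",
                 get_cqe_l4_hdr_type(cqe), be32_to_cpu(cqe->byte_cnt));
}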
b/include/linux/mlx5/driver.h index 369c837d40f5..80776d0c52dc 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -41,11 +41,17 @@ #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/radix-tree.h> +#include <linux/workqueue.h> +#include <linux/interrupt.h> #include <linux/mlx5/device.h> #include <linux/mlx5/doorbell.h> enum { + MLX5_RQ_BITMASK_VSD = 1 << 1, +}; + +enum { MLX5_BOARD_ID_LEN = 64, MLX5_MAX_NAME_LEN = 16, }; @@ -112,9 +118,12 @@ enum { MLX5_REG_PMPE = 0x5010, MLX5_REG_PELC = 0x500e, MLX5_REG_PVLC = 0x500f, - MLX5_REG_PMLP = 0, /* TBD */ + MLX5_REG_PCMR = 0x5041, + MLX5_REG_PMLP = 0x5002, MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, + MLX5_REG_MCIA = 0x9014, + MLX5_REG_MLCR = 0x902b, }; enum { @@ -304,6 +313,14 @@ struct mlx5_buf { u8 page_shift; }; +struct mlx5_eq_tasklet { + struct list_head list; + struct list_head process_list; + struct tasklet_struct task; + /* lock on completion tasklet list */ + spinlock_t lock; +}; + struct mlx5_eq { struct mlx5_core_dev *dev; __be32 __iomem *doorbell; @@ -317,6 +334,7 @@ struct mlx5_eq { struct list_head list; int index; struct mlx5_rsc_debug *dbg; + struct mlx5_eq_tasklet tasklet_ctx; }; struct mlx5_core_psv { @@ -450,6 +468,17 @@ struct mlx5_irq_info { char name[MLX5_MAX_IRQ_NAME]; }; +struct mlx5_fc_stats { + struct list_head list; + struct list_head addlist; + /* protect addlist add/splice operations */ + spinlock_t addlist_lock; + + struct workqueue_struct *wq; + struct delayed_work work; + unsigned long next_query; +}; + struct mlx5_eswitch; struct mlx5_priv { @@ -511,6 +540,10 @@ struct mlx5_priv { unsigned long pci_dev_data; struct mlx5_flow_root_namespace *root_ns; struct mlx5_flow_root_namespace *fdb_root_ns; + struct mlx5_flow_root_namespace *esw_egress_root_ns; + struct mlx5_flow_root_namespace *esw_ingress_root_ns; + + struct mlx5_fc_stats fc_stats; }; enum mlx5_device_state { @@ -553,6 +586,9 @@ struct mlx5_core_dev { struct mlx5_profile *profile; atomic_t num_qps; u32 issi; +#ifdef CONFIG_RFS_ACCEL + struct cpu_rmap *rmap; +#endif }; struct mlx5_db { diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 8dec5508d93d..4b7a107d9c19 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -58,6 +58,8 @@ enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_LEFTOVERS, MLX5_FLOW_NAMESPACE_ANCHOR, MLX5_FLOW_NAMESPACE_FDB, + MLX5_FLOW_NAMESPACE_ESW_EGRESS, + MLX5_FLOW_NAMESPACE_ESW_INGRESS, }; struct mlx5_flow_table; @@ -71,6 +73,7 @@ struct mlx5_flow_destination { u32 tir_num; struct mlx5_flow_table *ft; u32 vport_num; + struct mlx5_fc *counter; }; }; @@ -82,12 +85,19 @@ struct mlx5_flow_table * mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, int prio, int num_flow_table_entries, - int max_num_groups); + int max_num_groups, + u32 level); struct mlx5_flow_table * mlx5_create_flow_table(struct mlx5_flow_namespace *ns, int prio, - int num_flow_table_entries); + int num_flow_table_entries, + u32 level); +struct mlx5_flow_table * +mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, + int prio, + int num_flow_table_entries, + u32 level, u16 vport); int mlx5_destroy_flow_table(struct mlx5_flow_table *ft); /* inbox should be set with the following values: @@ -113,4 +123,13 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft, struct mlx5_flow_destination *dest); void mlx5_del_flow_rule(struct mlx5_flow_rule *fr); +int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, + struct mlx5_flow_destination *dest); + +struct 
mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule); +struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); +void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); +void mlx5_fc_query_cached(struct mlx5_fc *counter, + u64 *bytes, u64 *packets, u64 *lastuse); + #endif diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index c15b8a864937..9a05cd7e5890 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -202,6 +202,9 @@ enum { MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936, MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938, + MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939, + MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, + MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c }; @@ -265,7 +268,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits { struct mlx5_ifc_flow_table_prop_layout_bits { u8 ft_support[0x1]; - u8 reserved_at_1[0x2]; + u8 reserved_at_1[0x1]; + u8 flow_counter[0x1]; u8 flow_modify_en[0x1]; u8 modify_root[0x1]; u8 identified_miss_table_mode[0x1]; @@ -513,7 +517,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 max_lso_cap[0x5]; u8 reserved_at_10[0x4]; u8 rss_ind_tbl_cap[0x4]; - u8 reserved_at_18[0x3]; + u8 reg_umr_sq[0x1]; + u8 scatter_fcs[0x1]; + u8 reserved_at_1a[0x1]; u8 tunnel_lso_const_out_ip_id[0x1]; u8 reserved_at_1c[0x2]; u8 tunnel_statless_gre[0x1]; @@ -648,7 +654,7 @@ struct mlx5_ifc_vector_calc_cap_bits { enum { MLX5_WQ_TYPE_LINKED_LIST = 0x0, MLX5_WQ_TYPE_CYCLIC = 0x1, - MLX5_WQ_TYPE_STRQ = 0x2, + MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2, }; enum { @@ -750,21 +756,25 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 ets[0x1]; u8 nic_flow_table[0x1]; u8 eswitch_flow_table[0x1]; - u8 early_vf_enable; - u8 reserved_at_1a8[0x2]; + u8 early_vf_enable[0x1]; + u8 reserved_at_1a9[0x2]; u8 local_ca_ack_delay[0x5]; - u8 reserved_at_1af[0x6]; + u8 reserved_at_1af[0x2]; + u8 ports_check[0x1]; + u8 reserved_at_1b2[0x1]; + u8 disable_link_up[0x1]; + u8 beacon_led[0x1]; u8 port_type[0x2]; u8 num_ports[0x8]; - u8 reserved_at_1bf[0x3]; + u8 reserved_at_1c0[0x3]; u8 log_max_msg[0x5]; - u8 reserved_at_1c7[0x4]; + u8 reserved_at_1c8[0x4]; u8 max_tc[0x4]; - u8 reserved_at_1cf[0x6]; + u8 reserved_at_1d0[0x6]; u8 rol_s[0x1]; u8 rol_g[0x1]; - u8 reserved_at_1d7[0x1]; + u8 reserved_at_1d8[0x1]; u8 wol_s[0x1]; u8 wol_g[0x1]; u8 wol_a[0x1]; @@ -774,47 +784,48 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 wol_p[0x1]; u8 stat_rate_support[0x10]; - u8 reserved_at_1ef[0xc]; + u8 reserved_at_1f0[0xc]; u8 cqe_version[0x4]; u8 compact_address_vector[0x1]; - u8 reserved_at_200[0x3]; + u8 striding_rq[0x1]; + u8 reserved_at_201[0x2]; u8 ipoib_basic_offloads[0x1]; - u8 reserved_at_204[0xa]; + u8 reserved_at_205[0xa]; u8 drain_sigerr[0x1]; u8 cmdif_checksum[0x2]; u8 sigerr_cqe[0x1]; - u8 reserved_at_212[0x1]; + u8 reserved_at_213[0x1]; u8 wq_signature[0x1]; u8 sctr_data_cqe[0x1]; - u8 reserved_at_215[0x1]; + u8 reserved_at_216[0x1]; u8 sho[0x1]; u8 tph[0x1]; u8 rf[0x1]; u8 dct[0x1]; - u8 reserved_at_21a[0x1]; + u8 reserved_at_21b[0x1]; u8 eth_net_offloads[0x1]; u8 roce[0x1]; u8 atomic[0x1]; - u8 reserved_at_21e[0x1]; + u8 reserved_at_21f[0x1]; u8 cq_oi[0x1]; u8 cq_resize[0x1]; u8 cq_moderation[0x1]; - u8 reserved_at_222[0x3]; + u8 reserved_at_223[0x3]; u8 cq_eq_remap[0x1]; u8 pg[0x1]; u8 block_lb_mc[0x1]; - u8 reserved_at_228[0x1]; + u8 reserved_at_229[0x1]; u8 scqe_break_moderation[0x1]; - u8 reserved_at_22a[0x1]; + u8 cq_period_start_from_cqe[0x1]; u8 
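The flow-counter API above (mlx5_fc_create(), mlx5_flow_rule_counter(), mlx5_fc_query_cached()) lets a rule's statistics be read from the periodically refreshed cache. A hedged sketch; my_read_rule_stats() is illustrative.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>

static void my_read_rule_stats(struct mlx5_flow_rule *rule)
{
        struct mlx5_fc *counter = mlx5_flow_rule_counter(rule);
        u64 bytes, packets, lastuse;

        if (!counter)
                return;         /* rule was not created with a counter */

        mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
        /* bytes/packets reflect the core's last periodic query */
}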
cd[0x1]; - u8 reserved_at_22c[0x1]; + u8 reserved_at_22d[0x1]; u8 apm[0x1]; u8 vector_calc[0x1]; - u8 reserved_at_22f[0x1]; + u8 umr_ptr_rlky[0x1]; u8 imaicl[0x1]; - u8 reserved_at_231[0x4]; + u8 reserved_at_232[0x4]; u8 qkv[0x1]; u8 pkv[0x1]; u8 set_deth_sqpn[0x1]; @@ -824,104 +835,109 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 uc[0x1]; u8 rc[0x1]; - u8 reserved_at_23f[0xa]; + u8 reserved_at_240[0xa]; u8 uar_sz[0x6]; - u8 reserved_at_24f[0x8]; + u8 reserved_at_250[0x8]; u8 log_pg_sz[0x8]; u8 bf[0x1]; - u8 reserved_at_260[0x1]; + u8 reserved_at_261[0x1]; u8 pad_tx_eth_packet[0x1]; - u8 reserved_at_262[0x8]; + u8 reserved_at_263[0x8]; u8 log_bf_reg_size[0x5]; - u8 reserved_at_26f[0x10]; + u8 reserved_at_270[0x10]; - u8 reserved_at_27f[0x10]; + u8 reserved_at_280[0x10]; u8 max_wqe_sz_sq[0x10]; - u8 reserved_at_29f[0x10]; + u8 reserved_at_2a0[0x10]; u8 max_wqe_sz_rq[0x10]; - u8 reserved_at_2bf[0x10]; + u8 reserved_at_2c0[0x10]; u8 max_wqe_sz_sq_dc[0x10]; - u8 reserved_at_2df[0x7]; + u8 reserved_at_2e0[0x7]; u8 max_qp_mcg[0x19]; - u8 reserved_at_2ff[0x18]; + u8 reserved_at_300[0x18]; u8 log_max_mcg[0x8]; - u8 reserved_at_31f[0x3]; + u8 reserved_at_320[0x3]; u8 log_max_transport_domain[0x5]; - u8 reserved_at_327[0x3]; + u8 reserved_at_328[0x3]; u8 log_max_pd[0x5]; - u8 reserved_at_32f[0xb]; + u8 reserved_at_330[0xb]; u8 log_max_xrcd[0x5]; - u8 reserved_at_33f[0x20]; + u8 reserved_at_340[0x20]; - u8 reserved_at_35f[0x3]; + u8 reserved_at_360[0x3]; u8 log_max_rq[0x5]; - u8 reserved_at_367[0x3]; + u8 reserved_at_368[0x3]; u8 log_max_sq[0x5]; - u8 reserved_at_36f[0x3]; + u8 reserved_at_370[0x3]; u8 log_max_tir[0x5]; - u8 reserved_at_377[0x3]; + u8 reserved_at_378[0x3]; u8 log_max_tis[0x5]; u8 basic_cyclic_rcv_wqe[0x1]; - u8 reserved_at_380[0x2]; + u8 reserved_at_381[0x2]; u8 log_max_rmp[0x5]; - u8 reserved_at_387[0x3]; + u8 reserved_at_388[0x3]; u8 log_max_rqt[0x5]; - u8 reserved_at_38f[0x3]; + u8 reserved_at_390[0x3]; u8 log_max_rqt_size[0x5]; - u8 reserved_at_397[0x3]; + u8 reserved_at_398[0x3]; u8 log_max_tis_per_sq[0x5]; - u8 reserved_at_39f[0x3]; + u8 reserved_at_3a0[0x3]; u8 log_max_stride_sz_rq[0x5]; - u8 reserved_at_3a7[0x3]; + u8 reserved_at_3a8[0x3]; u8 log_min_stride_sz_rq[0x5]; - u8 reserved_at_3af[0x3]; + u8 reserved_at_3b0[0x3]; u8 log_max_stride_sz_sq[0x5]; - u8 reserved_at_3b7[0x3]; + u8 reserved_at_3b8[0x3]; u8 log_min_stride_sz_sq[0x5]; - u8 reserved_at_3bf[0x1b]; + u8 reserved_at_3c0[0x1b]; u8 log_max_wq_sz[0x5]; u8 nic_vport_change_event[0x1]; - u8 reserved_at_3e0[0xa]; + u8 reserved_at_3e1[0xa]; u8 log_max_vlan_list[0x5]; - u8 reserved_at_3ef[0x3]; + u8 reserved_at_3f0[0x3]; u8 log_max_current_mc_list[0x5]; - u8 reserved_at_3f7[0x3]; + u8 reserved_at_3f8[0x3]; u8 log_max_current_uc_list[0x5]; - u8 reserved_at_3ff[0x80]; + u8 reserved_at_400[0x80]; - u8 reserved_at_47f[0x3]; + u8 reserved_at_480[0x3]; u8 log_max_l2_table[0x5]; - u8 reserved_at_487[0x8]; + u8 reserved_at_488[0x8]; u8 log_uar_page_sz[0x10]; - u8 reserved_at_49f[0x20]; + u8 reserved_at_4a0[0x20]; u8 device_frequency_mhz[0x20]; u8 device_frequency_khz[0x20]; - u8 reserved_at_4ff[0x5f]; - u8 cqe_zip[0x1]; - u8 cqe_zip_timeout[0x10]; - u8 cqe_zip_max_num[0x10]; + u8 reserved_at_500[0x80]; - u8 reserved_at_57f[0x220]; + u8 reserved_at_580[0x3f]; + u8 cqe_compression[0x1]; + + u8 cqe_compression_timeout[0x10]; + u8 cqe_compression_max_num[0x10]; + + u8 reserved_at_5e0[0x220]; }; enum mlx5_flow_destination_type { MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0, MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, 
MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2, + + MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100, }; struct mlx5_ifc_dest_format_struct_bits { @@ -931,6 +947,19 @@ struct mlx5_ifc_dest_format_struct_bits { u8 reserved_at_20[0x20]; }; +struct mlx5_ifc_flow_counter_list_bits { + u8 reserved_at_0[0x10]; + u8 flow_counter_id[0x10]; + + u8 reserved_at_20[0x20]; +}; + +union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits { + struct mlx5_ifc_dest_format_struct_bits dest_format_struct; + struct mlx5_ifc_flow_counter_list_bits flow_counter_list; + u8 reserved_at_0[0x40]; +}; + struct mlx5_ifc_fte_match_param_bits { struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers; @@ -997,7 +1026,13 @@ struct mlx5_ifc_wq_bits { u8 reserved_at_118[0x3]; u8 log_wq_sz[0x5]; - u8 reserved_at_120[0x4e0]; + u8 reserved_at_120[0x15]; + u8 log_wqe_num_of_strides[0x3]; + u8 two_byte_shift_en[0x1]; + u8 reserved_at_139[0x4]; + u8 log_wqe_stride_size[0x3]; + + u8 reserved_at_140[0x4c0]; struct mlx5_ifc_cmd_pas_bits pas[0]; }; @@ -1990,6 +2025,7 @@ enum { MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1, MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4, + MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8, }; struct mlx5_ifc_flow_context_bits { @@ -2006,13 +2042,16 @@ struct mlx5_ifc_flow_context_bits { u8 reserved_at_80[0x8]; u8 destination_list_size[0x18]; - u8 reserved_at_a0[0x160]; + u8 reserved_at_a0[0x8]; + u8 flow_counter_list_size[0x18]; + + u8 reserved_at_c0[0x140]; struct mlx5_ifc_fte_match_param_bits match_value; u8 reserved_at_1200[0x600]; - struct mlx5_ifc_dest_format_struct_bits destination[0]; + union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0]; }; enum { @@ -2196,7 +2235,8 @@ struct mlx5_ifc_sqc_bits { u8 flush_in_error_en[0x1]; u8 reserved_at_4[0x4]; u8 state[0x4]; - u8 reserved_at_c[0x14]; + u8 reg_umr[0x1]; + u8 reserved_at_d[0x13]; u8 reserved_at_20[0x8]; u8 user_index[0x18]; @@ -2244,7 +2284,8 @@ enum { struct mlx5_ifc_rqc_bits { u8 rlky[0x1]; - u8 reserved_at_1[0x2]; + u8 reserved_at_1[0x1]; + u8 scatter_fcs[0x1]; u8 vsd[0x1]; u8 mem_rq_type[0x4]; u8 state[0x4]; @@ -2601,6 +2642,11 @@ enum { MLX5_CQC_ST_FIRED = 0xa, }; +enum { + MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, + MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, +}; + struct mlx5_ifc_cqc_bits { u8 status[0x4]; u8 reserved_at_4[0x4]; @@ -2609,8 +2655,8 @@ struct mlx5_ifc_cqc_bits { u8 reserved_at_c[0x1]; u8 scqe_break_moderation_en[0x1]; u8 oi[0x1]; - u8 reserved_at_f[0x2]; - u8 cqe_zip_en[0x1]; + u8 cq_period_mode[0x2]; + u8 cqe_comp_en[0x1]; u8 mini_cqe_res_format[0x2]; u8 st[0x4]; u8 reserved_at_18[0x8]; @@ -2984,7 +3030,11 @@ struct mlx5_ifc_set_fte_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -3910,6 +3960,34 @@ struct mlx5_ifc_query_flow_group_in_bits { u8 reserved_at_e0[0x120]; }; +struct mlx5_ifc_query_flow_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_traffic_counter_bits flow_statistics[0]; +}; + +struct mlx5_ifc_query_flow_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x80]; + + u8 clear[0x1]; + u8 reserved_at_c1[0xf]; + u8 num_of_counters[0x10]; + + u8 reserved_at_e0[0x10]; + u8 flow_counter_id[0x10]; +}; + struct mlx5_ifc_query_esw_vport_context_out_bits 
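struct mlx5_ifc_query_flow_counter_in_bits above is consumed through the usual MLX5_SET()/MLX5_ST_SZ_BYTES() accessors. A sketch of filling the command for a single counter; my_build_query_fc() is illustrative and the command-exec plumbing is omitted.

#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

static void my_build_query_fc(void *in, u16 counter_id)
{
        /* caller allocates: u8 in[MLX5_ST_SZ_BYTES(query_flow_counter_in)] */
        MLX5_SET(query_flow_counter_in, in, opcode,
                 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
        MLX5_SET(query_flow_counter_in, in, clear, 0);
        MLX5_SET(query_flow_counter_in, in, num_of_counters, 1);
        MLX5_SET(query_flow_counter_in, in, flow_counter_id, counter_id);
}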
{ u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -5178,7 +5256,11 @@ struct mlx5_ifc_destroy_flow_table_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -5205,7 +5287,11 @@ struct mlx5_ifc_destroy_flow_group_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -5346,7 +5432,11 @@ struct mlx5_ifc_delete_fte_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -5471,6 +5561,28 @@ struct mlx5_ifc_dealloc_pd_in_bits { u8 reserved_at_60[0x20]; }; +struct mlx5_ifc_dealloc_flow_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_dealloc_flow_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 flow_counter_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + struct mlx5_ifc_create_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -5792,7 +5904,11 @@ struct mlx5_ifc_create_flow_table_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -5836,7 +5952,11 @@ struct mlx5_ifc_create_flow_group_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -6190,6 +6310,28 @@ struct mlx5_ifc_alloc_pd_in_bits { u8 reserved_at_40[0x40]; }; +struct mlx5_ifc_alloc_flow_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x10]; + u8 flow_counter_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_alloc_flow_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_add_vxlan_udp_dport_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -6369,6 +6511,17 @@ struct mlx5_ifc_ptys_reg_bits { u8 reserved_at_1a0[0x60]; }; +struct mlx5_ifc_mlcr_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x20]; + + u8 beacon_duration[0x10]; + u8 reserved_at_40[0x10]; + + u8 beacon_remain[0x10]; +}; + struct mlx5_ifc_ptas_reg_bits { u8 reserved_at_0[0x20]; @@ -6778,6 +6931,16 @@ struct mlx5_ifc_pamp_reg_bits { u8 index_data[18][0x10]; }; +struct mlx5_ifc_pcmr_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x2e]; + u8 fcs_cap[0x1]; + u8 reserved_at_3f[0x1f]; + u8 fcs_chk[0x1]; + u8 reserved_at_5f[0x1]; +}; + struct mlx5_ifc_lane_2_module_mapping_bits { u8 reserved_at_0[0x6]; u8 rx_lane[0x2]; @@ -7114,6 +7277,7 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_pspa_reg_bits pspa_reg; struct mlx5_ifc_ptas_reg_bits ptas_reg; struct mlx5_ifc_ptys_reg_bits ptys_reg; + struct mlx5_ifc_mlcr_reg_bits mlcr_reg; struct mlx5_ifc_pude_reg_bits pude_reg; 
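/*
 * Illustrative sketch, not part of the diff above: how the mlx5 core might
 * drive the new alloc_flow_counter command layout using the existing
 * MLX5_SET()/MLX5_GET() accessors from <linux/mlx5/device.h>. The opcode
 * constant and the command-exec helper's exact name/signature are assumed
 * here and may differ from the real driver code.
 */
static int example_fc_alloc(struct mlx5_core_dev *dev, u16 *id)
{
	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
	int err;

	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);

	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					 out, sizeof(out));
	if (err)
		return err;

	*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return 0;
}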
struct mlx5_ifc_pvlc_reg_bits pvlc_reg; struct mlx5_ifc_slrg_reg_bits slrg_reg; @@ -7147,7 +7311,11 @@ struct mlx5_ifc_set_flow_table_root_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -7178,7 +7346,9 @@ struct mlx5_ifc_modify_flow_table_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x20]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; u8 reserved_at_60[0x10]; u8 modify_field_select[0x10]; @@ -7244,4 +7414,34 @@ struct mlx5_ifc_qtct_reg_bits { u8 tclass[0x3]; }; +struct mlx5_ifc_mcia_reg_bits { + u8 l[0x1]; + u8 reserved_at_1[0x7]; + u8 module[0x8]; + u8 reserved_at_10[0x8]; + u8 status[0x8]; + + u8 i2c_device_address[0x8]; + u8 page_number[0x8]; + u8 device_address[0x10]; + + u8 reserved_at_40[0x10]; + u8 size[0x10]; + + u8 reserved_at_60[0x20]; + + u8 dword_0[0x20]; + u8 dword_1[0x20]; + u8 dword_2[0x20]; + u8 dword_3[0x20]; + u8 dword_4[0x20]; + u8 dword_5[0x20]; + u8 dword_6[0x20]; + u8 dword_7[0x20]; + u8 dword_8[0x20]; + u8 dword_9[0x20]; + u8 dword_10[0x20]; + u8 dword_11[0x20]; +}; + #endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index b30250ab7604..9851862c0ec5 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -35,6 +35,24 @@ #include <linux/mlx5/driver.h> +enum mlx5_beacon_duration { + MLX5_BEACON_DURATION_OFF = 0x0, + MLX5_BEACON_DURATION_INF = 0xffff, +}; + +enum mlx5_module_id { + MLX5_MODULE_ID_SFP = 0x3, + MLX5_MODULE_ID_QSFP = 0xC, + MLX5_MODULE_ID_QSFP_PLUS = 0xD, + MLX5_MODULE_ID_QSFP28 = 0x11, +}; + +#define MLX5_EEPROM_MAX_BYTES 32 +#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff +#define MLX5_I2C_ADDR_LOW 0x50 +#define MLX5_I2C_ADDR_HIGH 0x51 +#define MLX5_EEPROM_PAGE_LENGTH 256 + int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, int ptys_size, int proto_mask, u8 local_port); @@ -53,6 +71,7 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status status); int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status *status); +int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration); int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port); void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port); @@ -84,4 +103,10 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev, int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode); int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode); +int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable); +void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, + bool *enabled); +int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, + u16 offset, u16 size, u8 *data); + #endif /* __MLX5_PORT_H__ */ diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index cf031a3f16c5..64221027bf1f 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -668,6 +668,12 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, struct mlx5_core_qp *sq); void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev, struct mlx5_core_qp *sq); +int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id); +int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 
counter_id); +int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, + int reset, void *out, int out_size); +int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id, + u32 *out_of_buffer); static inline const char *mlx5_qp_type_str(int type) { diff --git a/include/linux/mm.h b/include/linux/mm.h index 8f468e0d2534..2835d598d258 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -72,6 +72,10 @@ extern int mmap_rnd_compat_bits __read_mostly; #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) #endif +#ifndef page_to_virt +#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x))) +#endif + /* * To prevent common memory management code establishing * a zero page mapping on a read fault. @@ -443,14 +447,14 @@ unsigned long vmalloc_to_pfn(const void *addr); * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there * is no special casing required. */ -static inline int is_vmalloc_addr(const void *x) +static inline bool is_vmalloc_addr(const void *x) { #ifdef CONFIG_MMU unsigned long addr = (unsigned long)x; return addr >= VMALLOC_START && addr < VMALLOC_END; #else - return 0; + return false; #endif } #ifdef CONFIG_MMU @@ -471,8 +475,7 @@ static inline atomic_t *compound_mapcount_ptr(struct page *page) static inline int compound_mapcount(struct page *page) { - if (!PageCompound(page)) - return 0; + VM_BUG_ON_PAGE(!PageCompound(page), page); page = compound_head(page); return atomic_read(compound_mapcount_ptr(page)) + 1; } @@ -593,7 +596,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) } void do_set_pte(struct vm_area_struct *vma, unsigned long address, - struct page *page, pte_t *pte, bool write, bool anon); + struct page *page, pte_t *pte, bool write, bool anon, bool old); #endif /* @@ -730,7 +733,7 @@ static inline void get_page(struct page *page) page = compound_head(page); /* * Getting a normal page or the head of a compound page - * requires to already have an elevated page->_count. + * requires to already have an elevated page->_refcount. */ VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page); page_ref_inc(page); @@ -846,10 +849,7 @@ extern int page_cpupid_xchg_last(struct page *page, int cpupid); static inline void page_cpupid_reset_last(struct page *page) { - int cpupid = (1 << LAST_CPUPID_SHIFT) - 1; - - page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT); - page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT; + page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT; } #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */ #else /* !CONFIG_NUMA_BALANCING */ @@ -957,7 +957,7 @@ static inline struct mem_cgroup *page_memcg(struct page *page) static __always_inline void *lowmem_page_address(const struct page *page) { - return __va(PFN_PHYS(page_to_pfn(page))); + return page_to_virt(page); } #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) @@ -1028,26 +1028,7 @@ static inline pgoff_t page_file_index(struct page *page) return page->index; } -/* - * Return true if this page is mapped into pagetables. - * For compound page it returns true if any subpage of compound page is mapped. 
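/*
 * Illustrative sketch, not part of the diff: with page->_count renamed to
 * _refcount, callers are expected to go through the <linux/page_ref.h>
 * wrappers (as the updated get_page() comment above says) rather than touch
 * the field directly. example_try_hold_page() is a made-up name;
 * page_ref_add_unless() is the existing helper behind get_page_unless_zero().
 */
static inline bool example_try_hold_page(struct page *page)
{
	/* take a reference only if the count has not already dropped to zero */
	return page_ref_add_unless(page, 1, 0);
}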
- */ -static inline bool page_mapped(struct page *page) -{ - int i; - if (likely(!PageCompound(page))) - return atomic_read(&page->_mapcount) >= 0; - page = compound_head(page); - if (atomic_read(compound_mapcount_ptr(page)) >= 0) - return true; - if (PageHuge(page)) - return false; - for (i = 0; i < hpage_nr_pages(page); i++) { - if (atomic_read(&page[i]._mapcount) >= 0) - return true; - } - return false; -} +bool page_mapped(struct page *page); /* * Return true only if the page has been allocated with @@ -1782,7 +1763,7 @@ extern void free_highmem_page(struct page *page); extern void adjust_managed_page_count(struct page *page, long count); extern void mem_init_print_info(const char *str); -extern void reserve_bootmem_region(unsigned long start, unsigned long end); +extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); /* Free the reserved page into the buddy system, so it gets managed. */ static inline void __free_reserved_page(struct page *page) @@ -2030,9 +2011,9 @@ static inline void mm_populate(unsigned long addr, unsigned long len) {} #endif /* These take the mm semaphore themselves */ -extern unsigned long vm_brk(unsigned long, unsigned long); +extern unsigned long __must_check vm_brk(unsigned long, unsigned long); extern int vm_munmap(unsigned long, size_t); -extern unsigned long vm_mmap(struct file *, unsigned long, +extern unsigned long __must_check vm_mmap(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); @@ -2405,6 +2386,9 @@ static inline bool page_is_guard(struct page *page) return false; page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + return false; + return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); } #else diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 712e8c37a200..5bd29ba4f174 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -22,22 +22,34 @@ static inline int page_is_file_cache(struct page *page) return !PageSwapBacked(page); } +static __always_inline void __update_lru_size(struct lruvec *lruvec, + enum lru_list lru, int nr_pages) +{ + __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages); +} + +static __always_inline void update_lru_size(struct lruvec *lruvec, + enum lru_list lru, int nr_pages) +{ +#ifdef CONFIG_MEMCG + mem_cgroup_update_lru_size(lruvec, lru, nr_pages); +#else + __update_lru_size(lruvec, lru, nr_pages); +#endif +} + static __always_inline void add_page_to_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { - int nr_pages = hpage_nr_pages(page); - mem_cgroup_update_lru_size(lruvec, lru, nr_pages); + update_lru_size(lruvec, lru, hpage_nr_pages(page)); list_add(&page->lru, &lruvec->lists[lru]); - __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages); } static __always_inline void del_page_from_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { - int nr_pages = hpage_nr_pages(page); - mem_cgroup_update_lru_size(lruvec, lru, -nr_pages); list_del(&page->lru); - __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages); + update_lru_size(lruvec, lru, -hpage_nr_pages(page)); } /** diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index c2d75b4fa86c..d553855503e6 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -12,6 +12,7 @@ #include <linux/cpumask.h> #include <linux/uprobes.h> #include <linux/page-flags-layout.h> +#include <linux/workqueue.h> #include <asm/page.h> #include <asm/mmu.h> @@ -73,9 +74,9 @@ 
struct page { unsigned long counters; #else /* - * Keep _count separate from slub cmpxchg_double data. - * As the rest of the double word is protected by - * slab_lock but _count is not. + * Keep _refcount separate from slub cmpxchg_double + * data. As the rest of the double word is protected by + * slab_lock but _refcount is not. */ unsigned counters; #endif @@ -97,7 +98,11 @@ struct page { }; int units; /* SLOB */ }; - atomic_t _count; /* Usage count, see below. */ + /* + * Usage count, *USE WRAPPER FUNCTION* + * when manual accounting. See page_ref.h + */ + atomic_t _refcount; }; unsigned int active; /* SLAB */ }; @@ -248,7 +253,7 @@ struct page_frag_cache { __u32 offset; #endif /* we maintain a pagecount bias, so that we dont dirty cache line - * containing page->_count every time we allocate a fragment. + * containing page->_refcount every time we allocate a fragment. */ unsigned int pagecnt_bias; bool pfmemalloc; @@ -509,6 +514,7 @@ struct mm_struct { #ifdef CONFIG_HUGETLB_PAGE atomic_long_t hugetlb_usage; #endif + struct work_struct async_put_work; }; static inline void mm_init_cpumask(struct mm_struct *mm) diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index 7b41c6db1bb6..f7ed271a1d54 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h @@ -36,7 +36,6 @@ enum { EVENT_XFER_COMPLETE, EVENT_DATA_COMPLETE, EVENT_DATA_ERROR, - EVENT_XFER_ERROR }; struct mmc_data; @@ -55,6 +54,7 @@ struct dw_mci_dma_slave { /** * struct dw_mci - MMC controller state shared between all slots * @lock: Spinlock protecting the queue and associated data. + * @irq_lock: Spinlock protecting the INTMASK setting. * @regs: Pointer to MMIO registers. * @fifo_reg: Pointer to MMIO registers for data FIFO * @sg: Scatterlist entry currently being processed by PIO code, if any. @@ -65,6 +65,9 @@ struct dw_mci_dma_slave { * @cmd: The command currently being sent to the card, or NULL. * @data: The data currently being transferred, or NULL if no data * transfer is in progress. + * @stop_abort: The command currently prepared for stoping transfer. + * @prev_blksz: The former transfer blksz record. + * @timing: Record of current ios timing. * @use_dma: Whether DMA channel is initialized or not. * @using_dma: Whether DMA is in use for the current transfer. * @dma_64bit_address: Whether DMA supports 64-bit address mode or not. @@ -72,7 +75,10 @@ struct dw_mci_dma_slave { * @sg_cpu: Virtual address of DMA buffer. * @dma_ops: Pointer to platform-specific DMA callbacks. * @cmd_status: Snapshot of SR taken upon completion of the current + * @ring_size: Buffer size for idma descriptors. * command. Only valid when EVENT_CMD_COMPLETE is pending. + * @dms: structure of slave-dma private data. + * @phy_regs: physical address of controller's register map * @data_status: Snapshot of SR taken upon completion of the current * data transfer. Only valid when EVENT_DATA_COMPLETE or * EVENT_DATA_ERROR is pending. @@ -80,7 +86,6 @@ struct dw_mci_dma_slave { * to be sent. * @dir_status: Direction of current transfer. * @tasklet: Tasklet running the request state machine. - * @card_tasklet: Tasklet handling card detect. * @pending_events: Bitmask of events flagged by the interrupt handler * to be processed by the tasklet. * @completed_events: Bitmask of events which the state machine has @@ -91,6 +96,7 @@ struct dw_mci_dma_slave { * rate and timeout calculations. * @current_speed: Configured rate of the controller. * @num_slots: Number of slots available. + * @fifoth_val: The value of FIFOTH register. 
* @verid: Denote Version ID. * @dev: Device associated with the MMC controller. * @pdata: Platform data associated with the MMC controller. @@ -107,9 +113,11 @@ struct dw_mci_dma_slave { * @push_data: Pointer to FIFO push function. * @pull_data: Pointer to FIFO pull function. * @quirks: Set of quirks that apply to specific versions of the IP. + * @vqmmc_enabled: Status of vqmmc, should be true or false. * @irq_flags: The flags to be passed to request_irq. * @irq: The irq value to be passed to request_irq. * @sdio_id0: Number of slot0 in the SDIO interrupt registers. + * @cmd11_timer: Timer for SD3.0 voltage switch over scheme. * @dto_timer: Timer for broken data transfer over scheme. * * Locking diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 8dd4d290ab0d..85800b48241f 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -93,28 +93,39 @@ struct mmc_host_ops { void (*pre_req)(struct mmc_host *host, struct mmc_request *req, bool is_first_req); void (*request)(struct mmc_host *host, struct mmc_request *req); + + /* + * Avoid calling the next three functions too often or in a "fast + * path", since underlaying controller might implement them in an + * expensive and/or slow way. Also note that these functions might + * sleep, so don't call them in the atomic contexts! + */ + + /* + * Notes to the set_ios callback: + * ios->clock might be 0. For some controllers, setting 0Hz + * as any other frequency works. However, some controllers + * explicitly need to disable the clock. Otherwise e.g. voltage + * switching might fail because the SDCLK is not really quiet. + */ + void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios); + /* - * Avoid calling these three functions too often or in a "fast path", - * since underlaying controller might implement them in an expensive - * and/or slow way. - * - * Also note that these functions might sleep, so don't call them - * in the atomic contexts! 
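/*
 * Illustrative sketch, not part of the diff: a host driver following the
 * get_cd()/get_ro() return conventions documented in this mmc_host_ops hunk
 * (0 / 1 / -ENOSYS / negative errno). The example_host structure and its
 * active-low card-detect GPIO are invented for the example; the sketch
 * assumes <linux/gpio.h> and <linux/mmc/host.h>.
 */
struct example_host {
	int cd_gpio;
};

static int example_get_cd(struct mmc_host *host)
{
	struct example_host *eh = mmc_priv(host);

	if (!gpio_is_valid(eh->cd_gpio))
		return -ENOSYS;	/* behave like a NULL callback */

	/* active-low detect: 0 on the pin means a card is present */
	return gpio_get_value(eh->cd_gpio) ? 0 : 1;
}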
- * * Return values for the get_ro callback should be: * 0 for a read/write card * 1 for a read-only card * -ENOSYS when not supported (equal to NULL callback) * or a negative errno value when something bad happened - * + */ + int (*get_ro)(struct mmc_host *host); + + /* * Return values for the get_cd callback should be: * 0 for a absent card * 1 for a present card * -ENOSYS when not supported (equal to NULL callback) * or a negative errno value when something bad happened */ - void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios); - int (*get_ro)(struct mmc_host *host); int (*get_cd)(struct mmc_host *host); void (*enable_sdio_irq)(struct mmc_host *host, int enable); diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 83430f2ea757..0d126aeb3ec0 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -36,6 +36,7 @@ #define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 #define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 #define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 +#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356 #define SDIO_VENDOR_ID_INTEL 0x0089 #define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402 diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h deleted file mode 100644 index 95d6f0314a7d..000000000000 --- a/include/linux/mmc/sh_mobile_sdhi.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef LINUX_MMC_SH_MOBILE_SDHI_H -#define LINUX_MMC_SH_MOBILE_SDHI_H - -#include <linux/types.h> - -#define SH_MOBILE_SDHI_IRQ_CARD_DETECT "card_detect" -#define SH_MOBILE_SDHI_IRQ_SDCARD "sdcard" -#define SH_MOBILE_SDHI_IRQ_SDIO "sdio" - -#endif /* LINUX_MMC_SH_MOBILE_SDHI_H */ diff --git a/include/linux/mmc/tmio.h b/include/linux/mmc/tmio.h deleted file mode 100644 index 5f5cd80e9765..000000000000 --- a/include/linux/mmc/tmio.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * include/linux/mmc/tmio.h - * - * Copyright (C) 2016 Sang Engineering, Wolfram Sang - * Copyright (C) 2015-16 Renesas Electronics Corporation - * Copyright (C) 2007 Ian Molton - * Copyright (C) 2004 Ian Molton - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Driver for the MMC / SD / SDIO cell found in: - * - * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 - */ -#ifndef LINUX_MMC_TMIO_H -#define LINUX_MMC_TMIO_H - -#define CTL_SD_CMD 0x00 -#define CTL_ARG_REG 0x04 -#define CTL_STOP_INTERNAL_ACTION 0x08 -#define CTL_XFER_BLK_COUNT 0xa -#define CTL_RESPONSE 0x0c -#define CTL_STATUS 0x1c -#define CTL_STATUS2 0x1e -#define CTL_IRQ_MASK 0x20 -#define CTL_SD_CARD_CLK_CTL 0x24 -#define CTL_SD_XFER_LEN 0x26 -#define CTL_SD_MEM_CARD_OPT 0x28 -#define CTL_SD_ERROR_DETAIL_STATUS 0x2c -#define CTL_SD_DATA_PORT 0x30 -#define CTL_TRANSACTION_CTL 0x34 -#define CTL_SDIO_STATUS 0x36 -#define CTL_SDIO_IRQ_MASK 0x38 -#define CTL_DMA_ENABLE 0xd8 -#define CTL_RESET_SD 0xe0 -#define CTL_VERSION 0xe2 -#define CTL_SDIO_REGS 0x100 -#define CTL_CLK_AND_WAIT_CTL 0x138 -#define CTL_RESET_SDIO 0x1e0 - -/* Definitions for values the CTRL_STATUS register can take. 
*/ -#define TMIO_STAT_CMDRESPEND 0x00000001 -#define TMIO_STAT_DATAEND 0x00000004 -#define TMIO_STAT_CARD_REMOVE 0x00000008 -#define TMIO_STAT_CARD_INSERT 0x00000010 -#define TMIO_STAT_SIGSTATE 0x00000020 -#define TMIO_STAT_WRPROTECT 0x00000080 -#define TMIO_STAT_CARD_REMOVE_A 0x00000100 -#define TMIO_STAT_CARD_INSERT_A 0x00000200 -#define TMIO_STAT_SIGSTATE_A 0x00000400 -#define TMIO_STAT_CMD_IDX_ERR 0x00010000 -#define TMIO_STAT_CRCFAIL 0x00020000 -#define TMIO_STAT_STOPBIT_ERR 0x00040000 -#define TMIO_STAT_DATATIMEOUT 0x00080000 -#define TMIO_STAT_RXOVERFLOW 0x00100000 -#define TMIO_STAT_TXUNDERRUN 0x00200000 -#define TMIO_STAT_CMDTIMEOUT 0x00400000 -#define TMIO_STAT_RXRDY 0x01000000 -#define TMIO_STAT_TXRQ 0x02000000 -#define TMIO_STAT_ILL_FUNC 0x20000000 -#define TMIO_STAT_CMD_BUSY 0x40000000 -#define TMIO_STAT_ILL_ACCESS 0x80000000 - -#define CLK_CTL_DIV_MASK 0xff -#define CLK_CTL_SCLKEN BIT(8) - -#define TMIO_BBS 512 /* Boot block size */ - -#endif /* LINUX_MMC_TMIO_H */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index c60df9257cc7..02069c23486d 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -85,13 +85,6 @@ extern int page_group_by_mobility_disabled; get_pfnblock_flags_mask(page, page_to_pfn(page), \ PB_migrate_end, MIGRATETYPE_MASK) -static inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn) -{ - BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2); - return get_pfnblock_flags_mask(page, pfn, PB_migrate_end, - MIGRATETYPE_MASK); -} - struct free_area { struct list_head free_list[MIGRATE_TYPES]; unsigned long nr_free; @@ -746,8 +739,12 @@ static inline bool is_dev_zone(const struct zone *zone) extern struct mutex zonelists_mutex; void build_all_zonelists(pg_data_t *pgdat, struct zone *zone); void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); +bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, + int classzone_idx, unsigned int alloc_flags, + long free_pages); bool zone_watermark_ok(struct zone *z, unsigned int order, - unsigned long mark, int classzone_idx, int alloc_flags); + unsigned long mark, int classzone_idx, + unsigned int alloc_flags); bool zone_watermark_ok_safe(struct zone *z, unsigned int order, unsigned long mark, int classzone_idx); enum memmap_context { @@ -828,10 +825,7 @@ static inline int is_highmem_idx(enum zone_type idx) static inline int is_highmem(struct zone *zone) { #ifdef CONFIG_HIGHMEM - int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones; - return zone_off == ZONE_HIGHMEM * sizeof(*zone) || - (zone_off == ZONE_MOVABLE * sizeof(*zone) && - zone_movable_is_highmem()); + return is_highmem_idx(zone_idx(zone)); #else return 0; #endif @@ -922,6 +916,10 @@ static inline int zonelist_node_idx(struct zoneref *zoneref) #endif /* CONFIG_NUMA */ } +struct zoneref *__next_zones_zonelist(struct zoneref *z, + enum zone_type highest_zoneidx, + nodemask_t *nodes); + /** * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point * @z - The cursor used as a starting point for the search @@ -934,9 +932,14 @@ static inline int zonelist_node_idx(struct zoneref *zoneref) * being examined. It should be advanced by one before calling * next_zones_zonelist again. 
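/*
 * Illustrative sketch, not part of the diff: walking a zonelist with the
 * reworked iterator in this mmzone.h hunk. first_zones_zonelist() no longer
 * returns the zone via an output pointer; callers read it from the returned
 * zoneref, which is what for_each_zone_zonelist_nodemask() now does. The
 * function name is invented for the example.
 */
static struct zone *example_first_usable_zone(struct zonelist *zonelist,
					      enum zone_type highidx,
					      nodemask_t *nodes)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist_nodemask(zone, z, zonelist, highidx, nodes)
		return zone;	/* first zone at or below highidx */

	return NULL;
}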
*/ -struct zoneref *next_zones_zonelist(struct zoneref *z, +static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z, enum zone_type highest_zoneidx, - nodemask_t *nodes); + nodemask_t *nodes) +{ + if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx)) + return z; + return __next_zones_zonelist(z, highest_zoneidx, nodes); +} /** * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist @@ -952,13 +955,10 @@ struct zoneref *next_zones_zonelist(struct zoneref *z, */ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, enum zone_type highest_zoneidx, - nodemask_t *nodes, - struct zone **zone) + nodemask_t *nodes) { - struct zoneref *z = next_zones_zonelist(zonelist->_zonerefs, + return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes); - *zone = zonelist_zone(z); - return z; } /** @@ -973,10 +973,17 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, * within a given nodemask */ #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ - for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \ + for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \ + zone; \ + z = next_zones_zonelist(++z, highidx, nodemask), \ + zone = zonelist_zone(z)) + +#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ + for (zone = z->zone; \ zone; \ z = next_zones_zonelist(++z, highidx, nodemask), \ - zone = zonelist_zone(z)) \ + zone = zonelist_zone(z)) + /** * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index @@ -1056,7 +1063,7 @@ struct mem_section { unsigned long *pageblock_flags; #ifdef CONFIG_PAGE_EXTENSION /* - * If !SPARSEMEM, pgdat doesn't have page_ext pointer. We use + * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use * section. (see page_ext.h about this.) */ struct page_ext *page_ext; diff --git a/include/linux/module.h b/include/linux/module.h index 2bb0c3085706..3daf2b3a09d2 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -330,6 +330,15 @@ struct mod_kallsyms { char *strtab; }; +#ifdef CONFIG_LIVEPATCH +struct klp_modinfo { + Elf_Ehdr hdr; + Elf_Shdr *sechdrs; + char *secstrings; + unsigned int symndx; +}; +#endif + struct module { enum module_state state; @@ -456,7 +465,11 @@ struct module { #endif #ifdef CONFIG_LIVEPATCH + bool klp; /* Is this a livepatch module? */ bool klp_alive; + + /* Elf information */ + struct klp_modinfo *klp_info; #endif #ifdef CONFIG_MODULE_UNLOAD @@ -630,6 +643,18 @@ static inline bool module_requested_async_probing(struct module *module) return module && module->async_probe_requested; } +#ifdef CONFIG_LIVEPATCH +static inline bool is_livepatch_module(struct module *mod) +{ + return mod->klp; +} +#else /* !CONFIG_LIVEPATCH */ +static inline bool is_livepatch_module(struct module *mod) +{ + return false; +} +#endif /* CONFIG_LIVEPATCH */ + #else /* !CONFIG_MODULES... */ /* Given an address, look for it in the exception tables. 
*/ diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h index c8be32e9fc49..ad3c3488073c 100644 --- a/include/linux/mtd/fsmc.h +++ b/include/linux/mtd/fsmc.h @@ -103,24 +103,6 @@ #define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ) -/* - * There are 13 bytes of ecc for every 512 byte block in FSMC version 8 - * and it has to be read consecutively and immediately after the 512 - * byte data block for hardware to generate the error bit offsets - * Managing the ecc bytes in the following way is easier. This way is - * similar to oobfree structure maintained already in u-boot nand driver - */ -#define MAX_ECCPLACE_ENTRIES 32 - -struct fsmc_nand_eccplace { - uint8_t offset; - uint8_t length; -}; - -struct fsmc_eccplace { - struct fsmc_nand_eccplace eccplace[MAX_ECCPLACE_ENTRIES]; -}; - struct fsmc_nand_timings { uint8_t tclr; uint8_t tar; diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index 5e0eb7ccabd4..3aa56e3104bb 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h @@ -122,18 +122,13 @@ #endif #ifdef CONFIG_MTD_MAP_BANK_WIDTH_32 -# ifdef map_bankwidth -# undef map_bankwidth -# define map_bankwidth(map) ((map)->bankwidth) -# undef map_bankwidth_is_large -# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) -# undef map_words -# define map_words(map) map_calc_words(map) -# else -# define map_bankwidth(map) 32 -# define map_bankwidth_is_large(map) (1) -# define map_words(map) map_calc_words(map) -# endif +/* always use indirect access for 256-bit to preserve kernel stack */ +# undef map_bankwidth +# define map_bankwidth(map) ((map)->bankwidth) +# undef map_bankwidth_is_large +# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) +# undef map_words +# define map_words(map) map_calc_words(map) #define map_bankwidth_is_32(map) (map_bankwidth(map) == 32) #undef MAX_MAP_BANKWIDTH #define MAX_MAP_BANKWIDTH 32 diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 771272187316..29a170612203 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -96,16 +96,35 @@ struct mtd_oob_ops { #define MTD_MAX_OOBFREE_ENTRIES_LARGE 32 #define MTD_MAX_ECCPOS_ENTRIES_LARGE 640 +/** + * struct mtd_oob_region - oob region definition + * @offset: region offset + * @length: region length + * + * This structure describes a region of the OOB area, and is used + * to retrieve ECC or free bytes sections. + * Each section is defined by an offset within the OOB area and a + * length. + */ +struct mtd_oob_region { + u32 offset; + u32 length; +}; + /* - * Internal ECC layout control structure. For historical reasons, there is a - * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained - * for export to user-space via the ECCGETLAYOUT ioctl. - * nand_ecclayout should be expandable in the future simply by the above macros. + * struct mtd_ooblayout_ops - NAND OOB layout operations + * @ecc: function returning an ECC region in the OOB area. + * Should return -ERANGE if %section exceeds the total number of + * ECC sections. + * @free: function returning a free region in the OOB area. + * Should return -ERANGE if %section exceeds the total number of + * free sections. 
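/*
 * Illustrative sketch, not part of the diff: a NAND driver describing its
 * OOB area through the mtd_ooblayout_ops interface introduced in this hunk,
 * instead of a static nand_ecclayout table. The single 4-byte ECC region and
 * all names are invented for the example.
 */
static int example_ooblayout_ecc(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;	/* only one ECC section in this example */

	oobregion->offset = 0;
	oobregion->length = 4;
	return 0;
}

static int example_ooblayout_free(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->offset = 4;
	oobregion->length = mtd->oobsize - 4;
	return 0;
}

static const struct mtd_ooblayout_ops example_ooblayout_ops = {
	.ecc	= example_ooblayout_ecc,
	.free	= example_ooblayout_free,
};

/* in the driver's probe path: mtd_set_ooblayout(mtd, &example_ooblayout_ops); */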
*/ -struct nand_ecclayout { - __u32 eccbytes; - __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE]; - struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE]; +struct mtd_ooblayout_ops { + int (*ecc)(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobecc); + int (*free)(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobfree); }; struct module; /* only needed for owner field in mtd_info */ @@ -166,8 +185,8 @@ struct mtd_info { const char *name; int index; - /* ECC layout structure pointer - read only! */ - struct nand_ecclayout *ecclayout; + /* OOB layout description */ + const struct mtd_ooblayout_ops *ooblayout; /* the ecc step size. */ unsigned int ecc_step_size; @@ -253,6 +272,30 @@ struct mtd_info { int usecount; }; +int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobecc); +int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte, + int *section, + struct mtd_oob_region *oobregion); +int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf, + const u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf, + u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobfree); +int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf, + const u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf, + u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_count_freebytes(struct mtd_info *mtd); +int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd); + +static inline void mtd_set_ooblayout(struct mtd_info *mtd, + const struct mtd_ooblayout_ops *ooblayout) +{ + mtd->ooblayout = ooblayout; +} + static inline void mtd_set_of_node(struct mtd_info *mtd, struct device_node *np) { @@ -283,17 +326,7 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops); - -static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to, - struct mtd_oob_ops *ops) -{ - ops->retlen = ops->oobretlen = 0; - if (!mtd->_write_oob) - return -EOPNOTSUPP; - if (!(mtd->flags & MTD_WRITEABLE)) - return -EROFS; - return mtd->_write_oob(mtd, to, ops); -} +int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops); int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, struct otp_info *buf); diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 56574ba36555..fbe8e164a4ee 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -116,9 +116,14 @@ typedef enum { NAND_ECC_HW, NAND_ECC_HW_SYNDROME, NAND_ECC_HW_OOB_FIRST, - NAND_ECC_SOFT_BCH, } nand_ecc_modes_t; +enum nand_ecc_algo { + NAND_ECC_UNKNOWN, + NAND_ECC_HAMMING, + NAND_ECC_BCH, +}; + /* * Constants for Hardware ECC */ @@ -458,6 +463,7 @@ struct nand_hw_control { /** * struct nand_ecc_ctrl - Control structure for ECC * @mode: ECC mode + * @algo: ECC algorithm * @steps: number of ECC steps per page * @size: data bytes per ECC step * @bytes: ECC bytes per step @@ -466,7 +472,6 @@ struct nand_hw_control { * @prepad: padding information for syndrome based ECC generators * @postpad: padding information for syndrome based ECC generators * @options: ECC specific options (see NAND_ECC_XXX flags defined above) - * @layout: ECC layout control struct pointer * @priv: pointer to private ECC control data * @hwctl: function to control hardware ECC 
generator. Must only * be provided if an hardware ECC is available @@ -508,6 +513,7 @@ struct nand_hw_control { */ struct nand_ecc_ctrl { nand_ecc_modes_t mode; + enum nand_ecc_algo algo; int steps; int size; int bytes; @@ -516,7 +522,6 @@ struct nand_ecc_ctrl { int prepad; int postpad; unsigned int options; - struct nand_ecclayout *layout; void *priv; void (*hwctl)(struct mtd_info *mtd, int mode); int (*calculate)(struct mtd_info *mtd, const uint8_t *dat, @@ -740,6 +745,9 @@ struct nand_chip { void *priv; }; +extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; +extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops; + static inline void nand_set_flash_node(struct nand_chip *chip, struct device_node *np) { @@ -1070,4 +1078,18 @@ int nand_check_erased_ecc_chunk(void *data, int datalen, void *ecc, int ecclen, void *extraoob, int extraooblen, int threshold); + +/* Default write_oob implementation */ +int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page); + +/* Default write_oob syndrome implementation */ +int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, + int page); + +/* Default read_oob implementation */ +int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page); + +/* Default read_oob syndrome implementation */ +int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, + int page); #endif /* __LINUX_MTD_NAND_H */ diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h index 4596503c9da9..0aaa98b219a4 100644 --- a/include/linux/mtd/onenand.h +++ b/include/linux/mtd/onenand.h @@ -80,7 +80,6 @@ struct onenand_bufferram { * @page_buf: [INTERN] page main data buffer * @oob_buf: [INTERN] page oob data buffer * @subpagesize: [INTERN] holds the subpagesize - * @ecclayout: [REPLACEABLE] the default ecc placement scheme * @bbm: [REPLACEABLE] pointer to Bad Block Management * @priv: [OPTIONAL] pointer to private chip date */ @@ -134,7 +133,6 @@ struct onenand_chip { #endif int subpagesize; - struct nand_ecclayout *ecclayout; void *bbm; diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h index 25f4d2a845c1..65e91d0fa981 100644 --- a/include/linux/mtd/sharpsl.h +++ b/include/linux/mtd/sharpsl.h @@ -14,7 +14,7 @@ struct sharpsl_nand_platform_data { struct nand_bbt_descr *badblock_pattern; - struct nand_ecclayout *ecc_layout; + const struct mtd_ooblayout_ops *ecc_layout; struct mtd_partition *partitions; unsigned int nr_partitions; }; diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 3c36113a88e1..7f041bd88b82 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -21,6 +21,7 @@ * Sometimes these are the same as CFI IDs, but sometimes they aren't. 
*/ #define SNOR_MFR_ATMEL CFI_MFR_ATMEL +#define SNOR_MFR_GIGADEVICE 0xc8 #define SNOR_MFR_INTEL CFI_MFR_INTEL #define SNOR_MFR_MICRON CFI_MFR_ST /* ST Micro <--> Micron */ #define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX diff --git a/include/linux/nd.h b/include/linux/nd.h index 5489ab756d1a..aee2761d294c 100644 --- a/include/linux/nd.h +++ b/include/linux/nd.h @@ -15,6 +15,7 @@ #include <linux/fs.h> #include <linux/ndctl.h> #include <linux/device.h> +#include <linux/badblocks.h> enum nvdimm_event { NVDIMM_REVALIDATE_POISON, @@ -55,13 +56,19 @@ static inline struct nd_namespace_common *to_ndns(struct device *dev) } /** - * struct nd_namespace_io - infrastructure for loading an nd_pmem instance + * struct nd_namespace_io - device representation of a persistent memory range * @dev: namespace device created by the nd region driver * @res: struct resource conversion of a NFIT SPA table + * @size: cached resource_size(@res) for fast path size checks + * @addr: virtual address to access the namespace range + * @bb: badblocks list for the namespace range */ struct nd_namespace_io { struct nd_namespace_common common; struct resource res; + resource_size_t size; + void __pmem *addr; + struct badblocks bb; }; /** @@ -82,6 +89,7 @@ struct nd_namespace_pmem { * @uuid: namespace name supplied in the dimm label * @id: ida allocated id * @lbasize: blk namespaces have a native sector size when btt not present + * @size: sum of all the resource ranges allocated to this namespace * @num_resources: number of dpa extents to claim * @res: discontiguous dpa extents for given dimm */ @@ -91,6 +99,7 @@ struct nd_namespace_blk { u8 *uuid; int id; unsigned long lbasize; + resource_size_t size; int num_resources; struct resource **res; }; diff --git a/include/linux/net.h b/include/linux/net.h index f840d77c6c31..9aa49a05fe38 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -218,8 +218,7 @@ int sock_create_lite(int family, int type, int proto, struct socket **res); struct socket *sock_alloc(void); void sock_release(struct socket *sock); int sock_sendmsg(struct socket *sock, struct msghdr *msg); -int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, - int flags); +int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags); struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname); struct socket *sockfd_lookup(int fd, int *err); struct socket *sock_from_file(struct file *file, int *err); diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index a734bf43d190..aa7b2400f98c 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -39,14 +39,19 @@ enum { NETIF_F_UFO_BIT, /* ... UDPv4 fragmentation */ NETIF_F_GSO_ROBUST_BIT, /* ... ->SKB_GSO_DODGY */ NETIF_F_TSO_ECN_BIT, /* ... TCP ECN support */ + NETIF_F_TSO_MANGLEID_BIT, /* ... IPV4 ID mangling allowed */ NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */ NETIF_F_FSO_BIT, /* ... FCoE segmentation */ NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */ NETIF_F_GSO_GRE_CSUM_BIT, /* ... GRE with csum with TSO */ - NETIF_F_GSO_IPIP_BIT, /* ... IPIP tunnel with TSO */ - NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */ + NETIF_F_GSO_IPXIP4_BIT, /* ... IP4 or IP6 over IP4 with TSO */ + NETIF_F_GSO_IPXIP6_BIT, /* ... IP4 or IP6 over IP6 with TSO */ NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */ + NETIF_F_GSO_PARTIAL_BIT, /* ... 
Only segment inner-most L4 + * in hardware and all other + * headers in software. + */ NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */ /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ NETIF_F_GSO_TUNNEL_REMCSUM_BIT, @@ -116,10 +121,12 @@ enum { #define NETIF_F_RXALL __NETIF_F(RXALL) #define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE) #define NETIF_F_GSO_GRE_CSUM __NETIF_F(GSO_GRE_CSUM) -#define NETIF_F_GSO_IPIP __NETIF_F(GSO_IPIP) -#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) +#define NETIF_F_GSO_IPXIP4 __NETIF_F(GSO_IPXIP4) +#define NETIF_F_GSO_IPXIP6 __NETIF_F(GSO_IPXIP6) #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM) +#define NETIF_F_TSO_MANGLEID __NETIF_F(TSO_MANGLEID) +#define NETIF_F_GSO_PARTIAL __NETIF_F(GSO_PARTIAL) #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) @@ -145,10 +152,6 @@ enum { #define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \ __NETIF_F_BIT(NETIF_F_GSO_SHIFT)) -/* List of features with software fallbacks. */ -#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \ - NETIF_F_TSO6 | NETIF_F_UFO) - /* List of IP checksum features. Note that NETIF_F_ HW_CSUM should not be * set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM are set-- * this would be contradictory @@ -156,11 +159,15 @@ enum { #define NETIF_F_CSUM_MASK (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ NETIF_F_HW_CSUM) -#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) +#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | \ + NETIF_F_TSO_ECN | NETIF_F_TSO_MANGLEID) #define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ NETIF_F_FSO) +/* List of features with software fallbacks. */ +#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO) + /* * If one device supports one of these features, then enable them * for all in netdev_increment_features. 
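/*
 * Illustrative sketch, not part of the diff: how a driver might advertise
 * the new NETIF_F_GSO_PARTIAL support, exposing checksum-bearing tunnel GSO
 * types through the gso_partial_features field added to net_device in this
 * series. The particular tunnel features chosen here are only an example.
 */
static void example_setup_gso_partial(struct net_device *netdev)
{
	netdev->gso_partial_features = NETIF_F_GSO_GRE_CSUM |
				       NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= NETIF_F_GSO_PARTIAL |
			       netdev->gso_partial_features;
	netdev->features |= NETIF_F_GSO_PARTIAL;
}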
@@ -193,8 +200,8 @@ enum { #define NETIF_F_GSO_ENCAP_ALL (NETIF_F_GSO_GRE | \ NETIF_F_GSO_GRE_CSUM | \ - NETIF_F_GSO_IPIP | \ - NETIF_F_GSO_SIT | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ NETIF_F_GSO_UDP_TUNNEL | \ NETIF_F_GSO_UDP_TUNNEL_CSUM) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 78181a88903b..f45929ce8157 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -106,7 +106,6 @@ enum netdev_tx { __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ NETDEV_TX_OK = 0x00, /* driver took care of packet */ NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/ - NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */ }; typedef enum netdev_tx netdev_tx_t; @@ -570,28 +569,27 @@ struct netdev_queue { #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) int numa_node; #endif + unsigned long tx_maxrate; + /* + * Number of TX timeouts for this queue + * (/sys/class/net/DEV/Q/trans_timeout) + */ + unsigned long trans_timeout; /* * write-mostly part */ spinlock_t _xmit_lock ____cacheline_aligned_in_smp; int xmit_lock_owner; /* - * please use this field instead of dev->trans_start + * Time (in jiffies) of last Tx */ unsigned long trans_start; - /* - * Number of TX timeouts for this queue - * (/sys/class/net/DEV/Q/trans_timeout) - */ - unsigned long trans_timeout; - unsigned long state; #ifdef CONFIG_BQL struct dql dql; #endif - unsigned long tx_maxrate; } ____cacheline_aligned_in_smp; static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) @@ -831,7 +829,6 @@ struct tc_to_netdev { * the queue before that can happen; it's for obsolete devices and weird * corner cases, but the stack really does a non-trivial amount * of useless work if you return NETDEV_TX_BUSY. - * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) * Required; cannot be NULL. * * netdev_features_t (*ndo_fix_features)(struct net_device *dev, @@ -1548,7 +1545,6 @@ enum netdev_priv_flags { * * @offload_fwd_mark: Offload device fwding mark * - * @trans_start: Time (in jiffies) of last Tx * @watchdog_timeo: Represents the timeout that is used by * the watchdog (see dev_watchdog()) * @watchdog_timer: List of timers @@ -1586,8 +1582,6 @@ enum netdev_priv_flags { * @gso_max_size: Maximum size of generic segmentation offload * @gso_max_segs: Maximum number of segments that can be passed to the * NIC for GSO - * @gso_min_segs: Minimum number of segments that can be passed to the - * NIC for GSO * * @dcbnl_ops: Data Center Bridging netlink ops * @num_tc: Number of traffic classes in the net device @@ -1656,6 +1650,7 @@ struct net_device { netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; + netdev_features_t gso_partial_features; int ifindex; int group; @@ -1798,13 +1793,6 @@ struct net_device { #endif /* These may be needed for future network-power-down code. */ - - /* - * trans_start here is expensive for high speed devices on SMP, - * please use netdev_queue->trans_start instead. 
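/*
 * Illustrative sketch, not part of the diff: with net_device::trans_start
 * removed, a legacy single-queue driver updates the per-queue timestamp via
 * the netif_trans_update() helper added in this netdevice.h change, from its
 * transmit path. The transmit body is elided; names are invented.
 */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	/* ... hand the skb to the hardware ... */

	netif_trans_update(dev);	/* was: dev->trans_start = jiffies; */
	return NETDEV_TX_OK;
}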
- */ - unsigned long trans_start; - struct timer_list watchdog_timer; int __percpu *pcpu_refcnt; @@ -1858,7 +1846,7 @@ struct net_device { unsigned int gso_max_size; #define GSO_MAX_SEGS 65535 u16 gso_max_segs; - u16 gso_min_segs; + #ifdef CONFIG_DCB const struct dcbnl_rtnl_ops *dcbnl_ops; #endif @@ -2123,7 +2111,10 @@ struct napi_gro_cb { /* Used in GRE, set in fou/gue_gro_receive */ u8 is_fou:1; - /* 6 bit hole */ + /* Used to determine if flush_id can be ignored */ + u8 is_atomic:1; + + /* 5 bit hole */ /* used to support CHECKSUM_COMPLETE for tunneling protocols */ __wsum csum; @@ -2162,26 +2153,6 @@ struct packet_offload { struct list_head list; }; -struct udp_offload; - -/* 'skb->encapsulation' is set before gro_complete() is called. gro_complete() - * must set 'skb->inner_mac_header' to the beginning of tunnel payload. - */ -struct udp_offload_callbacks { - struct sk_buff **(*gro_receive)(struct sk_buff **head, - struct sk_buff *skb, - struct udp_offload *uoff); - int (*gro_complete)(struct sk_buff *skb, - int nhoff, - struct udp_offload *uoff); -}; - -struct udp_offload { - __be16 port; - u8 ipproto; - struct udp_offload_callbacks callbacks; -}; - /* often modified stats are per-CPU, other are shared (netdev->stats) */ struct pcpu_sw_netstats { u64 rx_packets; @@ -2262,6 +2233,8 @@ struct netdev_lag_lower_state_info { #define NETDEV_BONDING_INFO 0x0019 #define NETDEV_PRECHANGEUPPER 0x001A #define NETDEV_CHANGELOWERSTATE 0x001B +#define NETDEV_OFFLOAD_PUSH_VXLAN 0x001C +#define NETDEV_OFFLOAD_PUSH_GENEVE 0x001D int register_netdevice_notifier(struct notifier_block *nb); int unregister_netdevice_notifier(struct notifier_block *nb); @@ -2753,7 +2726,6 @@ struct softnet_data { /* stats */ unsigned int processed; unsigned int time_squeeze; - unsigned int cpu_collision; unsigned int received_rps; #ifdef CONFIG_RPS struct softnet_data *rps_ipi_list; @@ -2766,11 +2738,15 @@ struct softnet_data { struct sk_buff *completion_queue; #ifdef CONFIG_RPS - /* Elements below can be accessed between CPUs for RPS */ + /* input_queue_head should be written by cpu owning this struct, + * and only read by other cpus. Worth using a cache line. 
+ */ + unsigned int input_queue_head ____cacheline_aligned_in_smp; + + /* Elements below can be accessed between CPUs for RPS/RFS */ struct call_single_data csd ____cacheline_aligned_in_smp; struct softnet_data *rps_ipi_next; unsigned int cpu; - unsigned int input_queue_head; unsigned int input_queue_tail; #endif unsigned int dropped; @@ -2807,7 +2783,7 @@ static inline void netif_tx_schedule_all(struct net_device *dev) netif_schedule_queue(netdev_get_tx_queue(dev, i)); } -static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) +static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue) { clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); } @@ -2857,7 +2833,7 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev) } } -static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) +static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) { set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); } @@ -3279,7 +3255,10 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); -bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb); +bool is_skb_forwardable(const struct net_device *dev, + const struct sk_buff *skb); + +void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); extern int netdev_budget; @@ -3496,6 +3475,15 @@ static inline void txq_trans_update(struct netdev_queue *txq) txq->trans_start = jiffies; } +/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */ +static inline void netif_trans_update(struct net_device *dev) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); + + if (txq->trans_start != jiffies) + txq->trans_start = jiffies; +} + /** * netif_tx_lock - grab network device transmit lock * @dev: network device @@ -3771,7 +3759,6 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, extern int netdev_max_backlog; extern int netdev_tstamp_prequeue; extern int weight_p; -extern int bpf_jit_enable; bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, @@ -4014,14 +4001,16 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_IPIP != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> 
NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); return (features & feature) == feature; diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index f48b8a664b0f..83b9a2e0d8d4 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -351,7 +351,8 @@ ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo) return ((skbinfo->skbmark || skbinfo->skbmarkmask) && nla_put_net64(skb, IPSET_ATTR_SKBMARK, cpu_to_be64((u64)skbinfo->skbmark << 32 | - skbinfo->skbmarkmask))) || + skbinfo->skbmarkmask), + IPSET_ATTR_PAD)) || (skbinfo->skbprio && nla_put_net32(skb, IPSET_ATTR_SKBPRIO, cpu_to_be32(skbinfo->skbprio))) || @@ -374,9 +375,11 @@ static inline bool ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter) { return nla_put_net64(skb, IPSET_ATTR_BYTES, - cpu_to_be64(ip_set_get_bytes(counter))) || + cpu_to_be64(ip_set_get_bytes(counter)), + IPSET_ATTR_PAD) || nla_put_net64(skb, IPSET_ATTR_PACKETS, - cpu_to_be64(ip_set_get_packets(counter))); + cpu_to_be64(ip_set_get_packets(counter)), + IPSET_ATTR_PAD); } static inline void diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 80a305b85323..dc4f58a3cdcc 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -242,11 +242,18 @@ void xt_unregister_match(struct xt_match *target); int xt_register_matches(struct xt_match *match, unsigned int n); void xt_unregister_matches(struct xt_match *match, unsigned int n); +int xt_check_entry_offsets(const void *base, const char *elems, + unsigned int target_offset, + unsigned int next_offset); + int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto, bool inv_proto); int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, bool inv_proto); +void *xt_copy_counters_from_user(const void __user *user, unsigned int len, + struct xt_counters_info *info, bool compat); + struct xt_table *xt_register_table(struct net *net, const struct xt_table *table, struct xt_table_info *bootstrap, @@ -373,16 +380,16 @@ static inline unsigned long ifname_compare_aligned(const char *_a, * allows us to return 0 for single core systems without forcing * callers to deal with SMP vs. NONSMP issues. 
*/ -static inline u64 xt_percpu_counter_alloc(void) +static inline unsigned long xt_percpu_counter_alloc(void) { if (nr_cpu_ids > 1) { void __percpu *res = __alloc_percpu(sizeof(struct xt_counters), sizeof(struct xt_counters)); if (res == NULL) - return (u64) -ENOMEM; + return -ENOMEM; - return (u64) (__force unsigned long) res; + return (__force unsigned long) res; } return 0; @@ -480,7 +487,7 @@ void xt_compat_init_offsets(u_int8_t af, unsigned int number); int xt_compat_calc_jump(u_int8_t af, unsigned int offset); int xt_compat_match_offset(const struct xt_match *match); -int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, +void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, unsigned int *size); int xt_compat_match_to_user(const struct xt_entry_match *m, void __user **dstptr, unsigned int *size); @@ -490,6 +497,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, unsigned int *size); int xt_compat_target_to_user(const struct xt_entry_target *t, void __user **dstptr, unsigned int *size); +int xt_compat_check_entry_offsets(const void *base, const char *elems, + unsigned int target_offset, + unsigned int next_offset); #endif /* CONFIG_COMPAT */ #endif /* _X_TABLES_H */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 67300f8e5f2f..d71278c3c5bd 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -163,11 +163,9 @@ struct nfs_inode { /* Open contexts for shared mmap writes */ struct list_head open_files; - /* Number of in-flight sillydelete RPC calls */ - atomic_t silly_count; - /* List of deferred sillydelete requests */ - struct hlist_head silly_list; - wait_queue_head_t waitqueue; + /* Readers: in-flight sillydelete RPC calls */ + /* Writers: rmdir */ + struct rw_semaphore rmdir_sem; #if IS_ENABLED(CONFIG_NFS_V4) struct nfs4_cached_acl *nfs4_acl; @@ -445,10 +443,9 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file) /* * linux/fs/nfs/direct.c */ -extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *, loff_t); +extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *); extern ssize_t nfs_file_direct_read(struct kiocb *iocb, - struct iov_iter *iter, - loff_t pos); + struct iov_iter *iter); extern ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter); @@ -492,9 +489,6 @@ extern void nfs_release_automount_timer(void); * linux/fs/nfs/unlink.c */ extern void nfs_complete_unlink(struct dentry *dentry, struct inode *); -extern void nfs_wait_on_sillyrename(struct dentry *dentry); -extern void nfs_block_sillyrename(struct dentry *dentry); -extern void nfs_unblock_sillyrename(struct dentry *dentry); /* * linux/fs/nfs/write.c diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index d320906cf13e..ee8491dadbf3 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1468,10 +1468,10 @@ struct nfs_pgio_completion_ops { }; struct nfs_unlinkdata { - struct hlist_node list; struct nfs_removeargs args; struct nfs_removeres res; - struct inode *dir; + struct dentry *dentry; + wait_queue_head_t wq; struct rpc_cred *cred; struct nfs_fattr dir_attr; long timeout; diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h index e9fcf90b270d..5988dd57ba66 100644 --- a/include/linux/nilfs2_fs.h +++ b/include/linux/nilfs2_fs.h @@ -13,12 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
* - * You should have received a copy of the GNU Lesser General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Koji Sato <koji@osrg.net> - * Ryusuke Konishi <ryusuke@osrg.net> + * Written by Koji Sato and Ryusuke Konishi. */ /* * linux/include/linux/ext2_fs.h @@ -132,10 +127,14 @@ struct nilfs_super_root { #define NILFS_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */ #define NILFS_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */ #define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */ -#define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order - semantics also for data */ -#define NILFS_MOUNT_NORECOVERY 0x4000 /* Disable write access during - mount-time recovery */ +#define NILFS_MOUNT_STRICT_ORDER 0x2000 /* + * Apply strict in-order + * semantics also for data + */ +#define NILFS_MOUNT_NORECOVERY 0x4000 /* + * Disable write access during + * mount-time recovery + */ #define NILFS_MOUNT_DISCARD 0x8000 /* Issue DISCARD requests */ @@ -147,16 +146,20 @@ struct nilfs_super_block { __le16 s_minor_rev_level; /* minor revision level */ __le16 s_magic; /* Magic signature */ - __le16 s_bytes; /* Bytes count of CRC calculation - for this structure. s_reserved - is excluded. */ + __le16 s_bytes; /* + * Bytes count of CRC calculation + * for this structure. s_reserved + * is excluded. + */ __le16 s_flags; /* flags */ __le32 s_crc_seed; /* Seed value of CRC calculation */ /*10*/ __le32 s_sum; /* Check sum of super block */ - __le32 s_log_block_size; /* Block size represented as follows - blocksize = - 1 << (s_log_block_size + 10) */ + __le32 s_log_block_size; /* + * Block size represented as follows + * blocksize = + * 1 << (s_log_block_size + 10) + */ __le64 s_nsegments; /* Number of segments in filesystem */ /*20*/ __le64 s_dev_size; /* block device size in bytes */ __le64 s_first_data_block; /* 1st seg disk block number */ @@ -168,8 +171,10 @@ struct nilfs_super_block { __le64 s_last_seq; /* seq. 
number of seg written last */ /*50*/ __le64 s_free_blocks_count; /* Free blocks count */ - __le64 s_ctime; /* Creation time (execution time of - newfs) */ + __le64 s_ctime; /* + * Creation time (execution time of + * newfs) + */ /*60*/ __le64 s_mtime; /* Mount time */ __le64 s_wtime; /* Write time */ /*70*/ __le16 s_mnt_count; /* Mount count */ @@ -193,8 +198,10 @@ struct nilfs_super_block { /*A8*/ char s_volume_name[80]; /* volume name */ /*F8*/ __le32 s_c_interval; /* Commit interval of segment */ - __le32 s_c_block_max; /* Threshold of data amount for - the segment construction */ + __le32 s_c_block_max; /* + * Threshold of data amount for + * the segment construction + */ /*100*/ __le64 s_feature_compat; /* Compatible feature set */ __le64 s_feature_compat_ro; /* Read-only compatible feature set */ __le64 s_feature_incompat; /* Incompatible feature set */ @@ -247,12 +254,18 @@ struct nilfs_super_block { #define NILFS_SB_OFFSET_BYTES 1024 /* byte offset of nilfs superblock */ -#define NILFS_SEG_MIN_BLOCKS 16 /* Minimum number of blocks in - a full segment */ -#define NILFS_PSEG_MIN_BLOCKS 2 /* Minimum number of blocks in - a partial segment */ -#define NILFS_MIN_NRSVSEGS 8 /* Minimum number of reserved - segments */ +#define NILFS_SEG_MIN_BLOCKS 16 /* + * Minimum number of blocks in + * a full segment + */ +#define NILFS_PSEG_MIN_BLOCKS 2 /* + * Minimum number of blocks in + * a partial segment + */ +#define NILFS_MIN_NRSVSEGS 8 /* + * Minimum number of reserved + * segments + */ /* * We call DAT, cpfile, and sufile root metadata files. Inodes of @@ -327,9 +340,9 @@ enum { ~NILFS_DIR_ROUND) #define NILFS_MAX_REC_LEN ((1<<16)-1) -static inline unsigned nilfs_rec_len_from_disk(__le16 dlen) +static inline unsigned int nilfs_rec_len_from_disk(__le16 dlen) { - unsigned len = le16_to_cpu(dlen); + unsigned int len = le16_to_cpu(dlen); #if !defined(__KERNEL__) || (PAGE_SIZE >= 65536) if (len == NILFS_MAX_REC_LEN) @@ -338,7 +351,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen) return len; } -static inline __le16 nilfs_rec_len_to_disk(unsigned len) +static inline __le16 nilfs_rec_len_to_disk(unsigned int len) { #if !defined(__KERNEL__) || (PAGE_SIZE >= 65536) if (len == (1 << 16)) @@ -518,9 +531,11 @@ struct nilfs_checkpoint { __le64 cp_inodes_count; __le64 cp_blocks_count; - /* Do not change the byte offset of ifile inode. - To keep the compatibility of the disk format, - additional fields should be added behind cp_ifile_inode. */ + /* + * Do not change the byte offset of ifile inode. + * To keep the compatibility of the disk format, + * additional fields should be added behind cp_ifile_inode. + */ struct nilfs_inode cp_ifile_inode; }; diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h index 167342c2ce6b..0f6f6607f592 100644 --- a/include/linux/nl802154.h +++ b/include/linux/nl802154.h @@ -92,6 +92,8 @@ enum { IEEE802154_ATTR_LLSEC_DEV_OVERRIDE, IEEE802154_ATTR_LLSEC_DEV_KEY_MODE, + IEEE802154_ATTR_PAD, + __IEEE802154_ATTR_MAX, }; diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index 6e85889cf9ab..f746e44d4046 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -43,8 +43,10 @@ * * int first_node(mask) Number lowest set bit, or MAX_NUMNODES * int next_node(node, mask) Next node past 'node', or MAX_NUMNODES + * int next_node_in(node, mask) Next node past 'node', or wrap to first, + * or MAX_NUMNODES * int first_unset_node(mask) First node not set in mask, or - * MAX_NUMNODES. 
+ * MAX_NUMNODES * * nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set * NODE_MASK_ALL Initializer - all bits set @@ -259,6 +261,13 @@ static inline int __next_node(int n, const nodemask_t *srcp) return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); } +/* + * Find the next present node in src, starting after node n, wrapping around to + * the first node in src if needed. Returns MAX_NUMNODES if src is empty. + */ +#define next_node_in(n, src) __next_node_in((n), &(src)) +int __next_node_in(int node, const nodemask_t *srcp); + static inline void init_nodemask_of_node(nodemask_t *mask, int node) { nodes_clear(*mask); diff --git a/include/linux/nvme.h b/include/linux/nvme.h index a55986f6fe38..7d51b2904cb7 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -21,13 +21,13 @@ enum { NVME_REG_CAP = 0x0000, /* Controller Capabilities */ NVME_REG_VS = 0x0008, /* Version */ NVME_REG_INTMS = 0x000c, /* Interrupt Mask Set */ - NVME_REG_INTMC = 0x0010, /* Interrupt Mask Set */ + NVME_REG_INTMC = 0x0010, /* Interrupt Mask Clear */ NVME_REG_CC = 0x0014, /* Controller Configuration */ NVME_REG_CSTS = 0x001c, /* Controller Status */ NVME_REG_NSSR = 0x0020, /* NVM Subsystem Reset */ NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */ NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */ - NVME_REG_ACQ = 0x0030, /* Admin SQ Base Address */ + NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */ NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */ NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */ }; diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index a4fcc90b0f20..cd93416d762e 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -14,6 +14,10 @@ struct nvmem_device; struct nvmem_cell_info; +typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset, + void *val, size_t bytes); +typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset, + void *val, size_t bytes); struct nvmem_config { struct device *dev; @@ -24,6 +28,12 @@ struct nvmem_config { int ncells; bool read_only; bool root_only; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + int size; + int word_size; + int stride; + void *priv; /* To be only used by old driver/misc/eeprom drivers */ bool compat; struct device *base_dev; diff --git a/include/linux/of.h b/include/linux/of.h index 31758036787c..c7292e8ea080 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -75,6 +75,23 @@ struct of_phandle_args { uint32_t args[MAX_PHANDLE_ARGS]; }; +struct of_phandle_iterator { + /* Common iterator information */ + const char *cells_name; + int cell_count; + const struct device_node *parent; + + /* List size information */ + const __be32 *list_end; + const __be32 *phandle_end; + + /* Current position state */ + const __be32 *cur; + uint32_t cur_count; + phandle phandle; + struct device_node *node; +}; + struct of_reconfig_data { struct device_node *dn; struct property *prop; @@ -334,6 +351,18 @@ extern int of_parse_phandle_with_fixed_args(const struct device_node *np, extern int of_count_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name); +/* phandle iterator functions */ +extern int of_phandle_iterator_init(struct of_phandle_iterator *it, + const struct device_node *np, + const char *list_name, + const char *cells_name, + int cell_count); + +extern int of_phandle_iterator_next(struct of_phandle_iterator *it); +extern int of_phandle_iterator_args(struct 
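The nodemask.h hunk above adds next_node_in(), which behaves like next_node() but wraps around to the first set node and only returns MAX_NUMNODES for an empty mask. A self-contained userspace sketch of that wrap-around behaviour over a plain 64-bit mask (the kernel version operates on nodemask_t and find_next_bit()):

#include <stdio.h>
#include <stdint.h>

#define MAX_NODES 64

/* First set bit strictly above n, or MAX_NODES if there is none. */
static int next_bit(uint64_t mask, int n)
{
    for (int i = n + 1; i < MAX_NODES; i++)
        if (mask & (UINT64_C(1) << i))
            return i;
    return MAX_NODES;
}

/* Next set bit after n, wrapping to the lowest set bit; MAX_NODES if empty. */
static int next_node_in_demo(int n, uint64_t mask)
{
    int node = next_bit(mask, n);

    if (node == MAX_NODES)
        node = next_bit(mask, -1);
    return node;
}

int main(void)
{
    uint64_t mask = (1u << 1) | (1u << 4) | (1u << 9);
    int node = 9;                       /* start at the highest set node */

    for (int i = 0; i < 5; i++) {
        node = next_node_in_demo(node, mask);
        printf("%d ", node);            /* prints: 1 4 9 1 4 */
    }
    printf("\n");
    return 0;
}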
of_phandle_iterator *it, + uint32_t *args, + int size); + extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)); extern int of_alias_get_id(struct device_node *np, const char *stem); extern int of_alias_get_highest_id(const char *stem); @@ -608,6 +637,27 @@ static inline int of_count_phandle_with_args(struct device_node *np, return -ENOSYS; } +static inline int of_phandle_iterator_init(struct of_phandle_iterator *it, + const struct device_node *np, + const char *list_name, + const char *cells_name, + int cell_count) +{ + return -ENOSYS; +} + +static inline int of_phandle_iterator_next(struct of_phandle_iterator *it) +{ + return -ENOSYS; +} + +static inline int of_phandle_iterator_args(struct of_phandle_iterator *it, + uint32_t *args, + int size) +{ + return 0; +} + static inline int of_alias_get_id(struct device_node *np, const char *stem) { return -ENOSYS; @@ -685,6 +735,15 @@ static inline int of_node_to_nid(struct device_node *device) } #endif +#ifdef CONFIG_OF_NUMA +extern int of_numa_init(void); +#else +static inline int of_numa_init(void) +{ + return -ENOSYS; +} +#endif + static inline struct device_node *of_find_matching_node( struct device_node *from, const struct of_device_id *matches) @@ -868,6 +927,12 @@ static inline int of_property_read_s32(const struct device_node *np, return of_property_read_u32(np, propname, (u32*) out_value); } +#define of_for_each_phandle(it, err, np, ln, cn, cc) \ + for (of_phandle_iterator_init((it), (np), (ln), (cn), (cc)), \ + err = of_phandle_iterator_next(it); \ + err == 0; \ + err = of_phandle_iterator_next(it)) + #define of_property_for_each_u32(np, propname, prop, p, u) \ for (prop = of_find_property(np, propname, NULL), \ p = of_prop_next_u32(prop, NULL, &u); \ diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 01c0a556448b..37864734ca50 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -47,10 +47,6 @@ void __iomem *of_io_request_and_map(struct device_node *device, extern const __be32 *of_get_address(struct device_node *dev, int index, u64 *size, unsigned int *flags); -extern int pci_register_io_range(phys_addr_t addr, resource_size_t size); -extern unsigned long pci_address_to_pio(phys_addr_t addr); -extern phys_addr_t pci_pio_to_address(unsigned long pio); - extern int of_pci_range_parser_init(struct of_pci_range_parser *parser, struct device_node *node); extern struct of_pci_range *of_pci_range_parser_one( @@ -86,11 +82,6 @@ static inline const __be32 *of_get_address(struct device_node *dev, int index, return NULL; } -static inline phys_addr_t pci_pio_to_address(unsigned long pio) -{ - return 0; -} - static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser, struct device_node *node) { diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index 2fbe8682a66f..901ec01c9fba 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -37,8 +37,9 @@ extern bool of_fdt_is_big_endian(const void *blob, unsigned long node); extern int of_fdt_match(const void *blob, unsigned long node, const char *const *compat); -extern void of_fdt_unflatten_tree(const unsigned long *blob, - struct device_node **mynodes); +extern void *of_fdt_unflatten_tree(const unsigned long *blob, + struct device_node *dad, + struct device_node **mynodes); /* TBD: Temporary export of fdt globals - remove when code fully merged */ extern int __initdata dt_root_addr_cells; diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h index f8bcd0e21a26..bb3a5a2cd570 100644 --- 
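The of.h changes above introduce a phandle iterator plus the of_for_each_phandle() helper macro. A kernel-style sketch of how a caller might walk a phandle list with it; it assumes a kernel build environment, and the property names "dummy-list"/"#dummy-cells" are invented for the example:

#include <linux/kernel.h>
#include <linux/of.h>

static int count_dummy_phandles(struct device_node *np)
{
    struct of_phandle_iterator it;
    uint32_t args[MAX_PHANDLE_ARGS];
    int err, count = 0;

    of_for_each_phandle(&it, err, np, "dummy-list", "#dummy-cells", 0) {
        /* it.node and it.phandle describe the entry just parsed */
        int nargs = of_phandle_iterator_args(&it, args, MAX_PHANDLE_ARGS);

        pr_debug("entry %d has %d argument cells\n", count, nargs);
        count++;
    }

    /*
     * Once the loop ends, err holds a negative errno; a full caller would
     * distinguish "end of list" from a malformed property here.
     */
    return count;
}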
a/include/linux/of_graph.h +++ b/include/linux/of_graph.h @@ -15,6 +15,7 @@ #define __LINUX_OF_GRAPH_H #include <linux/types.h> +#include <linux/errno.h> /** * struct of_endpoint - the OF graph endpoint data structure diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h index ffbe4707d4aa..bd02b44902d0 100644 --- a/include/linux/of_iommu.h +++ b/include/linux/of_iommu.h @@ -12,7 +12,7 @@ extern int of_get_dma_window(struct device_node *dn, const char *prefix, size_t *size); extern void of_iommu_init(void); -extern struct iommu_ops *of_iommu_configure(struct device *dev, +extern const struct iommu_ops *of_iommu_configure(struct device *dev, struct device_node *master_np); #else @@ -25,7 +25,7 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix, } static inline void of_iommu_init(void) { } -static inline struct iommu_ops *of_iommu_configure(struct device *dev, +static inline const struct iommu_ops *of_iommu_configure(struct device *dev, struct device_node *master_np) { return NULL; @@ -33,8 +33,8 @@ static inline struct iommu_ops *of_iommu_configure(struct device *dev, #endif /* CONFIG_OF_IOMMU */ -void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops); -struct iommu_ops *of_iommu_get_ops(struct device_node *np); +void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops); +const struct iommu_ops *of_iommu_get_ops(struct device_node *np); extern struct of_device_id __iommu_of_table; diff --git a/include/linux/of_mtd.h b/include/linux/of_mtd.h deleted file mode 100644 index e266caa36402..000000000000 --- a/include/linux/of_mtd.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com> - * - * OF helpers for mtd. - * - * This file is released under the GPLv2 - */ - -#ifndef __LINUX_OF_MTD_H -#define __LINUX_OF_MTD_H - -#ifdef CONFIG_OF_MTD - -#include <linux/of.h> -int of_get_nand_ecc_mode(struct device_node *np); -int of_get_nand_ecc_step_size(struct device_node *np); -int of_get_nand_ecc_strength(struct device_node *np); -int of_get_nand_bus_width(struct device_node *np); -bool of_get_nand_on_flash_bbt(struct device_node *np); - -#else /* CONFIG_OF_MTD */ - -static inline int of_get_nand_ecc_mode(struct device_node *np) -{ - return -ENOSYS; -} - -static inline int of_get_nand_ecc_step_size(struct device_node *np) -{ - return -ENOSYS; -} - -static inline int of_get_nand_ecc_strength(struct device_node *np) -{ - return -ENOSYS; -} - -static inline int of_get_nand_bus_width(struct device_node *np) -{ - return -ENOSYS; -} - -static inline bool of_get_nand_on_flash_bbt(struct device_node *np) -{ - return false; -} - -#endif /* CONFIG_OF_MTD */ - -#endif /* __LINUX_OF_MTD_H */ diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h index d833eb4dd446..9e9d79e8efa5 100644 --- a/include/linux/omap-gpmc.h +++ b/include/linux/omap-gpmc.h @@ -7,161 +7,53 @@ * option) any later version. 
*/ -/* Maximum Number of Chip Selects */ -#define GPMC_CS_NUM 8 +#include <linux/platform_data/gpmc-omap.h> #define GPMC_CONFIG_WP 0x00000005 -#define GPMC_IRQ_FIFOEVENTENABLE 0x01 -#define GPMC_IRQ_COUNT_EVENT 0x02 - -#define GPMC_BURST_4 4 /* 4 word burst */ -#define GPMC_BURST_8 8 /* 8 word burst */ -#define GPMC_BURST_16 16 /* 16 word burst */ -#define GPMC_DEVWIDTH_8BIT 1 /* 8-bit device width */ -#define GPMC_DEVWIDTH_16BIT 2 /* 16-bit device width */ -#define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */ -#define GPMC_MUX_AD 2 /* Addr-Data multiplex */ - -/* bool type time settings */ -struct gpmc_bool_timings { - bool cycle2cyclediffcsen; - bool cycle2cyclesamecsen; - bool we_extra_delay; - bool oe_extra_delay; - bool adv_extra_delay; - bool cs_extra_delay; - bool time_para_granularity; -}; +/* IRQ numbers in GPMC IRQ domain for legacy boot use */ +#define GPMC_IRQ_FIFOEVENTENABLE 0 +#define GPMC_IRQ_COUNT_EVENT 1 -/* - * Note that all values in this struct are in nanoseconds except sync_clk - * (which is in picoseconds), while the register values are in gpmc_fck cycles. +/** + * gpmc_nand_ops - Interface between NAND and GPMC + * @nand_write_buffer_empty: get the NAND write buffer empty status. */ -struct gpmc_timings { - /* Minimum clock period for synchronous mode (in picoseconds) */ - u32 sync_clk; - - /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */ - u32 cs_on; /* Assertion time */ - u32 cs_rd_off; /* Read deassertion time */ - u32 cs_wr_off; /* Write deassertion time */ - - /* ADV signal timings corresponding to GPMC_CONFIG3 */ - u32 adv_on; /* Assertion time */ - u32 adv_rd_off; /* Read deassertion time */ - u32 adv_wr_off; /* Write deassertion time */ - u32 adv_aad_mux_on; /* ADV assertion time for AAD */ - u32 adv_aad_mux_rd_off; /* ADV read deassertion time for AAD */ - u32 adv_aad_mux_wr_off; /* ADV write deassertion time for AAD */ - - /* WE signals timings corresponding to GPMC_CONFIG4 */ - u32 we_on; /* WE assertion time */ - u32 we_off; /* WE deassertion time */ - - /* OE signals timings corresponding to GPMC_CONFIG4 */ - u32 oe_on; /* OE assertion time */ - u32 oe_off; /* OE deassertion time */ - u32 oe_aad_mux_on; /* OE assertion time for AAD */ - u32 oe_aad_mux_off; /* OE deassertion time for AAD */ - - /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */ - u32 page_burst_access; /* Multiple access word delay */ - u32 access; /* Start-cycle to first data valid delay */ - u32 rd_cycle; /* Total read cycle time */ - u32 wr_cycle; /* Total write cycle time */ - - u32 bus_turnaround; - u32 cycle2cycle_delay; - - u32 wait_monitoring; - u32 clk_activation; - - /* The following are only on OMAP3430 */ - u32 wr_access; /* WRACCESSTIME */ - u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */ - - struct gpmc_bool_timings bool_timings; +struct gpmc_nand_ops { + bool (*nand_writebuffer_empty)(void); }; -/* Device timings in picoseconds */ -struct gpmc_device_timings { - u32 t_ceasu; /* address setup to CS valid */ - u32 t_avdasu; /* address setup to ADV valid */ - /* XXX: try to combine t_avdp_r & t_avdp_w. Issue is - * of tusb using these timings even for sync whilst - * ideally for adv_rd/(wr)_off it should have considered - * t_avdh instead. This indirectly necessitates r/w - * variations of t_avdp as it is possible to have one - * sync & other async - */ - u32 t_avdp_r; /* ADV low time (what about t_cer ?) 
*/ - u32 t_avdp_w; - u32 t_aavdh; /* address hold time */ - u32 t_oeasu; /* address setup to OE valid */ - u32 t_aa; /* access time from ADV assertion */ - u32 t_iaa; /* initial access time */ - u32 t_oe; /* access time from OE assertion */ - u32 t_ce; /* access time from CS asertion */ - u32 t_rd_cycle; /* read cycle time */ - u32 t_cez_r; /* read CS deassertion to high Z */ - u32 t_cez_w; /* write CS deassertion to high Z */ - u32 t_oez; /* OE deassertion to high Z */ - u32 t_weasu; /* address setup to WE valid */ - u32 t_wpl; /* write assertion time */ - u32 t_wph; /* write deassertion time */ - u32 t_wr_cycle; /* write cycle time */ - - u32 clk; - u32 t_bacc; /* burst access valid clock to output delay */ - u32 t_ces; /* CS setup time to clk */ - u32 t_avds; /* ADV setup time to clk */ - u32 t_avdh; /* ADV hold time from clk */ - u32 t_ach; /* address hold time from clk */ - u32 t_rdyo; /* clk to ready valid */ - - u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */ - u32 t_ce_avd; /* CS on to ADV on delay */ - - /* XXX: check the possibility of combining - * cyc_aavhd_oe & cyc_aavdh_we - */ - u8 cyc_aavdh_oe;/* read address hold time in cycles */ - u8 cyc_aavdh_we;/* write address hold time in cycles */ - u8 cyc_oe; /* access time from OE assertion in cycles */ - u8 cyc_wpl; /* write deassertion time in cycles */ - u32 cyc_iaa; /* initial access time in cycles */ - - /* extra delays */ - bool ce_xdelay; - bool avd_xdelay; - bool oe_xdelay; - bool we_xdelay; -}; +struct gpmc_nand_regs; -struct gpmc_settings { - bool burst_wrap; /* enables wrap bursting */ - bool burst_read; /* enables read page/burst mode */ - bool burst_write; /* enables write page/burst mode */ - bool device_nand; /* device is NAND */ - bool sync_read; /* enables synchronous reads */ - bool sync_write; /* enables synchronous writes */ - bool wait_on_read; /* monitor wait on reads */ - bool wait_on_write; /* monitor wait on writes */ - u32 burst_len; /* page/burst length */ - u32 device_width; /* device bus width (8 or 16 bit) */ - u32 mux_add_data; /* multiplex address & data */ - u32 wait_pin; /* wait-pin to be used */ -}; +#if IS_ENABLED(CONFIG_OMAP_GPMC) +struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, + int cs); +#else +static inline gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, + int cs) +{ + return NULL; +} +#endif /* CONFIG_OMAP_GPMC */ + +/*--------------------------------*/ + +/* deprecated APIs */ +#if IS_ENABLED(CONFIG_OMAP_GPMC) +void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs); +#else +static inline void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs) +{ +} +#endif /* CONFIG_OMAP_GPMC */ +/*--------------------------------*/ extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t, struct gpmc_settings *gpmc_s, struct gpmc_device_timings *dev_t); -struct gpmc_nand_regs; struct device_node; -extern void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs); extern int gpmc_get_client_irq(unsigned irq_config); extern unsigned int gpmc_ticks_to_ns(unsigned int ticks); diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h index 587bbdd31f5a..c726bd833761 100644 --- a/include/linux/omap-mailbox.h +++ b/include/linux/omap-mailbox.h @@ -21,8 +21,6 @@ struct mbox_client; struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl, const char *chan_name); -void omap_mbox_save_ctx(struct mbox_chan *chan); -void omap_mbox_restore_ctx(struct mbox_chan *chan); void omap_mbox_enable_irq(struct mbox_chan *chan, 
omap_mbox_irq_t irq); void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq); diff --git a/include/linux/oom.h b/include/linux/oom.h index 628a43242a34..83469522690a 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -50,28 +50,33 @@ enum oom_scan_t { OOM_SCAN_SELECT, /* always select this thread first */ }; -/* Thread is the potential origin of an oom condition; kill first on oom */ -#define OOM_FLAG_ORIGIN ((__force oom_flags_t)0x1) - extern struct mutex oom_lock; static inline void set_current_oom_origin(void) { - current->signal->oom_flags |= OOM_FLAG_ORIGIN; + current->signal->oom_flag_origin = true; } static inline void clear_current_oom_origin(void) { - current->signal->oom_flags &= ~OOM_FLAG_ORIGIN; + current->signal->oom_flag_origin = false; } static inline bool oom_task_origin(const struct task_struct *p) { - return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN); + return p->signal->oom_flag_origin; } extern void mark_oom_victim(struct task_struct *tsk); +#ifdef CONFIG_MMU +extern void try_oom_reaper(struct task_struct *tsk); +#else +static inline void try_oom_reaper(struct task_struct *tsk) +{ +} +#endif + extern unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, const nodemask_t *nodemask, unsigned long totalpages); @@ -102,13 +107,24 @@ extern struct task_struct *find_lock_task_mm(struct task_struct *p); static inline bool task_will_free_mem(struct task_struct *task) { + struct signal_struct *sig = task->signal; + /* * A coredumping process may sleep for an extended period in exit_mm(), * so the oom killer cannot assume that the process will promptly exit * and release memory. */ - return (task->flags & PF_EXITING) && - !(task->signal->flags & SIGNAL_GROUP_COREDUMP); + if (sig->flags & SIGNAL_GROUP_COREDUMP) + return false; + + if (!(task->flags & PF_EXITING)) + return false; + + /* Make sure that the whole thread group is going down */ + if (!thread_group_empty(task) && !(sig->flags & SIGNAL_GROUP_EXIT)) + return false; + + return true; } /* sysctls */ diff --git a/include/linux/padata.h b/include/linux/padata.h index 438694650471..113ee626a4dc 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -175,11 +175,6 @@ extern int padata_do_parallel(struct padata_instance *pinst, extern void padata_do_serial(struct padata_priv *padata); extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); -extern int padata_set_cpumasks(struct padata_instance *pinst, - cpumask_var_t pcpumask, - cpumask_var_t cbcpumask); -extern int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask); -extern int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask); extern int padata_start(struct padata_instance *pinst); extern void padata_stop(struct padata_instance *pinst); extern int padata_register_cpumask_notifier(struct padata_instance *pinst, diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 6b052aa7b5b7..e5a32445f930 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -371,10 +371,15 @@ PAGEFLAG(Idle, idle, PF_ANY) #define PAGE_MAPPING_KSM 2 #define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM) +static __always_inline int PageAnonHead(struct page *page) +{ + return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; +} + static __always_inline int PageAnon(struct page *page) { page = compound_head(page); - return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; + return PageAnonHead(page); } 
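PageAnonHead()/PageAnon() above rely on tag bits stored in the low bits of page->mapping (PAGE_MAPPING_ANON is 0x1 in the kernel, PAGE_MAPPING_KSM is 0x2, so both fit below pointer alignment). A standalone sketch of that pointer-tagging scheme with stand-in types:

#include <stdio.h>
#include <stdint.h>

#define MAPPING_ANON  0x1UL   /* mirrors PAGE_MAPPING_ANON */
#define MAPPING_KSM   0x2UL   /* mirrors PAGE_MAPPING_KSM */
#define MAPPING_FLAGS (MAPPING_ANON | MAPPING_KSM)

static struct fake_anon_vma { int dummy; } anon_vma;

int main(void)
{
    /* &anon_vma is at least 4-byte aligned, so bits 0-1 are free for tags. */
    uintptr_t mapping = (uintptr_t)&anon_vma | MAPPING_ANON;

    printf("anon bit set: %d\n", (int)((mapping & MAPPING_ANON) != 0));
    printf("tagged value: %p\n", (void *)mapping);
    printf("real pointer: %p\n", (void *)(mapping & ~MAPPING_FLAGS));
    return 0;
}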
#ifdef CONFIG_KSM @@ -474,7 +479,7 @@ static inline void ClearPageCompound(struct page *page) } #endif -#define PG_head_mask ((1L << PG_head)) +#define PG_head_mask ((1UL << PG_head)) #ifdef CONFIG_HUGETLB_PAGE int PageHuge(struct page *page); @@ -665,7 +670,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) } #ifdef CONFIG_MMU -#define __PG_MLOCKED (1 << PG_mlocked) +#define __PG_MLOCKED (1UL << PG_mlocked) #else #define __PG_MLOCKED 0 #endif @@ -675,11 +680,11 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) * these flags set. It they are, there is a problem. */ #define PAGE_FLAGS_CHECK_AT_FREE \ - (1 << PG_lru | 1 << PG_locked | \ - 1 << PG_private | 1 << PG_private_2 | \ - 1 << PG_writeback | 1 << PG_reserved | \ - 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ - 1 << PG_unevictable | __PG_MLOCKED) + (1UL << PG_lru | 1UL << PG_locked | \ + 1UL << PG_private | 1UL << PG_private_2 | \ + 1UL << PG_writeback | 1UL << PG_reserved | \ + 1UL << PG_slab | 1UL << PG_swapcache | 1UL << PG_active | \ + 1UL << PG_unevictable | __PG_MLOCKED) /* * Flags checked when a page is prepped for return by the page allocator. @@ -690,10 +695,10 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) * alloc-free cycle to prevent from reusing the page. */ #define PAGE_FLAGS_CHECK_AT_PREP \ - (((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) + (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) #define PAGE_FLAGS_PRIVATE \ - (1 << PG_private | 1 << PG_private_2) + (1UL << PG_private | 1UL << PG_private_2) /** * page_has_private - Determine if page has private stuff * @page: The page to be checked diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h index e596d5d9540e..8b5e0a9f2431 100644 --- a/include/linux/page_ref.h +++ b/include/linux/page_ref.h @@ -63,17 +63,17 @@ static inline void __page_ref_unfreeze(struct page *page, int v) static inline int page_ref_count(struct page *page) { - return atomic_read(&page->_count); + return atomic_read(&page->_refcount); } static inline int page_count(struct page *page) { - return atomic_read(&compound_head(page)->_count); + return atomic_read(&compound_head(page)->_refcount); } static inline void set_page_count(struct page *page, int v) { - atomic_set(&page->_count, v); + atomic_set(&page->_refcount, v); if (page_ref_tracepoint_active(__tracepoint_page_ref_set)) __page_ref_set(page, v); } @@ -89,35 +89,35 @@ static inline void init_page_count(struct page *page) static inline void page_ref_add(struct page *page, int nr) { - atomic_add(nr, &page->_count); + atomic_add(nr, &page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) __page_ref_mod(page, nr); } static inline void page_ref_sub(struct page *page, int nr) { - atomic_sub(nr, &page->_count); + atomic_sub(nr, &page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) __page_ref_mod(page, -nr); } static inline void page_ref_inc(struct page *page) { - atomic_inc(&page->_count); + atomic_inc(&page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) __page_ref_mod(page, 1); } static inline void page_ref_dec(struct page *page) { - atomic_dec(&page->_count); + atomic_dec(&page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) __page_ref_mod(page, -1); } static inline int page_ref_sub_and_test(struct page *page, int nr) { - int ret = atomic_sub_and_test(nr, &page->_count); + int ret = atomic_sub_and_test(nr, &page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test)) 
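The page-flags hunks above convert the flag masks from int expressions (1 << PG_foo) to unsigned long ones (1UL << PG_foo). A small standalone program showing why the distinction matters once a flag bit reaches 31: the int form overflows into the sign bit and sign-extends when widened, while the 1UL form yields the intended single-bit mask; bit 31 is used here as a stand-in flag number.

#include <stdio.h>

int main(void)
{
    int bit = 31;                       /* stand-in for a high page flag */

    /*
     * The int shift overflows into the sign bit, and the negative result
     * sign-extends when widened to unsigned long on LP64 targets; for
     * bit >= 32 the int form would be undefined outright.
     */
    unsigned long from_int = 1 << bit;
    unsigned long from_ul  = 1UL << bit;    /* the intended single-bit mask */

    printf("1   << 31 widened: %#lx\n", from_int);
    printf("1UL << 31        : %#lx\n", from_ul);
    return 0;
}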
__page_ref_mod_and_test(page, -nr, ret); @@ -126,7 +126,7 @@ static inline int page_ref_sub_and_test(struct page *page, int nr) static inline int page_ref_dec_and_test(struct page *page) { - int ret = atomic_dec_and_test(&page->_count); + int ret = atomic_dec_and_test(&page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test)) __page_ref_mod_and_test(page, -1, ret); @@ -135,7 +135,7 @@ static inline int page_ref_dec_and_test(struct page *page) static inline int page_ref_dec_return(struct page *page) { - int ret = atomic_dec_return(&page->_count); + int ret = atomic_dec_return(&page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return)) __page_ref_mod_and_return(page, -1, ret); @@ -144,7 +144,7 @@ static inline int page_ref_dec_return(struct page *page) static inline int page_ref_add_unless(struct page *page, int nr, int u) { - int ret = atomic_add_unless(&page->_count, nr, u); + int ret = atomic_add_unless(&page->_refcount, nr, u); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless)) __page_ref_mod_unless(page, nr, ret); @@ -153,7 +153,7 @@ static inline int page_ref_add_unless(struct page *page, int nr, int u) static inline int page_ref_freeze(struct page *page, int count) { - int ret = likely(atomic_cmpxchg(&page->_count, count, 0) == count); + int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count); if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze)) __page_ref_freeze(page, count, ret); @@ -165,7 +165,7 @@ static inline void page_ref_unfreeze(struct page *page, int count) VM_BUG_ON_PAGE(page_count(page) != 0, page); VM_BUG_ON(count == 0); - atomic_set(&page->_count, count); + atomic_set(&page->_refcount, count); if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze)) __page_ref_unfreeze(page, count); } diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 7e1ab155c67c..97354102794d 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -90,12 +90,12 @@ void release_pages(struct page **pages, int nr, bool cold); /* * speculatively take a reference to a page. - * If the page is free (_count == 0), then _count is untouched, and 0 - * is returned. Otherwise, _count is incremented by 1 and 1 is returned. + * If the page is free (_refcount == 0), then _refcount is untouched, and 0 + * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned. * * This function must be called inside the same rcu_read_lock() section as has * been used to lookup the page in the pagecache radix-tree (or page table): - * this allows allocators to use a synchronize_rcu() to stabilize _count. + * this allows allocators to use a synchronize_rcu() to stabilize _refcount. * * Unless an RCU grace period has passed, the count of all pages coming out * of the allocator must be considered unstable. page_count may return higher @@ -111,7 +111,7 @@ void release_pages(struct page **pages, int nr, bool cold); * 2. conditionally increment refcount * 3. check the page is still in pagecache (if no, goto 1) * - * Remove-side that cares about stability of _count (eg. reclaim) has the + * Remove-side that cares about stability of _refcount (eg. reclaim) has the * following (with tree_lock held for write): * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg) * B. 
remove page from pagecache @@ -518,33 +518,27 @@ void page_endio(struct page *page, int rw, int err); extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter); /* - * Fault a userspace page into pagetables. Return non-zero on a fault. - * - * This assumes that two userspace pages are always sufficient. + * Fault one or two userspace pages into pagetables. + * Return -EINVAL if more than two pages would be needed. + * Return non-zero on a fault. */ static inline int fault_in_pages_writeable(char __user *uaddr, int size) { - int ret; + int span, ret; if (unlikely(size == 0)) return 0; + span = offset_in_page(uaddr) + size; + if (span > 2 * PAGE_SIZE) + return -EINVAL; /* * Writing zeroes into userspace here is OK, because we know that if * the zero gets there, we'll be overwriting it. */ ret = __put_user(0, uaddr); - if (ret == 0) { - char __user *end = uaddr + size - 1; - - /* - * If the page was already mapped, this will get a cache miss - * for sure, so try to avoid doing it. - */ - if (((unsigned long)uaddr & PAGE_MASK) != - ((unsigned long)end & PAGE_MASK)) - ret = __put_user(0, end); - } + if (ret == 0 && span > PAGE_SIZE) + ret = __put_user(0, uaddr + size - 1); return ret; } diff --git a/include/linux/pci.h b/include/linux/pci.h index 932ec74909c6..b67e4df20801 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -166,8 +166,6 @@ enum pci_dev_flags { PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2), /* Flag for quirk use to store if quirk-specific ACS is enabled */ PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3), - /* Flag to indicate the device uses dma_alias_devfn */ - PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4), /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */ PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5), /* Do not use bus resets for device */ @@ -273,7 +271,7 @@ struct pci_dev { u8 rom_base_reg; /* which config register controls the ROM */ u8 pin; /* which interrupt pin this device uses */ u16 pcie_flags_reg; /* cached PCIe Capabilities Register */ - u8 dma_alias_devfn;/* devfn of DMA alias, if any */ + unsigned long *dma_alias_mask;/* mask of enabled devfn aliases */ struct pci_driver *driver; /* which driver has allocated this device */ u64 dma_mask; /* Mask of the bits of bus address this @@ -1165,6 +1163,9 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus, void *alignf_data); +int pci_register_io_range(phys_addr_t addr, resource_size_t size); +unsigned long pci_address_to_pio(phys_addr_t addr); +phys_addr_t pci_pio_to_address(unsigned long pio); int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr); static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar) @@ -1481,6 +1482,8 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) { return -EIO; } static inline void pci_release_regions(struct pci_dev *dev) { } +static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; } + static inline void pci_block_cfg_access(struct pci_dev *dev) { } static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev) { return 0; } @@ -1664,7 +1667,7 @@ enum pci_fixup_pass { #ifdef CONFIG_PCI_QUIRKS void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); -void pci_dev_specific_enable_acs(struct pci_dev *dev); +int pci_dev_specific_enable_acs(struct pci_dev *dev); #else static inline void 
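fault_in_pages_writeable() above is rewritten around a single span calculation: offset_in_page(uaddr) + size may cover at most two pages, and the last byte is only touched when the range actually crosses a page boundary. A standalone sketch of that arithmetic with illustrative addresses (the kernel returns -EINVAL where -1 is used below):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE_DEMO    4096UL
#define offset_in_page(a) ((uintptr_t)(a) & (PAGE_SIZE_DEMO - 1))

/* Pages to fault in (1 or 2), or -1 if the range spans more than two. */
static int pages_to_fault(uintptr_t uaddr, size_t size)
{
    size_t span = offset_in_page(uaddr) + size;

    if (span > 2 * PAGE_SIZE_DEMO)
        return -1;
    return span > PAGE_SIZE_DEMO ? 2 : 1;
}

int main(void)
{
    printf("%d\n", pages_to_fault(0x1000, 100));       /* 1: single page */
    printf("%d\n", pages_to_fault(0x1f00, 512));       /* 2: crosses a boundary */
    printf("%d\n", pages_to_fault(0x1f00, 3 * 4096));  /* -1: three pages */
    return 0;
}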
pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) { } @@ -1673,7 +1676,10 @@ static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev, { return -ENOTTY; } -static inline void pci_dev_specific_enable_acs(struct pci_dev *dev) { } +static inline int pci_dev_specific_enable_acs(struct pci_dev *dev) +{ + return -ENOTTY; +} #endif void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); @@ -1989,6 +1995,8 @@ static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev) } #endif +void pci_add_dma_alias(struct pci_dev *dev, u8 devfn); +bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2); int pci_for_each_dma_alias(struct pci_dev *pdev, int (*fn)(struct pci_dev *pdev, u16 alias, void *data), void *data); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 247da8c95860..c58752fe16c4 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2604,6 +2604,24 @@ #define PCI_DEVICE_ID_INTEL_82441 0x1237 #define PCI_DEVICE_ID_INTEL_82380FB 0x124b #define PCI_DEVICE_ID_INTEL_82439 0x1250 +#define PCI_DEVICE_ID_INTEL_LIGHT_RIDGE 0x1513 /* Tbt 1 Gen 1 */ +#define PCI_DEVICE_ID_INTEL_EAGLE_RIDGE 0x151a +#define PCI_DEVICE_ID_INTEL_LIGHT_PEAK 0x151b +#define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C 0x1547 /* Tbt 1 Gen 2 */ +#define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C 0x1548 +#define PCI_DEVICE_ID_INTEL_PORT_RIDGE 0x1549 +#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_NHI 0x1566 /* Tbt 1 Gen 3 */ +#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE 0x1567 +#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_NHI 0x1568 +#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE 0x1569 +#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI 0x156a /* Thunderbolt 2 */ +#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE 0x156b +#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI 0x156c +#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE 0x156d +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI 0x1575 /* Thunderbolt 3 */ +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE 0x1576 +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577 +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578 #define PCI_DEVICE_ID_INTEL_80960_RP 0x1960 #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h index 4f1089f2cc98..afcd130ab3a9 100644 --- a/include/linux/pcieport_if.h +++ b/include/linux/pcieport_if.h @@ -21,6 +21,8 @@ #define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT) #define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */ #define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT) +#define PCIE_PORT_SERVICE_DPC_SHIFT 4 /* Downstream Port Containment */ +#define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT) struct pcie_device { int irq; /* Service IRQ/MSI/MSI-X Vector */ diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 4bc6dafb703e..56939d3f6e53 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -129,7 +129,4 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr); (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ __alignof__(type)) -/* To avoid include hell, as printk can not declare this, we declare it here */ -DECLARE_PER_CPU(printk_func_t, printk_func); - #endif /* __LINUX_PERCPU_H */ diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 4196c90a3c88..d28ac05c7f92 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -105,6 
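The pci.h hunks above replace the single dma_alias_devfn byte with a per-device bitmap of devfn aliases and add pci_add_dma_alias(). A standalone sketch of the underlying data structure, with invented names; it mirrors only the bitmap bookkeeping, not the real PCI plumbing:

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_LONG_DEMO (8 * sizeof(unsigned long))
#define DEVFN_MAX          256   /* devfn = 5-bit slot + 3-bit function */

struct fake_pdev {
    uint8_t devfn;
    unsigned long dma_alias_mask[DEVFN_MAX / BITS_PER_LONG_DEMO];
};

static void add_dma_alias(struct fake_pdev *dev, uint8_t devfn)
{
    dev->dma_alias_mask[devfn / BITS_PER_LONG_DEMO] |=
        1UL << (devfn % BITS_PER_LONG_DEMO);
}

static int is_dma_alias(const struct fake_pdev *dev, uint8_t devfn)
{
    return (dev->dma_alias_mask[devfn / BITS_PER_LONG_DEMO] >>
            (devfn % BITS_PER_LONG_DEMO)) & 1;
}

int main(void)
{
    struct fake_pdev dev = { .devfn = 0x08 };   /* slot 1, function 0 */

    add_dma_alias(&dev, 0x09);   /* the old u8 field could record only */
    add_dma_alias(&dev, 0x0a);   /* one of these two aliases           */

    printf("0x09: %d  0x0a: %d  0x0b: %d\n",
           is_dma_alias(&dev, 0x09), is_dma_alias(&dev, 0x0a),
           is_dma_alias(&dev, 0x0b));
    return 0;
}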
+105,8 @@ struct arm_pmu { struct mutex reserve_mutex; u64 max_period; bool secure_access; /* 32-bit ARM only */ +#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 + DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); struct platform_device *plat_device; struct pmu_hw_events __percpu *hw_events; struct notifier_block hotplug_nb; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 6b87be908790..1a827cecd62f 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -984,8 +984,6 @@ static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned lo */ static inline void perf_fetch_caller_regs(struct pt_regs *regs) { - memset(regs, 0, sizeof(*regs)); - perf_arch_fetch_caller_regs(regs, CALLER_ADDR0); } @@ -1140,7 +1138,7 @@ static inline bool perf_paranoid_kernel(void) } extern void perf_event_init(void); -extern void perf_tp_event(u64 addr, u64 count, void *record, +extern void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, struct pt_regs *regs, struct hlist_head *head, int rctx, struct task_struct *task); diff --git a/include/linux/phy.h b/include/linux/phy.h index 2abd7918f64f..2d24b283aa2d 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -805,6 +805,10 @@ void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd); +int phy_ethtool_ksettings_get(struct phy_device *phydev, + struct ethtool_link_ksettings *cmd); +int phy_ethtool_ksettings_set(struct phy_device *phydev, + const struct ethtool_link_ksettings *cmd); int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); int phy_start_interrupts(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); @@ -825,6 +829,10 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data); int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol); void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol); +int phy_ethtool_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd); +int phy_ethtool_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd); int __init mdio_bus_init(void); void mdio_bus_exit(void); diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index 8cf05e341cff..a810f2a18842 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -77,6 +77,7 @@ struct phy { */ struct phy_provider { struct device *dev; + struct device_node *children; struct module *owner; struct list_head list; struct phy * (*of_xlate)(struct device *dev, @@ -93,10 +94,16 @@ struct phy_lookup { #define to_phy(a) (container_of((a), struct phy, dev)) #define of_phy_provider_register(dev, xlate) \ - __of_phy_provider_register((dev), THIS_MODULE, (xlate)) + __of_phy_provider_register((dev), NULL, THIS_MODULE, (xlate)) #define devm_of_phy_provider_register(dev, xlate) \ - __devm_of_phy_provider_register((dev), THIS_MODULE, (xlate)) + __devm_of_phy_provider_register((dev), NULL, THIS_MODULE, (xlate)) + +#define of_phy_provider_register_full(dev, children, xlate) \ + __of_phy_provider_register(dev, children, THIS_MODULE, xlate) + +#define devm_of_phy_provider_register_full(dev, children, xlate) \ + __devm_of_phy_provider_register(dev, children, THIS_MODULE, xlate) static inline void phy_set_drvdata(struct phy *phy, void 
*data) { @@ -147,11 +154,13 @@ struct phy *devm_phy_create(struct device *dev, struct device_node *node, void phy_destroy(struct phy *phy); void devm_phy_destroy(struct device *dev, struct phy *phy); struct phy_provider *__of_phy_provider_register(struct device *dev, - struct module *owner, struct phy * (*of_xlate)(struct device *dev, - struct of_phandle_args *args)); + struct device_node *children, struct module *owner, + struct phy * (*of_xlate)(struct device *dev, + struct of_phandle_args *args)); struct phy_provider *__devm_of_phy_provider_register(struct device *dev, - struct module *owner, struct phy * (*of_xlate)(struct device *dev, - struct of_phandle_args *args)); + struct device_node *children, struct module *owner, + struct phy * (*of_xlate)(struct device *dev, + struct of_phandle_args *args)); void of_phy_provider_unregister(struct phy_provider *phy_provider); void devm_of_phy_provider_unregister(struct device *dev, struct phy_provider *phy_provider); @@ -312,15 +321,17 @@ static inline void devm_phy_destroy(struct device *dev, struct phy *phy) } static inline struct phy_provider *__of_phy_provider_register( - struct device *dev, struct module *owner, struct phy * (*of_xlate)( - struct device *dev, struct of_phandle_args *args)) + struct device *dev, struct device_node *children, struct module *owner, + struct phy * (*of_xlate)(struct device *dev, + struct of_phandle_args *args)) { return ERR_PTR(-ENOSYS); } static inline struct phy_provider *__devm_of_phy_provider_register(struct device - *dev, struct module *owner, struct phy * (*of_xlate)(struct device *dev, - struct of_phandle_args *args)) + *dev, struct device_node *children, struct module *owner, + struct phy * (*of_xlate)(struct device *dev, + struct of_phandle_args *args)) { return ERR_PTR(-ENOSYS); } diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h new file mode 100644 index 000000000000..8e1a57a78d9f --- /dev/null +++ b/include/linux/phy/tegra/xusb.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef PHY_TEGRA_XUSB_H +#define PHY_TEGRA_XUSB_H + +struct tegra_xusb_padctl; +struct device; + +struct tegra_xusb_padctl *tegra_xusb_padctl_get(struct device *dev); +void tegra_xusb_padctl_put(struct tegra_xusb_padctl *padctl); + +int tegra_xusb_padctl_usb3_save_context(struct tegra_xusb_padctl *padctl, + unsigned int port); +int tegra_xusb_padctl_hsic_set_idle(struct tegra_xusb_padctl *padctl, + unsigned int port, bool idle); +int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl, + unsigned int port, bool enable); + +#endif /* PHY_TEGRA_XUSB_H */ diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h index 9ba59fcba549..a42e57da270d 100644 --- a/include/linux/pinctrl/pinctrl.h +++ b/include/linux/pinctrl/pinctrl.h @@ -144,6 +144,12 @@ struct pinctrl_desc { extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, struct device *dev, void *driver_data); extern void pinctrl_unregister(struct pinctrl_dev *pctldev); +extern struct pinctrl_dev *devm_pinctrl_register(struct device *dev, + struct pinctrl_desc *pctldesc, + void *driver_data); +extern void devm_pinctrl_unregister(struct device *dev, + struct pinctrl_dev *pctldev); + extern bool pin_is_valid(struct pinctrl_dev *pctldev, int pin); extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range); diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 03b6095d3b18..d15d8ba8cc24 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -21,15 +21,15 @@ * @dma_dev: required DMA master device * @src_id: src request line * @dst_id: dst request line - * @src_master: src master for transfers on allocated channel. - * @dst_master: dest master for transfers on allocated channel. 
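pinctrl.h above gains a device-managed registration pair, devm_pinctrl_register()/devm_pinctrl_unregister(). A kernel-style probe sketch of the managed path; it assumes a kernel build environment and the ERR_PTR return convention used by pinctrl_register(), and the descriptor contents are placeholders:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>

static struct pinctrl_desc demo_pctl_desc = {
    .name  = "demo-pinctrl",
    .owner = THIS_MODULE,
    /* .pins, .npins and the pctlops/pmxops/confops would be filled in here */
};

static int demo_pinctrl_probe(struct platform_device *pdev)
{
    struct pinctrl_dev *pctldev;

    pctldev = devm_pinctrl_register(&pdev->dev, &demo_pctl_desc, NULL);
    if (IS_ERR(pctldev))
        return PTR_ERR(pctldev);

    /* No explicit unregister in remove(): the registration is devm-managed. */
    return 0;
}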
+ * @m_master: memory master for transfers on allocated channel + * @p_master: peripheral master for transfers on allocated channel */ struct dw_dma_slave { struct device *dma_dev; u8 src_id; u8 dst_id; - u8 src_master; - u8 dst_master; + u8 m_master; + u8 p_master; }; /** @@ -43,7 +43,7 @@ struct dw_dma_slave { * @block_size: Maximum block size supported by the controller * @nr_masters: Number of AHB masters supported by the controller * @data_width: Maximum data width supported by hardware per AHB master - * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) + * (in bytes, power of 2) */ struct dw_dma_platform_data { unsigned int nr_channels; @@ -55,7 +55,7 @@ struct dw_dma_platform_data { #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ unsigned char chan_priority; - unsigned short block_size; + unsigned int block_size; unsigned char nr_masters; unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; }; diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h index 28702c849af1..2dc7f4a8ab09 100644 --- a/include/linux/platform_data/gpio-dwapb.h +++ b/include/linux/platform_data/gpio-dwapb.h @@ -15,8 +15,7 @@ #define GPIO_DW_APB_H struct dwapb_port_property { - struct device_node *node; - const char *name; + struct fwnode_handle *fwnode; unsigned int idx; unsigned int ngpio; unsigned int gpio_base; diff --git a/include/linux/platform_data/gpmc-omap.h b/include/linux/platform_data/gpmc-omap.h new file mode 100644 index 000000000000..67ccdb0e1606 --- /dev/null +++ b/include/linux/platform_data/gpmc-omap.h @@ -0,0 +1,172 @@ +/* + * OMAP GPMC Platform data + * + * Copyright (C) 2014 Texas Instruments, Inc. - http://www.ti.com + * Roger Quadros <rogerq@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +#ifndef _GPMC_OMAP_H_ +#define _GPMC_OMAP_H_ + +/* Maximum Number of Chip Selects */ +#define GPMC_CS_NUM 8 + +/* bool type time settings */ +struct gpmc_bool_timings { + bool cycle2cyclediffcsen; + bool cycle2cyclesamecsen; + bool we_extra_delay; + bool oe_extra_delay; + bool adv_extra_delay; + bool cs_extra_delay; + bool time_para_granularity; +}; + +/* + * Note that all values in this struct are in nanoseconds except sync_clk + * (which is in picoseconds), while the register values are in gpmc_fck cycles. 
+ */ +struct gpmc_timings { + /* Minimum clock period for synchronous mode (in picoseconds) */ + u32 sync_clk; + + /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */ + u32 cs_on; /* Assertion time */ + u32 cs_rd_off; /* Read deassertion time */ + u32 cs_wr_off; /* Write deassertion time */ + + /* ADV signal timings corresponding to GPMC_CONFIG3 */ + u32 adv_on; /* Assertion time */ + u32 adv_rd_off; /* Read deassertion time */ + u32 adv_wr_off; /* Write deassertion time */ + u32 adv_aad_mux_on; /* ADV assertion time for AAD */ + u32 adv_aad_mux_rd_off; /* ADV read deassertion time for AAD */ + u32 adv_aad_mux_wr_off; /* ADV write deassertion time for AAD */ + + /* WE signals timings corresponding to GPMC_CONFIG4 */ + u32 we_on; /* WE assertion time */ + u32 we_off; /* WE deassertion time */ + + /* OE signals timings corresponding to GPMC_CONFIG4 */ + u32 oe_on; /* OE assertion time */ + u32 oe_off; /* OE deassertion time */ + u32 oe_aad_mux_on; /* OE assertion time for AAD */ + u32 oe_aad_mux_off; /* OE deassertion time for AAD */ + + /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */ + u32 page_burst_access; /* Multiple access word delay */ + u32 access; /* Start-cycle to first data valid delay */ + u32 rd_cycle; /* Total read cycle time */ + u32 wr_cycle; /* Total write cycle time */ + + u32 bus_turnaround; + u32 cycle2cycle_delay; + + u32 wait_monitoring; + u32 clk_activation; + + /* The following are only on OMAP3430 */ + u32 wr_access; /* WRACCESSTIME */ + u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */ + + struct gpmc_bool_timings bool_timings; +}; + +/* Device timings in picoseconds */ +struct gpmc_device_timings { + u32 t_ceasu; /* address setup to CS valid */ + u32 t_avdasu; /* address setup to ADV valid */ + /* XXX: try to combine t_avdp_r & t_avdp_w. Issue is + * of tusb using these timings even for sync whilst + * ideally for adv_rd/(wr)_off it should have considered + * t_avdh instead. This indirectly necessitates r/w + * variations of t_avdp as it is possible to have one + * sync & other async + */ + u32 t_avdp_r; /* ADV low time (what about t_cer ?) 
*/ + u32 t_avdp_w; + u32 t_aavdh; /* address hold time */ + u32 t_oeasu; /* address setup to OE valid */ + u32 t_aa; /* access time from ADV assertion */ + u32 t_iaa; /* initial access time */ + u32 t_oe; /* access time from OE assertion */ + u32 t_ce; /* access time from CS asertion */ + u32 t_rd_cycle; /* read cycle time */ + u32 t_cez_r; /* read CS deassertion to high Z */ + u32 t_cez_w; /* write CS deassertion to high Z */ + u32 t_oez; /* OE deassertion to high Z */ + u32 t_weasu; /* address setup to WE valid */ + u32 t_wpl; /* write assertion time */ + u32 t_wph; /* write deassertion time */ + u32 t_wr_cycle; /* write cycle time */ + + u32 clk; + u32 t_bacc; /* burst access valid clock to output delay */ + u32 t_ces; /* CS setup time to clk */ + u32 t_avds; /* ADV setup time to clk */ + u32 t_avdh; /* ADV hold time from clk */ + u32 t_ach; /* address hold time from clk */ + u32 t_rdyo; /* clk to ready valid */ + + u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */ + u32 t_ce_avd; /* CS on to ADV on delay */ + + /* XXX: check the possibility of combining + * cyc_aavhd_oe & cyc_aavdh_we + */ + u8 cyc_aavdh_oe;/* read address hold time in cycles */ + u8 cyc_aavdh_we;/* write address hold time in cycles */ + u8 cyc_oe; /* access time from OE assertion in cycles */ + u8 cyc_wpl; /* write deassertion time in cycles */ + u32 cyc_iaa; /* initial access time in cycles */ + + /* extra delays */ + bool ce_xdelay; + bool avd_xdelay; + bool oe_xdelay; + bool we_xdelay; +}; + +#define GPMC_BURST_4 4 /* 4 word burst */ +#define GPMC_BURST_8 8 /* 8 word burst */ +#define GPMC_BURST_16 16 /* 16 word burst */ +#define GPMC_DEVWIDTH_8BIT 1 /* 8-bit device width */ +#define GPMC_DEVWIDTH_16BIT 2 /* 16-bit device width */ +#define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */ +#define GPMC_MUX_AD 2 /* Addr-Data multiplex */ + +struct gpmc_settings { + bool burst_wrap; /* enables wrap bursting */ + bool burst_read; /* enables read page/burst mode */ + bool burst_write; /* enables write page/burst mode */ + bool device_nand; /* device is NAND */ + bool sync_read; /* enables synchronous reads */ + bool sync_write; /* enables synchronous writes */ + bool wait_on_read; /* monitor wait on reads */ + bool wait_on_write; /* monitor wait on writes */ + u32 burst_len; /* page/burst length */ + u32 device_width; /* device bus width (8 or 16 bit) */ + u32 mux_add_data; /* multiplex address & data */ + u32 wait_pin; /* wait-pin to be used */ +}; + +/* Data for each chip select */ +struct gpmc_omap_cs_data { + bool valid; /* data is valid */ + bool is_nand; /* device within this CS is NAND */ + struct gpmc_settings *settings; + struct gpmc_device_timings *device_timings; + struct gpmc_timings *gpmc_timings; + struct platform_device *pdev; /* device within this CS region */ + unsigned int pdata_size; +}; + +struct gpmc_omap_platform_data { + struct gpmc_omap_cs_data cs[GPMC_CS_NUM]; +}; + +#endif /* _GPMC_OMAP_H */ diff --git a/include/linux/platform_data/invensense_mpu6050.h b/include/linux/platform_data/invensense_mpu6050.h index ad3aa7b95f35..554b59801aa8 100644 --- a/include/linux/platform_data/invensense_mpu6050.h +++ b/include/linux/platform_data/invensense_mpu6050.h @@ -16,13 +16,16 @@ /** * struct inv_mpu6050_platform_data - Platform data for the mpu driver - * @orientation: Orientation matrix of the chip + * @orientation: Orientation matrix of the chip (deprecated in favor of + * mounting matrix retrieved from device-tree) * * Contains platform specific information on how to configure the MPU6050 to 
* work on this platform. The orientation matricies are 3x3 rotation matricies * that are applied to the data to rotate from the mounting orientation to the * platform orientation. The values must be one of 0, 1, or -1 and each row and * column should have exactly 1 non-zero value. + * + * Deprecated in favor of mounting matrix retrieved from device-tree. */ struct inv_mpu6050_platform_data { __s8 orientation[9]; diff --git a/include/linux/platform_data/mailbox-omap.h b/include/linux/platform_data/mailbox-omap.h deleted file mode 100644 index 4631dbb4255e..000000000000 --- a/include/linux/platform_data/mailbox-omap.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * mailbox-omap.h - * - * Copyright (C) 2013 Texas Instruments, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef _PLAT_MAILBOX_H -#define _PLAT_MAILBOX_H - -/* Interrupt register configuration types */ -#define MBOX_INTR_CFG_TYPE1 (0) -#define MBOX_INTR_CFG_TYPE2 (1) - -/** - * struct omap_mbox_dev_info - OMAP mailbox device attribute info - * @name: name of the mailbox device - * @tx_id: mailbox queue id used for transmitting messages - * @rx_id: mailbox queue id on which messages are received - * @irq_id: irq identifier number to use from the hwmod data - * @usr_id: mailbox user id for identifying the interrupt into - * the MPU interrupt controller. - */ -struct omap_mbox_dev_info { - const char *name; - u32 tx_id; - u32 rx_id; - u32 irq_id; - u32 usr_id; -}; - -/** - * struct omap_mbox_pdata - OMAP mailbox platform data - * @intr_type: type of interrupt configuration registers used - while programming mailbox queue interrupts - * @num_users: number of users (processor devices) that the mailbox - * h/w block can interrupt - * @num_fifos: number of h/w fifos within the mailbox h/w block - * @info_cnt: number of mailbox devices for the platform - * @info: array of mailbox device attributes - */ -struct omap_mbox_pdata { - u32 intr_type; - u32 num_users; - u32 num_fifos; - u32 info_cnt; - struct omap_mbox_dev_info *info; -}; - -#endif /* _PLAT_MAILBOX_H */ diff --git a/include/linux/platform_data/media/ir-rx51.h b/include/linux/platform_data/media/ir-rx51.h index 104aa892f31b..3038120ca46e 100644 --- a/include/linux/platform_data/media/ir-rx51.h +++ b/include/linux/platform_data/media/ir-rx51.h @@ -5,6 +5,7 @@ struct lirc_rx51_platform_data { int pwm_timer; int(*set_max_mpu_wakeup_lat)(struct device *dev, long t); + struct pwm_omap_dmtimer_pdata *dmtimer; }; #endif diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h index 090bbab0130a..17d57a18bac5 100644 --- a/include/linux/platform_data/mtd-nand-omap2.h +++ b/include/linux/platform_data/mtd-nand-omap2.h @@ -45,7 +45,6 @@ enum omap_ecc { }; struct gpmc_nand_regs { - void __iomem *gpmc_status; void __iomem *gpmc_nand_command; void __iomem *gpmc_nand_address; void __iomem *gpmc_nand_data; @@ -64,21 +63,24 @@ struct gpmc_nand_regs { void __iomem *gpmc_bch_result4[GPMC_BCH_NUM_REMAINDER]; void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER]; void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER]; + /* Deprecated. 
Do not use */ + void __iomem *gpmc_status; }; struct omap_nand_platform_data { int cs; struct mtd_partition *parts; int nr_parts; - bool dev_ready; bool flash_bbt; enum nand_io xfer_type; int devsize; enum omap_ecc ecc_opt; - struct gpmc_nand_regs reg; - /* for passing the partitions */ - struct device_node *of_node; struct device_node *elm_of_node; + + /* deprecated */ + struct gpmc_nand_regs reg; + struct device_node *of_node; + bool dev_ready; }; #endif diff --git a/include/linux/platform_data/pwm_omap_dmtimer.h b/include/linux/platform_data/pwm_omap_dmtimer.h index 59384217208f..e7d521e48855 100644 --- a/include/linux/platform_data/pwm_omap_dmtimer.h +++ b/include/linux/platform_data/pwm_omap_dmtimer.h @@ -35,6 +35,16 @@ #ifndef __PWM_OMAP_DMTIMER_PDATA_H #define __PWM_OMAP_DMTIMER_PDATA_H +/* clock sources */ +#define PWM_OMAP_DMTIMER_SRC_SYS_CLK 0x00 +#define PWM_OMAP_DMTIMER_SRC_32_KHZ 0x01 +#define PWM_OMAP_DMTIMER_SRC_EXT_CLK 0x02 + +/* timer interrupt enable bits */ +#define PWM_OMAP_DMTIMER_INT_CAPTURE (1 << 2) +#define PWM_OMAP_DMTIMER_INT_OVERFLOW (1 << 1) +#define PWM_OMAP_DMTIMER_INT_MATCH (1 << 0) + /* trigger types */ #define PWM_OMAP_DMTIMER_TRIGGER_NONE 0x00 #define PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW 0x01 @@ -45,15 +55,23 @@ typedef struct omap_dm_timer pwm_omap_dmtimer; struct pwm_omap_dmtimer_pdata { pwm_omap_dmtimer *(*request_by_node)(struct device_node *np); + pwm_omap_dmtimer *(*request_specific)(int timer_id); + pwm_omap_dmtimer *(*request)(void); + int (*free)(pwm_omap_dmtimer *timer); void (*enable)(pwm_omap_dmtimer *timer); void (*disable)(pwm_omap_dmtimer *timer); + int (*get_irq)(pwm_omap_dmtimer *timer); + int (*set_int_enable)(pwm_omap_dmtimer *timer, unsigned int value); + int (*set_int_disable)(pwm_omap_dmtimer *timer, u32 mask); + struct clk *(*get_fclk)(pwm_omap_dmtimer *timer); int (*start)(pwm_omap_dmtimer *timer); int (*stop)(pwm_omap_dmtimer *timer); + int (*set_source)(pwm_omap_dmtimer *timer, int source); int (*set_load)(pwm_omap_dmtimer *timer, int autoreload, unsigned int value); @@ -63,7 +81,10 @@ struct pwm_omap_dmtimer_pdata { int toggle, int trigger); int (*set_prescaler)(pwm_omap_dmtimer *timer, int prescaler); + unsigned int (*read_counter)(pwm_omap_dmtimer *timer); int (*write_counter)(pwm_omap_dmtimer *timer, unsigned int value); + unsigned int (*read_status)(pwm_omap_dmtimer *timer); + int (*write_status)(pwm_omap_dmtimer *timer, unsigned int value); }; #endif /* __PWM_OMAP_DMTIMER_PDATA_H */ diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h index 753839187ba0..79b0e4cdb814 100644 --- a/include/linux/platform_data/st_sensors_pdata.h +++ b/include/linux/platform_data/st_sensors_pdata.h @@ -16,9 +16,11 @@ * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). * Available only for accelerometer and pressure sensors. * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet). + * @open_drain: set the interrupt line to be open drain if possible. 
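
For reference, the open_drain flag added above is consumed through the platform-data structure defined just below; a board file that wires DRDY to the sensor's second interrupt pin over an open-drain line might describe it roughly like this (the "lis3dh" device name, I2C address and placement are illustrative, not taken from this patch set):

#include <linux/i2c.h>
#include <linux/platform_data/st_sensors_pdata.h>

/* Hypothetical board support sketch: DRDY routed to INT2, open-drain wiring. */
static struct st_sensors_platform_data board_accel_pdata = {
	.drdy_int_pin	= 2,
	.open_drain	= true,
};

static struct i2c_board_info board_i2c_sensors[] __initdata = {
	{
		I2C_BOARD_INFO("lis3dh", 0x18),
		.platform_data	= &board_accel_pdata,
	},
};
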
*/ struct st_sensors_platform_data { u8 drdy_int_pin; + bool open_drain; }; #endif /* ST_SENSORS_PDATA_H */ diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 03b755521fd9..98c2a7c7108e 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -18,7 +18,7 @@ #define PLATFORM_DEVID_AUTO (-2) struct mfd_cell; -struct property_set; +struct property_entry; struct platform_device { const char *name; @@ -73,7 +73,7 @@ struct platform_device_info { size_t size_data; u64 dma_mask; - const struct property_set *pset; + struct property_entry *properties; }; extern struct platform_device *platform_device_register_full( const struct platform_device_info *pdevinfo); @@ -172,7 +172,7 @@ extern int platform_device_add_resources(struct platform_device *pdev, extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size); extern int platform_device_add_properties(struct platform_device *pdev, - const struct property_set *pset); + struct property_entry *properties); extern int platform_device_add(struct platform_device *pdev); extern void platform_device_del(struct platform_device *pdev); extern void platform_device_put(struct platform_device *pdev); diff --git a/include/linux/pm.h b/include/linux/pm.h index 6a5d654f4447..06eb353182ab 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -563,7 +563,6 @@ struct dev_pm_info { bool is_suspended:1; /* Ditto */ bool is_noirq_suspended:1; bool is_late_suspended:1; - bool ignore_children:1; bool early_init:1; /* Owned by the PM core */ bool direct_complete:1; /* Owned by the PM core */ spinlock_t lock; @@ -591,6 +590,7 @@ struct dev_pm_info { unsigned int deferred_resume:1; unsigned int run_wake:1; unsigned int runtime_auto:1; + bool ignore_children:1; unsigned int no_callbacks:1; unsigned int irq_safe:1; unsigned int use_autosuspend:1; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 49cd8890b873..39285c7bd3f5 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -28,14 +28,12 @@ enum gpd_status { struct dev_power_governor { bool (*power_down_ok)(struct dev_pm_domain *domain); - bool (*stop_ok)(struct device *dev); + bool (*suspend_ok)(struct device *dev); }; struct gpd_dev_ops { int (*start)(struct device *dev); int (*stop)(struct device *dev); - int (*save_state)(struct device *dev); - int (*restore_state)(struct device *dev); bool (*active_wakeup)(struct device *dev); }; @@ -94,7 +92,7 @@ struct gpd_timing_data { s64 resume_latency_ns; s64 effective_constraint_ns; bool constraint_changed; - bool cached_stop_ok; + bool cached_suspend_ok; }; struct pm_domain_data { diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index cccaf4a29e9f..bca26157f5b6 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -65,6 +65,10 @@ void dev_pm_opp_put_prop_name(struct device *dev); int dev_pm_opp_set_regulator(struct device *dev, const char *name); void dev_pm_opp_put_regulator(struct device *dev); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); +int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); +int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); +void dev_pm_opp_remove_table(struct device *dev); +void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask); #else static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) { @@ -109,25 +113,25 @@ static inline struct dev_pm_opp 
*dev_pm_opp_get_suspend_opp(struct device *dev) static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, unsigned long freq, bool available) { - return ERR_PTR(-EINVAL); + return ERR_PTR(-ENOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq) { - return ERR_PTR(-EINVAL); + return ERR_PTR(-ENOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, unsigned long *freq) { - return ERR_PTR(-EINVAL); + return ERR_PTR(-ENOTSUPP); } static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) { - return -EINVAL; + return -ENOTSUPP; } static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) @@ -147,73 +151,85 @@ static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq) static inline struct srcu_notifier_head *dev_pm_opp_get_notifier( struct device *dev) { - return ERR_PTR(-EINVAL); + return ERR_PTR(-ENOTSUPP); } static inline int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count) { - return -EINVAL; + return -ENOTSUPP; } static inline void dev_pm_opp_put_supported_hw(struct device *dev) {} static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) { - return -EINVAL; + return -ENOTSUPP; } static inline void dev_pm_opp_put_prop_name(struct device *dev) {} static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name) { - return -EINVAL; + return -ENOTSUPP; } static inline void dev_pm_opp_put_regulator(struct device *dev) {} static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { + return -ENOTSUPP; +} + +static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask) +{ + return -ENOTSUPP; +} + +static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) +{ return -EINVAL; } +static inline void dev_pm_opp_remove_table(struct device *dev) +{ +} + +static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask) +{ +} + #endif /* CONFIG_PM_OPP */ #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) int dev_pm_opp_of_add_table(struct device *dev); void dev_pm_opp_of_remove_table(struct device *dev); -int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask); -void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask); -int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask); -int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask); +int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask); +void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask); +int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); #else static inline int dev_pm_opp_of_add_table(struct device *dev) { - return -EINVAL; + return -ENOTSUPP; } static inline void dev_pm_opp_of_remove_table(struct device *dev) { } -static inline int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask) -{ - return -ENOSYS; -} - -static inline void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask) +static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) { + return -ENOTSUPP; } -static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) +static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask) { - return -ENOSYS; } -static inline int dev_pm_opp_set_sharing_cpus(struct device 
*cpu_dev, cpumask_var_t cpumask) +static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { - return -ENOSYS; + return -ENOTSUPP; } #endif diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 7af093d6a4dd..2e14d2667b6c 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -56,6 +56,11 @@ extern void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns); extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable); +static inline void pm_suspend_ignore_children(struct device *dev, bool enable) +{ + dev->power.ignore_children = enable; +} + static inline bool pm_children_suspended(struct device *dev) { return dev->power.ignore_children @@ -156,6 +161,7 @@ static inline void __pm_runtime_disable(struct device *dev, bool c) {} static inline void pm_runtime_allow(struct device *dev) {} static inline void pm_runtime_forbid(struct device *dev) {} +static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {} static inline bool pm_children_suspended(struct device *dev) { return false; } static inline void pm_runtime_get_noresume(struct device *dev) {} static inline void pm_runtime_put_noidle(struct device *dev) {} diff --git a/include/linux/poll.h b/include/linux/poll.h index 9fb4f40d9a26..37b057b63b46 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -96,7 +96,7 @@ extern void poll_initwait(struct poll_wqueues *pwq); extern void poll_freewait(struct poll_wqueues *pwq); extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state, ktime_t *expires, unsigned long slack); -extern u64 select_estimate_accuracy(struct timespec *tv); +extern u64 select_estimate_accuracy(struct timespec64 *tv); static inline int poll_schedule(struct poll_wqueues *pwq, int state) @@ -153,12 +153,13 @@ void zero_fd_set(unsigned long nr, unsigned long *fdset) #define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1) -extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time); +extern int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time); extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds, - struct timespec *end_time); + struct timespec64 *end_time); extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, - fd_set __user *exp, struct timespec *end_time); + fd_set __user *exp, struct timespec64 *end_time); -extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec); +extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec, + long nsec); #endif /* _LINUX_POLL_H */ diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index 3e96a6a76103..5b5a80cc5926 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h @@ -99,7 +99,6 @@ extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **, extern int simple_set_acl(struct inode *, struct posix_acl *, int); extern int simple_acl_create(struct inode *, struct inode *); -struct posix_acl **acl_by_type(struct inode *inode, int type); struct posix_acl *get_cached_acl(struct inode *inode, int type); struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type); void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl); diff --git a/include/linux/printk.h b/include/linux/printk.h index 9ccbdf2c1453..f4da695fd615 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -122,7 +122,19 @@ static inline __printf(1, 2) __cold void early_printk(const char *s, ...) 
{ } #endif -typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args); +#ifdef CONFIG_PRINTK_NMI +extern void printk_nmi_init(void); +extern void printk_nmi_enter(void); +extern void printk_nmi_exit(void); +extern void printk_nmi_flush(void); +extern void printk_nmi_flush_on_panic(void); +#else +static inline void printk_nmi_init(void) { } +static inline void printk_nmi_enter(void) { } +static inline void printk_nmi_exit(void) { } +static inline void printk_nmi_flush(void) { } +static inline void printk_nmi_flush_on_panic(void) { } +#endif /* PRINTK_NMI */ #ifdef CONFIG_PRINTK asmlinkage __printf(5, 0) diff --git a/include/linux/property.h b/include/linux/property.h index b51fcd36d892..ecab11e40794 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -238,18 +238,9 @@ struct property_entry { .name = _name_, \ } -/** - * struct property_set - Collection of "built-in" device properties. - * @fwnode: Handle to be pointed to by the fwnode field of struct device. - * @properties: Array of properties terminated with a null entry. - */ -struct property_set { - struct fwnode_handle fwnode; - struct property_entry *properties; -}; - -int device_add_property_set(struct device *dev, const struct property_set *pset); -void device_remove_property_set(struct device *dev); +int device_add_properties(struct device *dev, + struct property_entry *properties); +void device_remove_properties(struct device *dev); bool device_dma_supported(struct device *dev); diff --git a/include/linux/psci.h b/include/linux/psci.h index 393efe2edf9a..bdea1cb5e1db 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h @@ -21,8 +21,6 @@ #define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 bool psci_tos_resident_on(int cpu); -bool psci_power_state_loses_context(u32 state); -bool psci_power_state_is_valid(u32 state); int psci_cpu_init_idle(unsigned int cpu); int psci_cpu_suspend_enter(unsigned long index); diff --git a/include/linux/pwm.h b/include/linux/pwm.h index cfc3ed46cad2..17018f3c066e 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -5,59 +5,7 @@ #include <linux/mutex.h> #include <linux/of.h> -struct pwm_device; struct seq_file; - -#if IS_ENABLED(CONFIG_PWM) -/* - * pwm_request - request a PWM device - */ -struct pwm_device *pwm_request(int pwm_id, const char *label); - -/* - * pwm_free - free a PWM device - */ -void pwm_free(struct pwm_device *pwm); - -/* - * pwm_config - change a PWM device configuration - */ -int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns); - -/* - * pwm_enable - start a PWM output toggling - */ -int pwm_enable(struct pwm_device *pwm); - -/* - * pwm_disable - stop a PWM output toggling - */ -void pwm_disable(struct pwm_device *pwm); -#else -static inline struct pwm_device *pwm_request(int pwm_id, const char *label) -{ - return ERR_PTR(-ENODEV); -} - -static inline void pwm_free(struct pwm_device *pwm) -{ -} - -static inline int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) -{ - return -EINVAL; -} - -static inline int pwm_enable(struct pwm_device *pwm) -{ - return -EINVAL; -} - -static inline void pwm_disable(struct pwm_device *pwm) -{ -} -#endif - struct pwm_chip; /** @@ -74,10 +22,41 @@ enum pwm_polarity { PWM_POLARITY_INVERSED, }; +/** + * struct pwm_args - board-dependent PWM arguments + * @period: reference period + * @polarity: reference polarity + * + * This structure describes board-dependent arguments attached to a PWM + * device. These arguments are usually retrieved from the PWM lookup table or + * device tree. 
+ * + * Do not confuse this with the PWM state: PWM arguments represent the initial + * configuration that users want to use on this PWM device rather than the + * current PWM hardware state. + */ +struct pwm_args { + unsigned int period; + enum pwm_polarity polarity; +}; + enum { PWMF_REQUESTED = 1 << 0, - PWMF_ENABLED = 1 << 1, - PWMF_EXPORTED = 1 << 2, + PWMF_EXPORTED = 1 << 1, +}; + +/* + * struct pwm_state - state of a PWM channel + * @period: PWM period (in nanoseconds) + * @duty_cycle: PWM duty cycle (in nanoseconds) + * @polarity: PWM polarity + * @enabled: PWM enabled status + */ +struct pwm_state { + unsigned int period; + unsigned int duty_cycle; + enum pwm_polarity polarity; + bool enabled; }; /** @@ -88,10 +67,8 @@ enum { * @pwm: global index of the PWM device * @chip: PWM chip providing this PWM device * @chip_data: chip-private data associated with the PWM device - * @lock: used to serialize accesses to the PWM device where necessary - * @period: period of the PWM signal (in nanoseconds) - * @duty_cycle: duty cycle of the PWM signal (in nanoseconds) - * @polarity: polarity of the PWM signal + * @args: PWM arguments + * @state: curent PWM channel state */ struct pwm_device { const char *label; @@ -100,48 +77,74 @@ struct pwm_device { unsigned int pwm; struct pwm_chip *chip; void *chip_data; - struct mutex lock; - unsigned int period; - unsigned int duty_cycle; - enum pwm_polarity polarity; + struct pwm_args args; + struct pwm_state state; }; +/** + * pwm_get_state() - retrieve the current PWM state + * @pwm: PWM device + * @state: state to fill with the current PWM state + */ +static inline void pwm_get_state(const struct pwm_device *pwm, + struct pwm_state *state) +{ + *state = pwm->state; +} + static inline bool pwm_is_enabled(const struct pwm_device *pwm) { - return test_bit(PWMF_ENABLED, &pwm->flags); + struct pwm_state state; + + pwm_get_state(pwm, &state); + + return state.enabled; } static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period) { if (pwm) - pwm->period = period; + pwm->state.period = period; } static inline unsigned int pwm_get_period(const struct pwm_device *pwm) { - return pwm ? pwm->period : 0; + struct pwm_state state; + + pwm_get_state(pwm, &state); + + return state.period; } static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty) { if (pwm) - pwm->duty_cycle = duty; + pwm->state.duty_cycle = duty; } static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm) { - return pwm ? pwm->duty_cycle : 0; -} + struct pwm_state state; -/* - * pwm_set_polarity - configure the polarity of a PWM signal - */ -int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity); + pwm_get_state(pwm, &state); + + return state.duty_cycle; +} static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm) { - return pwm ? pwm->polarity : PWM_POLARITY_NORMAL; + struct pwm_state state; + + pwm_get_state(pwm, &state); + + return state.polarity; +} + +static inline void pwm_get_args(const struct pwm_device *pwm, + struct pwm_args *args) +{ + *args = pwm->args; } /** @@ -152,6 +155,13 @@ static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm) * @set_polarity: configure the polarity of this PWM * @enable: enable PWM output toggling * @disable: disable PWM output toggling + * @apply: atomically apply a new PWM config. 
The state argument + * should be adjusted with the real hardware config (if the + * approximate the period or duty_cycle value, state should + * reflect it) + * @get_state: get the current PWM state. This function is only + * called once per PWM device when the PWM chip is + * registered. * @dbg_show: optional routine to show contents in debugfs * @owner: helps prevent removal of modules exporting active PWMs */ @@ -164,6 +174,10 @@ struct pwm_ops { enum pwm_polarity polarity); int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm); void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm); + int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state); + void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state); #ifdef CONFIG_DEBUG_FS void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s); #endif @@ -199,6 +213,115 @@ struct pwm_chip { }; #if IS_ENABLED(CONFIG_PWM) +/* PWM user APIs */ +struct pwm_device *pwm_request(int pwm_id, const char *label); +void pwm_free(struct pwm_device *pwm); +int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state); +int pwm_adjust_config(struct pwm_device *pwm); + +/** + * pwm_config() - change a PWM device configuration + * @pwm: PWM device + * @duty_ns: "on" time (in nanoseconds) + * @period_ns: duration (in nanoseconds) of one cycle + * + * Returns: 0 on success or a negative error code on failure. + */ +static inline int pwm_config(struct pwm_device *pwm, int duty_ns, + int period_ns) +{ + struct pwm_state state; + + if (!pwm) + return -EINVAL; + + pwm_get_state(pwm, &state); + if (state.duty_cycle == duty_ns && state.period == period_ns) + return 0; + + state.duty_cycle = duty_ns; + state.period = period_ns; + return pwm_apply_state(pwm, &state); +} + +/** + * pwm_set_polarity() - configure the polarity of a PWM signal + * @pwm: PWM device + * @polarity: new polarity of the PWM signal + * + * Note that the polarity cannot be configured while the PWM device is + * enabled. + * + * Returns: 0 on success or a negative error code on failure. + */ +static inline int pwm_set_polarity(struct pwm_device *pwm, + enum pwm_polarity polarity) +{ + struct pwm_state state; + + if (!pwm) + return -EINVAL; + + pwm_get_state(pwm, &state); + if (state.polarity == polarity) + return 0; + + /* + * Changing the polarity of a running PWM without adjusting the + * dutycycle/period value is a bit risky (can introduce glitches). + * Return -EBUSY in this case. + * Note that this is allowed when using pwm_apply_state() because + * the user specifies all the parameters. + */ + if (state.enabled) + return -EBUSY; + + state.polarity = polarity; + return pwm_apply_state(pwm, &state); +} + +/** + * pwm_enable() - start a PWM output toggling + * @pwm: PWM device + * + * Returns: 0 on success or a negative error code on failure. 
+ */ +static inline int pwm_enable(struct pwm_device *pwm) +{ + struct pwm_state state; + + if (!pwm) + return -EINVAL; + + pwm_get_state(pwm, &state); + if (state.enabled) + return 0; + + state.enabled = true; + return pwm_apply_state(pwm, &state); +} + +/** + * pwm_disable() - stop a PWM output toggling + * @pwm: PWM device + */ +static inline void pwm_disable(struct pwm_device *pwm) +{ + struct pwm_state state; + + if (!pwm) + return; + + pwm_get_state(pwm, &state); + if (!state.enabled) + return; + + state.enabled = false; + pwm_apply_state(pwm, &state); +} + + +/* PWM provider APIs */ int pwm_set_chip_data(struct pwm_device *pwm, void *data); void *pwm_get_chip_data(struct pwm_device *pwm); @@ -224,6 +347,47 @@ void devm_pwm_put(struct device *dev, struct pwm_device *pwm); bool pwm_can_sleep(struct pwm_device *pwm); #else +static inline struct pwm_device *pwm_request(int pwm_id, const char *label) +{ + return ERR_PTR(-ENODEV); +} + +static inline void pwm_free(struct pwm_device *pwm) +{ +} + +static inline int pwm_apply_state(struct pwm_device *pwm, + const struct pwm_state *state) +{ + return -ENOTSUPP; +} + +static inline int pwm_adjust_config(struct pwm_device *pwm) +{ + return -ENOTSUPP; +} + +static inline int pwm_config(struct pwm_device *pwm, int duty_ns, + int period_ns) +{ + return -EINVAL; +} + +static inline int pwm_set_polarity(struct pwm_device *pwm, + enum pwm_polarity polarity) +{ + return -ENOTSUPP; +} + +static inline int pwm_enable(struct pwm_device *pwm) +{ + return -EINVAL; +} + +static inline void pwm_disable(struct pwm_device *pwm) +{ +} + static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data) { return -EINVAL; @@ -295,6 +459,34 @@ static inline bool pwm_can_sleep(struct pwm_device *pwm) } #endif +static inline void pwm_apply_args(struct pwm_device *pwm) +{ + /* + * PWM users calling pwm_apply_args() expect to have a fresh config + * where the polarity and period are set according to pwm_args info. + * The problem is, polarity can only be changed when the PWM is + * disabled. + * + * PWM drivers supporting hardware readout may declare the PWM device + * as enabled, and prevent polarity setting, which changes from the + * existing behavior, where all PWM devices are declared as disabled + * at startup (even if they are actually enabled), thus authorizing + * polarity setting. + * + * Instead of setting ->enabled to false, we call pwm_disable() + * before pwm_set_polarity() to ensure that everything is configured + * as expected, and the PWM is really disabled when the user request + * it. + * + * Note that PWM users requiring a smooth handover between the + * bootloader and the kernel (like critical regulators controlled by + * PWM devices) will have to switch to the atomic API and avoid calling + * pwm_apply_args(). 
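
To show what "switch to the atomic API" looks like for a consumer, here is a minimal sketch that replaces separate pwm_config()/pwm_enable() calls with a single pwm_apply_state(); the function name and the 1 kHz / 50% duty numbers are invented for the illustration:

#include <linux/pwm.h>

/* Sketch: program a 1 kHz, 50% duty cycle signal and enable it in one step. */
static int example_pwm_setup(struct pwm_device *pwm)
{
	struct pwm_state state;

	pwm_get_state(pwm, &state);		/* start from the current state */
	state.period = 1000000;			/* 1 ms, in nanoseconds */
	state.duty_cycle = 500000;		/* 500 us */
	state.polarity = PWM_POLARITY_NORMAL;
	state.enabled = true;

	return pwm_apply_state(pwm, &state);	/* single hardware update */
}
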
+ */ + pwm_disable(pwm); + pwm_set_polarity(pwm, pwm->args.polarity); +} + struct pwm_lookup { struct list_head list; const char *provider; diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 53ecb37ae563..3f14c7efe68f 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -285,6 +285,63 @@ #define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 #define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 +#define PXP_VF_BAR0_START_IGU 0 +#define PXP_VF_BAR0_IGU_LENGTH 0x3000 +#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \ + PXP_VF_BAR0_IGU_LENGTH - 1) + +#define PXP_VF_BAR0_START_DQ 0x3000 +#define PXP_VF_BAR0_DQ_LENGTH 0x200 +#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0 +#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET) +#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \ + + 4) +#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \ + PXP_VF_BAR0_DQ_LENGTH - 1) + +#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200 +#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200 +#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) + +#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400 +#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) + +#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600 +#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) + +#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800 +#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) + +#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00 +#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) + +#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00 +#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) + +#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000 +#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000 + +#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32 + /* ILT Records */ #define PXP_NUM_ILT_RECORDS_BB 7600 #define PXP_NUM_ILT_RECORDS_K2 11000 @@ -327,9 +384,14 @@ struct regpair { __le32 hi; }; +struct vf_pf_channel_eqe_data { + struct regpair msg_addr; +}; + /* Event Data Union */ union event_ring_data { u8 bytes[8]; + struct vf_pf_channel_eqe_data vf_pf_channel; struct async_data async_info; }; diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index e1d69834a11f..6ae8cb4a61d3 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -13,6 +13,7 @@ #include <linux/if_link.h> #include <linux/qed/eth_common.h> #include <linux/qed/qed_if.h> +#include <linux/qed/qed_iov_if.h> struct qed_dev_eth_info { struct qed_dev_info common; @@ -27,12 +28,15 @@ struct qed_dev_eth_info { struct qed_update_vport_rss_params { u16 rss_ind_table[128]; u32 rss_key[10]; + u8 rss_caps; }; struct qed_update_vport_params { u8 vport_id; u8 update_vport_active_flg; u8 vport_active_flg; + u8 update_tx_switching_flg; + u8 tx_switching_flg; u8 update_accept_any_vlan_flg; u8 accept_any_vlan; u8 update_rss_flg; @@ -111,12 +115,23 @@ struct qed_queue_start_common_params { u16 sb_idx; }; +struct qed_tunn_params { + u16 vxlan_port; + u8 update_vxlan_port; + u16 geneve_port; + u8 update_geneve_port; +}; + struct qed_eth_cb_ops { struct qed_common_cb_ops common; + void (*force_mac) (void *dev, u8 *mac); }; struct 
qed_eth_ops { const struct qed_common_ops *common; +#ifdef CONFIG_QED_SRIOV + const struct qed_iov_hv_ops *iov; +#endif int (*fill_dev_info)(struct qed_dev *cdev, struct qed_dev_eth_info *info); @@ -125,6 +140,8 @@ struct qed_eth_ops { struct qed_eth_cb_ops *ops, void *cookie); + bool(*check_mac) (struct qed_dev *cdev, u8 *mac); + int (*vport_start)(struct qed_dev *cdev, struct qed_start_vport_params *params); @@ -165,9 +182,12 @@ struct qed_eth_ops { void (*get_vport_stats)(struct qed_dev *cdev, struct qed_eth_stats *stats); + + int (*tunn_config)(struct qed_dev *cdev, + struct qed_tunn_params *params); }; -const struct qed_eth_ops *qed_get_eth_ops(u32 version); +const struct qed_eth_ops *qed_get_eth_ops(void); void qed_put_eth_ops(void); #endif diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 1f7599c77cd4..4c29439f54bf 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -25,6 +25,15 @@ #include <linux/qed/common_hsi.h> #include <linux/qed/qed_chain.h> +enum dcbx_protocol_type { + DCBX_PROTOCOL_ISCSI, + DCBX_PROTOCOL_FCOE, + DCBX_PROTOCOL_ROCE, + DCBX_PROTOCOL_ROCE_V2, + DCBX_PROTOCOL_ETH, + DCBX_MAX_PROTOCOL_TYPE +}; + enum qed_led_mode { QED_LED_MODE_OFF, QED_LED_MODE_ON, @@ -93,6 +102,7 @@ struct qed_dev_info { u32 flash_size; u8 mf_mode; + bool tx_switching; }; enum qed_sb_type { @@ -110,6 +120,7 @@ struct qed_link_params { #define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1) #define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2) #define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3) +#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4) u32 override_flags; bool autoneg; u32 adv_speeds; @@ -118,6 +129,12 @@ struct qed_link_params { #define QED_LINK_PAUSE_RX_ENABLE BIT(1) #define QED_LINK_PAUSE_TX_ENABLE BIT(2) u32 pause_config; +#define QED_LINK_LOOPBACK_NONE BIT(0) +#define QED_LINK_LOOPBACK_INT_PHY BIT(1) +#define QED_LINK_LOOPBACK_EXT_PHY BIT(2) +#define QED_LINK_LOOPBACK_EXT BIT(3) +#define QED_LINK_LOOPBACK_MAC BIT(4) + u32 loopback_mode; }; struct qed_link_output { @@ -133,6 +150,13 @@ struct qed_link_output { u32 pause_config; }; +struct qed_probe_params { + enum qed_protocol protocol; + u32 dp_module; + u8 dp_level; + bool is_vf; +}; + #define QED_DRV_VER_STR_SIZE 12 struct qed_slowpath_params { u32 int_mode; @@ -158,10 +182,49 @@ struct qed_common_cb_ops { struct qed_link_output *link); }; +struct qed_selftest_ops { +/** + * @brief selftest_interrupt - Perform interrupt test + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*selftest_interrupt)(struct qed_dev *cdev); + +/** + * @brief selftest_memory - Perform memory test + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*selftest_memory)(struct qed_dev *cdev); + +/** + * @brief selftest_register - Perform register test + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*selftest_register)(struct qed_dev *cdev); + +/** + * @brief selftest_clock - Perform clock test + * + * @param cdev + * + * @return 0 on success, error otherwise. 
+ */ + int (*selftest_clock)(struct qed_dev *cdev); +}; + struct qed_common_ops { + struct qed_selftest_ops *selftest; + struct qed_dev* (*probe)(struct pci_dev *dev, - enum qed_protocol protocol, - u32 dp_module, u8 dp_level); + struct qed_probe_params *params); void (*remove)(struct qed_dev *cdev); @@ -211,6 +274,16 @@ struct qed_common_ops { void (*simd_handler_clean)(struct qed_dev *cdev, int index); + +/** + * @brief can_link_change - can the instance change the link or not + * + * @param cdev + * + * @return true if link-change is allowed, false otherwise. + */ + bool (*can_link_change)(struct qed_dev *cdev); + /** * @brief set_link - set links according to params * @@ -271,15 +344,6 @@ struct qed_common_ops { enum qed_led_mode mode); }; -/** - * @brief qed_get_protocol_version - * - * @param protocol - * - * @return version supported by qed for given protocol driver - */ -u32 qed_get_protocol_version(enum qed_protocol protocol); - #define MASK_FIELD(_name, _value) \ ((_value) &= (_name ## _MASK)) @@ -393,16 +457,16 @@ struct qed_eth_stats { /* port */ u64 rx_64_byte_packets; - u64 rx_127_byte_packets; - u64 rx_255_byte_packets; - u64 rx_511_byte_packets; - u64 rx_1023_byte_packets; - u64 rx_1518_byte_packets; - u64 rx_1522_byte_packets; - u64 rx_2047_byte_packets; - u64 rx_4095_byte_packets; - u64 rx_9216_byte_packets; - u64 rx_16383_byte_packets; + u64 rx_65_to_127_byte_packets; + u64 rx_128_to_255_byte_packets; + u64 rx_256_to_511_byte_packets; + u64 rx_512_to_1023_byte_packets; + u64 rx_1024_to_1518_byte_packets; + u64 rx_1519_to_1522_byte_packets; + u64 rx_1519_to_2047_byte_packets; + u64 rx_2048_to_4095_byte_packets; + u64 rx_4096_to_9216_byte_packets; + u64 rx_9217_to_16383_byte_packets; u64 rx_crc_errors; u64 rx_mac_crtl_frames; u64 rx_pause_frames; @@ -524,4 +588,15 @@ static inline void internal_ram_wr(void __iomem *addr, __internal_ram_wr(NULL, addr, size, data); } +enum qed_rss_caps { + QED_RSS_IPV4 = 0x1, + QED_RSS_IPV6 = 0x2, + QED_RSS_IPV4_TCP = 0x4, + QED_RSS_IPV6_TCP = 0x8, + QED_RSS_IPV4_UDP = 0x10, + QED_RSS_IPV6_UDP = 0x20, +}; + +#define QED_RSS_IND_TABLE_SIZE 128 +#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */ #endif diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h new file mode 100644 index 000000000000..5a4f8d0899e9 --- /dev/null +++ b/include/linux/qed/qed_iov_if.h @@ -0,0 +1,34 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
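
Returning to the qed_common_ops rework above, ->probe() now takes a parameter block instead of separate protocol/debug arguments; a protocol driver would be expected to fill it along these lines (the wrapper function and the assumption that QED_PROTOCOL_ETH is the relevant enum qed_protocol value are mine, not part of the patch):

#include <linux/pci.h>
#include <linux/qed/qed_eth_if.h>

/* Sketch: calling the reworked ->probe() hook with a qed_probe_params block. */
static struct qed_dev *example_qed_probe(const struct qed_eth_ops *qed_ops,
					 struct pci_dev *pdev,
					 u32 dp_module, u8 dp_level, bool is_vf)
{
	struct qed_probe_params params = {
		.protocol	= QED_PROTOCOL_ETH,	/* assumed enum value */
		.dp_module	= dp_module,
		.dp_level	= dp_level,
		.is_vf		= is_vf,
	};

	return qed_ops->common->probe(pdev, &params);
}
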
+ */ + +#ifndef _QED_IOV_IF_H +#define _QED_IOV_IF_H + +#include <linux/qed/qed_if.h> + +/* Structs used by PF to control and manipulate child VFs */ +struct qed_iov_hv_ops { + int (*configure)(struct qed_dev *cdev, int num_vfs_param); + + int (*set_mac) (struct qed_dev *cdev, u8 *mac, int vfid); + + int (*set_vlan) (struct qed_dev *cdev, u16 vid, int vfid); + + int (*get_config) (struct qed_dev *cdev, int vf_id, + struct ifla_vf_info *ivi); + + int (*set_link_state) (struct qed_dev *cdev, int vf_id, + int link_state); + + int (*set_spoof) (struct qed_dev *cdev, int vfid, bool val); + + int (*set_rate) (struct qed_dev *cdev, int vfid, + u32 min_rate, u32 max_rate); +}; + +#endif diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 51a97ac8bfbf..cb4b7e8cee81 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -29,51 +29,45 @@ #include <linux/rcupdate.h> /* - * An indirect pointer (root->rnode pointing to a radix_tree_node, rather - * than a data item) is signalled by the low bit set in the root->rnode - * pointer. - * - * In this case root->height is > 0, but the indirect pointer tests are - * needed for RCU lookups (because root->height is unreliable). The only - * time callers need worry about this is when doing a lookup_slot under - * RCU. - * - * Indirect pointer in fact is also used to tag the last pointer of a node - * when it is shrunk, before we rcu free the node. See shrink code for - * details. + * The bottom two bits of the slot determine how the remaining bits in the + * slot are interpreted: + * + * 00 - data pointer + * 01 - internal entry + * 10 - exceptional entry + * 11 - locked exceptional entry + * + * The internal entry may be a pointer to the next level in the tree, a + * sibling entry, or an indicator that the entry in this slot has been moved + * to another location in the tree and the lookup should be restarted. While + * NULL fits the 'data pointer' pattern, it means that there is no entry in + * the tree for this index (no matter what level of the tree it is found at). + * This means that you cannot store NULL in the tree as a value for the index. */ -#define RADIX_TREE_INDIRECT_PTR 1 +#define RADIX_TREE_ENTRY_MASK 3UL +#define RADIX_TREE_INTERNAL_NODE 1UL + /* - * A common use of the radix tree is to store pointers to struct pages; - * but shmem/tmpfs needs also to store swap entries in the same tree: - * those are marked as exceptional entries to distinguish them. + * Most users of the radix tree store pointers but shmem/tmpfs stores swap + * entries in the same tree. They are marked as exceptional entries to + * distinguish them from pointers to struct page. * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it. */ #define RADIX_TREE_EXCEPTIONAL_ENTRY 2 #define RADIX_TREE_EXCEPTIONAL_SHIFT 2 -#define RADIX_DAX_MASK 0xf -#define RADIX_DAX_SHIFT 4 -#define RADIX_DAX_PTE (0x4 | RADIX_TREE_EXCEPTIONAL_ENTRY) -#define RADIX_DAX_PMD (0x8 | RADIX_TREE_EXCEPTIONAL_ENTRY) -#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_MASK) -#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT)) -#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \ - RADIX_DAX_SHIFT | (pmd ? 
RADIX_DAX_PMD : RADIX_DAX_PTE))) - -static inline int radix_tree_is_indirect_ptr(void *ptr) +static inline bool radix_tree_is_internal_node(void *ptr) { - return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR); + return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) == + RADIX_TREE_INTERNAL_NODE; } /*** radix-tree API starts here ***/ #define RADIX_TREE_MAX_TAGS 3 -#ifdef __KERNEL__ +#ifndef RADIX_TREE_MAP_SHIFT #define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6) -#else -#define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */ #endif #define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) @@ -86,16 +80,13 @@ static inline int radix_tree_is_indirect_ptr(void *ptr) #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ RADIX_TREE_MAP_SHIFT)) -/* Height component in node->path */ -#define RADIX_TREE_HEIGHT_SHIFT (RADIX_TREE_MAX_PATH + 1) -#define RADIX_TREE_HEIGHT_MASK ((1UL << RADIX_TREE_HEIGHT_SHIFT) - 1) - /* Internally used bits of node->count */ #define RADIX_TREE_COUNT_SHIFT (RADIX_TREE_MAP_SHIFT + 1) #define RADIX_TREE_COUNT_MASK ((1UL << RADIX_TREE_COUNT_SHIFT) - 1) struct radix_tree_node { - unsigned int path; /* Offset in parent & height from the bottom */ + unsigned char shift; /* Bits remaining in each slot */ + unsigned char offset; /* Slot offset in parent */ unsigned int count; union { struct { @@ -115,13 +106,11 @@ struct radix_tree_node { /* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */ struct radix_tree_root { - unsigned int height; gfp_t gfp_mask; struct radix_tree_node __rcu *rnode; }; #define RADIX_TREE_INIT(mask) { \ - .height = 0, \ .gfp_mask = (mask), \ .rnode = NULL, \ } @@ -131,11 +120,15 @@ struct radix_tree_root { #define INIT_RADIX_TREE(root, mask) \ do { \ - (root)->height = 0; \ (root)->gfp_mask = (mask); \ (root)->rnode = NULL; \ } while (0) +static inline bool radix_tree_empty(struct radix_tree_root *root) +{ + return root->rnode == NULL; +} + /** * Radix-tree synchronization * @@ -231,7 +224,7 @@ static inline void *radix_tree_deref_slot_protected(void **pslot, */ static inline int radix_tree_deref_retry(void *arg) { - return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR); + return unlikely(radix_tree_is_internal_node(arg)); } /** @@ -252,8 +245,7 @@ static inline int radix_tree_exceptional_entry(void *arg) */ static inline int radix_tree_exception(void *arg) { - return unlikely((unsigned long)arg & - (RADIX_TREE_INDIRECT_PTR | RADIX_TREE_EXCEPTIONAL_ENTRY)); + return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK); } /** @@ -266,7 +258,7 @@ static inline int radix_tree_exception(void *arg) */ static inline void radix_tree_replace_slot(void **pslot, void *item) { - BUG_ON(radix_tree_is_indirect_ptr(item)); + BUG_ON(radix_tree_is_internal_node(item)); rcu_assign_pointer(*pslot, item); } @@ -288,9 +280,12 @@ bool __radix_tree_delete_node(struct radix_tree_root *root, struct radix_tree_node *node); void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); void *radix_tree_delete(struct radix_tree_root *, unsigned long); -unsigned int -radix_tree_gang_lookup(struct radix_tree_root *root, void **results, - unsigned long first_index, unsigned int max_items); +struct radix_tree_node *radix_tree_replace_clear_tags( + struct radix_tree_root *root, + unsigned long index, void *entry); +unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, + void **results, unsigned long first_index, + unsigned int max_items); unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root 
*root, void ***results, unsigned long *indices, unsigned long first_index, unsigned int max_items); @@ -327,8 +322,9 @@ static inline void radix_tree_preload_end(void) * struct radix_tree_iter - radix tree iterator state * * @index: index of current slot - * @next_index: next-to-last index for this chunk + * @next_index: one beyond the last index for this chunk * @tags: bit-mask for tag-iterating + * @shift: shift for the node that holds our slots * * This radix tree iterator works in terms of "chunks" of slots. A chunk is a * subinterval of slots contained within one radix tree leaf node. It is @@ -341,8 +337,20 @@ struct radix_tree_iter { unsigned long index; unsigned long next_index; unsigned long tags; +#ifdef CONFIG_RADIX_TREE_MULTIORDER + unsigned int shift; +#endif }; +static inline unsigned int iter_shift(struct radix_tree_iter *iter) +{ +#ifdef CONFIG_RADIX_TREE_MULTIORDER + return iter->shift; +#else + return 0; +#endif +} + #define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */ #define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */ #define RADIX_TREE_ITER_CONTIG 0x0200 /* stop at first hole */ @@ -402,6 +410,12 @@ void **radix_tree_iter_retry(struct radix_tree_iter *iter) return NULL; } +static inline unsigned long +__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots) +{ + return iter->index + (slots << iter_shift(iter)); +} + /** * radix_tree_iter_next - resume iterating when the chunk may be invalid * @iter: iterator state @@ -413,7 +427,7 @@ void **radix_tree_iter_retry(struct radix_tree_iter *iter) static inline __must_check void **radix_tree_iter_next(struct radix_tree_iter *iter) { - iter->next_index = iter->index + 1; + iter->next_index = __radix_tree_iter_add(iter, 1); iter->tags = 0; return NULL; } @@ -427,7 +441,12 @@ void **radix_tree_iter_next(struct radix_tree_iter *iter) static __always_inline long radix_tree_chunk_size(struct radix_tree_iter *iter) { - return iter->next_index - iter->index; + return (iter->next_index - iter->index) >> iter_shift(iter); +} + +static inline struct radix_tree_node *entry_to_node(void *ptr) +{ + return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE); } /** @@ -445,24 +464,49 @@ static __always_inline void ** radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) { if (flags & RADIX_TREE_ITER_TAGGED) { + void *canon = slot; + iter->tags >>= 1; + if (unlikely(!iter->tags)) + return NULL; + while (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) && + radix_tree_is_internal_node(slot[1])) { + if (entry_to_node(slot[1]) == canon) { + iter->tags >>= 1; + iter->index = __radix_tree_iter_add(iter, 1); + slot++; + continue; + } + iter->next_index = __radix_tree_iter_add(iter, 1); + return NULL; + } if (likely(iter->tags & 1ul)) { - iter->index++; + iter->index = __radix_tree_iter_add(iter, 1); return slot + 1; } - if (!(flags & RADIX_TREE_ITER_CONTIG) && likely(iter->tags)) { + if (!(flags & RADIX_TREE_ITER_CONTIG)) { unsigned offset = __ffs(iter->tags); iter->tags >>= offset; - iter->index += offset + 1; + iter->index = __radix_tree_iter_add(iter, offset + 1); return slot + offset + 1; } } else { - long size = radix_tree_chunk_size(iter); + long count = radix_tree_chunk_size(iter); + void *canon = slot; - while (--size > 0) { + while (--count > 0) { slot++; - iter->index++; + iter->index = __radix_tree_iter_add(iter, 1); + + if (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) && + radix_tree_is_internal_node(*slot)) { + if (entry_to_node(*slot) == canon) + continue; + 
iter->next_index = iter->index; + break; + } + if (likely(*slot)) return slot; if (flags & RADIX_TREE_ITER_CONTIG) { @@ -476,34 +520,6 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) } /** - * radix_tree_for_each_chunk - iterate over chunks - * - * @slot: the void** variable for pointer to chunk first slot - * @root: the struct radix_tree_root pointer - * @iter: the struct radix_tree_iter pointer - * @start: iteration starting index - * @flags: RADIX_TREE_ITER_* and tag index - * - * Locks can be released and reacquired between iterations. - */ -#define radix_tree_for_each_chunk(slot, root, iter, start, flags) \ - for (slot = radix_tree_iter_init(iter, start) ; \ - (slot = radix_tree_next_chunk(root, iter, flags)) ;) - -/** - * radix_tree_for_each_chunk_slot - iterate over slots in one chunk - * - * @slot: the void** variable, at the beginning points to chunk first slot - * @iter: the struct radix_tree_iter pointer - * @flags: RADIX_TREE_ITER_*, should be constant - * - * This macro is designed to be nested inside radix_tree_for_each_chunk(). - * @slot points to the radix tree slot, @iter->index contains its index. - */ -#define radix_tree_for_each_chunk_slot(slot, iter, flags) \ - for (; slot ; slot = radix_tree_next_slot(slot, iter, flags)) - -/** * radix_tree_for_each_slot - iterate over non-empty slots * * @slot: the void** variable for pointer to slot diff --git a/include/linux/random.h b/include/linux/random.h index 9c29122037f9..e47e533742b5 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -26,7 +26,6 @@ extern void get_random_bytes(void *buf, int nbytes); extern int add_random_ready_callback(struct random_ready_callback *rdy); extern void del_random_ready_callback(struct random_ready_callback *rdy); extern void get_random_bytes_arch(void *buf, int nbytes); -void generate_random_uuid(unsigned char uuid_out[16]); extern int random_int_secret_init(void); #ifndef MODULE diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 17d4f849c65e..8beb98dcf14f 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -488,6 +488,42 @@ static inline void hlist_add_head_rcu(struct hlist_node *n, } /** + * hlist_add_tail_rcu + * @n: the element to add to the hash list. + * @h: the list to add to. + * + * Description: + * Adds the specified element to the specified hlist, + * while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_add_head_rcu() + * or hlist_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. Regardless of the type of CPU, the + * list-traversal primitive must be guarded by rcu_read_lock(). + */ +static inline void hlist_add_tail_rcu(struct hlist_node *n, + struct hlist_head *h) +{ + struct hlist_node *i, *last = NULL; + + for (i = hlist_first_rcu(h); i; i = hlist_next_rcu(i)) + last = i; + + if (last) { + n->next = last->next; + n->pprev = &last->next; + rcu_assign_pointer(hlist_next_rcu(last), n); + } else { + hlist_add_head_rcu(n, h); + } +} + +/** * hlist_add_before_rcu * @n: the new element to add to the hash list. * @next: the existing element to add the new element before. 
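
Since hlist_add_tail_rcu() above walks the list to find the current tail, a typical user serializes updaters with a lock while readers traverse under rcu_read_lock(); the structure and lock names below are only an illustration:

#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_entry {
	int value;
	struct hlist_node node;
};

static HLIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);

/* Updater: append while holding the lock that serializes list mutation. */
static int demo_add(int value)
{
	struct demo_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return -ENOMEM;
	e->value = value;

	spin_lock(&demo_lock);
	hlist_add_tail_rcu(&e->node, &demo_list);
	spin_unlock(&demo_lock);
	return 0;
}

/* Reader: lockless traversal under RCU. */
static bool demo_contains(int value)
{
	struct demo_entry *e;
	bool found = false;

	rcu_read_lock();
	hlist_for_each_entry_rcu(e, &demo_list, node) {
		if (e->value == value) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}
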
diff --git a/include/linux/regulator/act8865.h b/include/linux/regulator/act8865.h index 2eb386017fa5..113d861a1e4c 100644 --- a/include/linux/regulator/act8865.h +++ b/include/linux/regulator/act8865.h @@ -69,11 +69,13 @@ enum { * @id: regulator id * @name: regulator name * @init_data: regulator init data + * @of_node: device tree node (optional) */ struct act8865_regulator_data { int id; const char *name; struct regulator_init_data *init_data; + struct device_node *of_node; }; /** diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index cd271e89a7e6..fcfa40a6692c 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -93,6 +93,9 @@ struct regulator_linear_range { * @get_current_limit: Get the configured limit for a current-limited regulator. * @set_input_current_limit: Configure an input limit. * + * @set_over_current_protection: Support capability of automatically shutting + * down when detecting an over current event. + * * @set_active_discharge: Set active discharge enable/disable of regulators. * * @set_mode: Set the configured operating mode for the regulator. @@ -255,6 +258,8 @@ enum regulator_type { * * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_ * @vsel_mask: Mask for register bitfield used for selector + * @csel_reg: Register for TPS65218 LS3 current regulator + * @csel_mask: Mask for TPS65218 LS3 current regulator * @apply_reg: Register for initiate voltage change on the output when * using regulator_set_voltage_sel_regmap * @apply_bit: Register bitfield used for initiate voltage change on the @@ -292,7 +297,7 @@ struct regulator_desc { const struct regulator_desc *, struct regulator_config *); int id; - bool continuous_voltage_range; + unsigned int continuous_voltage_range:1; unsigned n_voltages; const struct regulator_ops *ops; int irq; diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 5d627c83a630..ad3e5158e586 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -97,6 +97,7 @@ struct regulator_state { * @ramp_disable: Disable ramp delay when initialising or when setting voltage. * @soft_start: Enable soft start so that voltage ramps slowly. * @pull_down: Enable pull down when regulator is disabled. + * @over_current_protection: Auto disable on over current event. * * @input_uV: Input voltage for regulator when supplied by another regulator. * diff --git a/include/linux/regulator/max8973-regulator.h b/include/linux/regulator/max8973-regulator.h index f6a8a16a0d4d..2fcb9980262a 100644 --- a/include/linux/regulator/max8973-regulator.h +++ b/include/linux/regulator/max8973-regulator.h @@ -54,6 +54,10 @@ * @reg_init_data: The regulator init data. * @control_flags: Control flags which are ORed value of above flags to * configure device. + * @junction_temp_warning: Junction temp in millicelcius on which warning need + * to be set. Thermal functionality is only supported on + * MAX77621. The threshold warning supported by MAX77621 + * are 120C and 140C. * @enable_ext_control: Enable the voltage enable/disable through external * control signal from EN input pin. 
If it is false then * voltage output will be enabled/disabled through EN bit of @@ -67,6 +71,7 @@ struct max8973_regulator_platform_data { struct regulator_init_data *reg_init_data; unsigned long control_flags; + unsigned long junction_temp_warning; bool enable_ext_control; int enable_gpio; int dvs_gpio; diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 9c4e1384f636..1c457a8dd5a6 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -365,6 +365,8 @@ enum rproc_state { /** * enum rproc_crash_type - remote processor crash types * @RPROC_MMUFAULT: iommu fault + * @RPROC_WATCHDOG: watchdog bite + * @RPROC_FATAL_ERROR fatal error * * Each element of the enum is used as an array index. So that, the value of * the elements should be always something sane. @@ -373,6 +375,8 @@ enum rproc_state { */ enum rproc_crash_type { RPROC_MMUFAULT, + RPROC_WATCHDOG, + RPROC_FATAL_ERROR, }; /** diff --git a/include/linux/reservation.h b/include/linux/reservation.h index 5a0b64cf68b4..49d057655d62 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h @@ -120,6 +120,24 @@ reservation_object_get_excl(struct reservation_object *obj) reservation_object_held(obj)); } +static inline struct fence * +reservation_object_get_excl_rcu(struct reservation_object *obj) +{ + struct fence *fence; + unsigned seq; +retry: + seq = read_seqcount_begin(&obj->seq); + rcu_read_lock(); + fence = rcu_dereference(obj->fence_excl); + if (read_seqcount_retry(&obj->seq, seq)) { + rcu_read_unlock(); + goto retry; + } + fence = fence_get(fence); + rcu_read_unlock(); + return fence; +} + int reservation_object_reserve_shared(struct reservation_object *obj); void reservation_object_add_shared_fence(struct reservation_object *obj, struct fence *fence); diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h index a3a5bcdb1d02..b91ba932bbd4 100644 --- a/include/linux/reset-controller.h +++ b/include/linux/reset-controller.h @@ -31,6 +31,7 @@ struct of_phandle_args; * @ops: a pointer to device specific struct reset_control_ops * @owner: kernel module of the reset controller driver * @list: internal list of reset controller devices + * @reset_control_head: head of internal list of requested reset controls * @of_node: corresponding device tree node as phandle target * @of_reset_n_cells: number of cells in reset line specifiers * @of_xlate: translation function to translate from specifier as found in the @@ -41,6 +42,7 @@ struct reset_controller_dev { const struct reset_control_ops *ops; struct module *owner; struct list_head list; + struct list_head reset_control_head; struct device_node *of_node; int of_reset_n_cells; int (*of_xlate)(struct reset_controller_dev *rcdev, diff --git a/include/linux/reset.h b/include/linux/reset.h index c4c097de0ba9..ec0306ce7b92 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -1,8 +1,8 @@ #ifndef _LINUX_RESET_H_ #define _LINUX_RESET_H_ -struct device; -struct device_node; +#include <linux/device.h> + struct reset_control; #ifdef CONFIG_RESET_CONTROLLER @@ -12,9 +12,11 @@ int reset_control_assert(struct reset_control *rstc); int reset_control_deassert(struct reset_control *rstc); int reset_control_status(struct reset_control *rstc); -struct reset_control *reset_control_get(struct device *dev, const char *id); +struct reset_control *__of_reset_control_get(struct device_node *node, + const char *id, int index, int shared); void reset_control_put(struct reset_control *rstc); -struct reset_control 
*devm_reset_control_get(struct device *dev, const char *id); +struct reset_control *__devm_reset_control_get(struct device *dev, + const char *id, int index, int shared); int __must_check device_reset(struct device *dev); @@ -23,24 +25,6 @@ static inline int device_reset_optional(struct device *dev) return device_reset(dev); } -static inline struct reset_control *reset_control_get_optional( - struct device *dev, const char *id) -{ - return reset_control_get(dev, id); -} - -static inline struct reset_control *devm_reset_control_get_optional( - struct device *dev, const char *id) -{ - return devm_reset_control_get(dev, id); -} - -struct reset_control *of_reset_control_get(struct device_node *node, - const char *id); - -struct reset_control *of_reset_control_get_by_index( - struct device_node *node, int index); - #else static inline int reset_control_reset(struct reset_control *rstc) @@ -72,49 +56,191 @@ static inline void reset_control_put(struct reset_control *rstc) WARN_ON(1); } +static inline int __must_check device_reset(struct device *dev) +{ + WARN_ON(1); + return -ENOTSUPP; +} + static inline int device_reset_optional(struct device *dev) { return -ENOTSUPP; } -static inline struct reset_control *__must_check reset_control_get( - struct device *dev, const char *id) +static inline struct reset_control *__of_reset_control_get( + struct device_node *node, + const char *id, int index, int shared) { - WARN_ON(1); return ERR_PTR(-EINVAL); } -static inline struct reset_control *__must_check devm_reset_control_get( +static inline struct reset_control *__devm_reset_control_get( + struct device *dev, + const char *id, int index, int shared) +{ + return ERR_PTR(-EINVAL); +} + +#endif /* CONFIG_RESET_CONTROLLER */ + +/** + * reset_control_get - Lookup and obtain an exclusive reference to a + * reset controller. + * @dev: device to be reset by the controller + * @id: reset line name + * + * Returns a struct reset_control or IS_ERR() condition containing errno. + * If this function is called more then once for the same reset_control it will + * return -EBUSY. + * + * See reset_control_get_shared for details on shared references to + * reset-controls. + * + * Use of id names is optional. + */ +static inline struct reset_control *__must_check reset_control_get( struct device *dev, const char *id) { +#ifndef CONFIG_RESET_CONTROLLER WARN_ON(1); - return ERR_PTR(-EINVAL); +#endif + return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0); } static inline struct reset_control *reset_control_get_optional( struct device *dev, const char *id) { - return ERR_PTR(-ENOTSUPP); + return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0); } -static inline struct reset_control *devm_reset_control_get_optional( +/** + * reset_control_get_shared - Lookup and obtain a shared reference to a + * reset controller. + * @dev: device to be reset by the controller + * @id: reset line name + * + * Returns a struct reset_control or IS_ERR() condition containing errno. + * This function is intended for use with reset-controls which are shared + * between hardware-blocks. + * + * When a reset-control is shared, the behavior of reset_control_assert / + * deassert is changed, the reset-core will keep track of a deassert_count + * and only (re-)assert the reset after reset_control_assert has been called + * as many times as reset_control_deassert was called. Also see the remark + * about shared reset-controls in the reset_control_assert docs. 
+ * + * Calling reset_control_assert without first calling reset_control_deassert + * is not allowed on a shared reset control. Calling reset_control_reset is + * also not allowed on a shared reset control. + * + * Use of id names is optional. + */ +static inline struct reset_control *reset_control_get_shared( struct device *dev, const char *id) { - return ERR_PTR(-ENOTSUPP); + return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1); } +/** + * of_reset_control_get - Lookup and obtain an exclusive reference to a + * reset controller. + * @node: device to be reset by the controller + * @id: reset line name + * + * Returns a struct reset_control or IS_ERR() condition containing errno. + * + * Use of id names is optional. + */ static inline struct reset_control *of_reset_control_get( struct device_node *node, const char *id) { - return ERR_PTR(-ENOTSUPP); + return __of_reset_control_get(node, id, 0, 0); } +/** + * of_reset_control_get_by_index - Lookup and obtain an exclusive reference to + * a reset controller by index. + * @node: device to be reset by the controller + * @index: index of the reset controller + * + * This is to be used to perform a list of resets for a device or power domain + * in whatever order. Returns a struct reset_control or IS_ERR() condition + * containing errno. + */ static inline struct reset_control *of_reset_control_get_by_index( - struct device_node *node, int index) + struct device_node *node, int index) { - return ERR_PTR(-ENOTSUPP); + return __of_reset_control_get(node, NULL, index, 0); } -#endif /* CONFIG_RESET_CONTROLLER */ +/** + * devm_reset_control_get - resource managed reset_control_get() + * @dev: device to be reset by the controller + * @id: reset line name + * + * Managed reset_control_get(). For reset controllers returned from this + * function, reset_control_put() is called automatically on driver detach. + * See reset_control_get() for more information. + */ +static inline struct reset_control *__must_check devm_reset_control_get( + struct device *dev, const char *id) +{ +#ifndef CONFIG_RESET_CONTROLLER + WARN_ON(1); +#endif + return __devm_reset_control_get(dev, id, 0, 0); +} + +static inline struct reset_control *devm_reset_control_get_optional( + struct device *dev, const char *id) +{ + return __devm_reset_control_get(dev, id, 0, 0); +} + +/** + * devm_reset_control_get_by_index - resource managed reset_control_get + * @dev: device to be reset by the controller + * @index: index of the reset controller + * + * Managed reset_control_get(). For reset controllers returned from this + * function, reset_control_put() is called automatically on driver detach. + * See reset_control_get() for more information. + */ +static inline struct reset_control *devm_reset_control_get_by_index( + struct device *dev, int index) +{ + return __devm_reset_control_get(dev, NULL, index, 0); +} + +/** + * devm_reset_control_get_shared - resource managed reset_control_get_shared() + * @dev: device to be reset by the controller + * @id: reset line name + * + * Managed reset_control_get_shared(). For reset controllers returned from + * this function, reset_control_put() is called automatically on driver detach. + * See reset_control_get_shared() for more information. 
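Example (not part of the diff): a minimal sketch of how a consumer driver might use the shared-reset API documented above; the platform device, the "bus" reset name and the error handling are illustrative assumptions.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int foo_probe(struct platform_device *pdev)
{
        struct reset_control *rst;

        /* several IP blocks may sit behind the same reset line */
        rst = devm_reset_control_get_shared(&pdev->dev, "bus");
        if (IS_ERR(rst))
                return PTR_ERR(rst);

        /* the core only re-asserts once every user has asserted again */
        return reset_control_deassert(rst);
}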
+ */ +static inline struct reset_control *devm_reset_control_get_shared( + struct device *dev, const char *id) +{ + return __devm_reset_control_get(dev, id, 0, 1); +} + +/** + * devm_reset_control_get_shared_by_index - resource managed + * reset_control_get_shared + * @dev: device to be reset by the controller + * @index: index of the reset controller + * + * Managed reset_control_get_shared(). For reset controllers returned from + * this function, reset_control_put() is called automatically on driver detach. + * See reset_control_get_shared() for more information. + */ +static inline struct reset_control *devm_reset_control_get_shared_by_index( + struct device *dev, int index) +{ + return __devm_reset_control_get(dev, NULL, index, 1); +} #endif diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 63bd7601b6de..3eef0802a0cd 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -346,7 +346,8 @@ struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, struct bucket_table *old_tbl); int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl); -int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter); +int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter, + gfp_t gfp); void rhashtable_walk_exit(struct rhashtable_iter *iter); int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU); void *rhashtable_walk_next(struct rhashtable_iter *iter); diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h index 82a673905edb..ada50ff36da0 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h @@ -169,7 +169,7 @@ struct rpmsg_driver { int register_rpmsg_device(struct rpmsg_channel *dev); void unregister_rpmsg_device(struct rpmsg_channel *dev); -int register_rpmsg_driver(struct rpmsg_driver *drv); +int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner); void unregister_rpmsg_driver(struct rpmsg_driver *drv); void rpmsg_destroy_ept(struct rpmsg_endpoint *); struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *, @@ -177,6 +177,22 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *, int rpmsg_send_offchannel_raw(struct rpmsg_channel *, u32, u32, void *, int, bool); +/* use a macro to avoid include chaining to get THIS_MODULE */ +#define register_rpmsg_driver(drv) \ + __register_rpmsg_driver(drv, THIS_MODULE) + +/** + * module_rpmsg_driver() - Helper macro for registering an rpmsg driver + * @__rpmsg_driver: rpmsg_driver struct + * + * Helper macro for rpmsg drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_rpmsg_driver(__rpmsg_driver) \ + module_driver(__rpmsg_driver, register_rpmsg_driver, \ + unregister_rpmsg_driver) + /** * rpmsg_send() - send a message across to the remote processor * @rpdev: the rpmsg channel diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 556ec1ea2574..cb3c8fe6acd7 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -286,6 +286,31 @@ size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, #define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist)) /* + * The maximum number of SG segments that we will put inside a + * scatterlist (unless chaining is used). Should ideally fit inside a + * single page, to avoid a higher order allocation. 
We could define this + * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The + * minimum value is 32. + */ +#define SG_CHUNK_SIZE 128 + +/* + * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit + * is totally arbitrary, a setting of 2048 will get you at least 8MB I/Os. + */ +#ifdef CONFIG_ARCH_HAS_SG_CHAIN +#define SG_MAX_SEGMENTS 2048 +#else +#define SG_MAX_SEGMENTS SG_CHUNK_SIZE +#endif + +#ifdef CONFIG_SG_POOL +void sg_free_table_chained(struct sg_table *table, bool first_chunk); +int sg_alloc_table_chained(struct sg_table *table, int nents, + struct scatterlist *first_chunk); +#endif + +/* * sg page iterator * * Iterates over sg entries page-by-page. On each successful iteration, diff --git a/include/linux/sched.h b/include/linux/sched.h index 6cc0df970f1a..21c26e78aec5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -521,6 +521,7 @@ static inline int get_dumpable(struct mm_struct *mm) #define MMF_HAS_UPROBES 19 /* has uprobes */ #define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ +#define MMF_OOM_REAPED 21 /* mm has already been reaped */ #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) @@ -668,6 +669,7 @@ struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; + atomic_t oom_victims; /* # of TIF_MEMDIE threads in this thread group */ struct list_head thread_head; wait_queue_head_t wait_chldexit; /* for wait4() */ @@ -792,7 +794,11 @@ struct signal_struct { struct tty_audit_buf *tty_audit_buf; #endif - oom_flags_t oom_flags; + /* + * Thread is the potential origin of an oom condition; kill first on + * oom + */ + bool oom_flag_origin; short oom_score_adj; /* OOM kill score adjustment */ short oom_score_adj_min; /* OOM kill score adjustment min value. * Only settable by CAP_SYS_RESOURCE. */ @@ -2248,6 +2254,7 @@ static inline void memalloc_noio_restore(unsigned int flags) #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ +#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */ #define TASK_PFA_TEST(name, func) \ @@ -2271,6 +2278,9 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab) TASK_PFA_SET(SPREAD_SLAB, spread_slab) TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) +TASK_PFA_TEST(LMK_WAITING, lmk_waiting) +TASK_PFA_SET(LMK_WAITING, lmk_waiting) + /* * task->jobctl flags */ @@ -2721,14 +2731,24 @@ extern struct mm_struct * mm_alloc(void); /* mmdrop drops the mm and the page tables */ extern void __mmdrop(struct mm_struct *); -static inline void mmdrop(struct mm_struct * mm) +static inline void mmdrop(struct mm_struct *mm) { if (unlikely(atomic_dec_and_test(&mm->mm_count))) __mmdrop(mm); } +static inline bool mmget_not_zero(struct mm_struct *mm) +{ + return atomic_inc_not_zero(&mm->mm_users); +} + /* mmput gets rid of the mappings and all user-space */ extern void mmput(struct mm_struct *); +/* same as above but performs the slow path from the async context.
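Example (not part of the diff): a sketch of what mmget_not_zero(), added just above, is for; it pins an mm only while it still has users, and the caller is assumed to already hold something (e.g. task_lock()) that keeps the mm pointer itself valid.

#include <linux/sched.h>

static void foo_walk_mm(struct mm_struct *mm)
{
        /* fails once mm_users has already dropped to zero */
        if (!mmget_not_zero(mm))
                return;

        down_read(&mm->mmap_sem);
        /* the address space is guaranteed to stay around in here */
        up_read(&mm->mmap_sem);

        mmput(mm);
}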
Can + * be called from the atomic context as well + */ +extern void mmput_async(struct mm_struct *); + /* Grab a reference to a task's mm, if it is not already going away */ extern struct mm_struct *get_task_mm(struct task_struct *task); /* @@ -2757,7 +2777,14 @@ static inline int copy_thread_tls( } #endif extern void flush_thread(void); -extern void exit_thread(void); + +#ifdef CONFIG_HAVE_EXIT_THREAD +extern void exit_thread(struct task_struct *tsk); +#else +static inline void exit_thread(struct task_struct *tsk) +{ +} +#endif extern void exit_files(struct task_struct *); extern void __cleanup_sighand(struct sighand_struct *); @@ -3351,7 +3378,10 @@ struct update_util_data { u64 time, unsigned long util, unsigned long max); }; -void cpufreq_set_update_util_data(int cpu, struct update_util_data *data); +void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, + void (*func)(struct update_util_data *data, u64 time, + unsigned long util, unsigned long max)); +void cpufreq_remove_update_util_hook(int cpu); #endif /* CONFIG_CPU_FREQ */ #endif diff --git a/include/linux/sctp.h b/include/linux/sctp.h index a9414fd49dc6..dacb5e711994 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -705,4 +705,71 @@ typedef struct sctp_auth_chunk { sctp_authhdr_t auth_hdr; } __packed sctp_auth_chunk_t; +struct sctp_info { + __u32 sctpi_tag; + __u32 sctpi_state; + __u32 sctpi_rwnd; + __u16 sctpi_unackdata; + __u16 sctpi_penddata; + __u16 sctpi_instrms; + __u16 sctpi_outstrms; + __u32 sctpi_fragmentation_point; + __u32 sctpi_inqueue; + __u32 sctpi_outqueue; + __u32 sctpi_overall_error; + __u32 sctpi_max_burst; + __u32 sctpi_maxseg; + __u32 sctpi_peer_rwnd; + __u32 sctpi_peer_tag; + __u8 sctpi_peer_capable; + __u8 sctpi_peer_sack; + __u16 __reserved1; + + /* assoc status info */ + __u64 sctpi_isacks; + __u64 sctpi_osacks; + __u64 sctpi_opackets; + __u64 sctpi_ipackets; + __u64 sctpi_rtxchunks; + __u64 sctpi_outofseqtsns; + __u64 sctpi_idupchunks; + __u64 sctpi_gapcnt; + __u64 sctpi_ouodchunks; + __u64 sctpi_iuodchunks; + __u64 sctpi_oodchunks; + __u64 sctpi_iodchunks; + __u64 sctpi_octrlchunks; + __u64 sctpi_ictrlchunks; + + /* primary transport info */ + struct sockaddr_storage sctpi_p_address; + __s32 sctpi_p_state; + __u32 sctpi_p_cwnd; + __u32 sctpi_p_srtt; + __u32 sctpi_p_rto; + __u32 sctpi_p_hbinterval; + __u32 sctpi_p_pathmaxrxt; + __u32 sctpi_p_sackdelay; + __u32 sctpi_p_sackfreq; + __u32 sctpi_p_ssthresh; + __u32 sctpi_p_partial_bytes_acked; + __u32 sctpi_p_flight_size; + __u16 sctpi_p_error; + __u16 __reserved2; + + /* sctp sock info */ + __u32 sctpi_s_autoclose; + __u32 sctpi_s_adaptation_ind; + __u32 sctpi_s_pd_point; + __u8 sctpi_s_nodelay; + __u8 sctpi_s_disable_fragments; + __u8 sctpi_s_v4mapped; + __u8 sctpi_s_frag_interleave; +}; + +struct sctp_infox { + struct sctp_info *sctpinfo; + struct sctp_association *asoc; +}; + #endif /* __LINUX_SCTP_H__ */ diff --git a/include/linux/security.h b/include/linux/security.h index 157f0cb1e4d2..14df373ff2ca 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -71,7 +71,7 @@ struct timezone; /* These functions are in security/commoncap.c */ extern int cap_capable(const struct cred *cred, struct user_namespace *ns, int cap, int audit); -extern int cap_settime(const struct timespec *ts, const struct timezone *tz); +extern int cap_settime(const struct timespec64 *ts, const struct timezone *tz); extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode); extern int cap_ptrace_traceme(struct 
task_struct *parent); extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); @@ -208,7 +208,13 @@ int security_capable_noaudit(const struct cred *cred, struct user_namespace *ns, int security_quotactl(int cmds, int type, int id, struct super_block *sb); int security_quota_on(struct dentry *dentry); int security_syslog(int type); -int security_settime(const struct timespec *ts, const struct timezone *tz); +int security_settime64(const struct timespec64 *ts, const struct timezone *tz); +static inline int security_settime(const struct timespec *ts, const struct timezone *tz) +{ + struct timespec64 ts64 = timespec_to_timespec64(*ts); + + return security_settime64(&ts64, tz); +} int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); int security_bprm_set_creds(struct linux_binprm *bprm); int security_bprm_check(struct linux_binprm *bprm); @@ -222,10 +228,10 @@ int security_sb_remount(struct super_block *sb, void *data); int security_sb_kern_mount(struct super_block *sb, int flags, void *data); int security_sb_show_options(struct seq_file *m, struct super_block *sb); int security_sb_statfs(struct dentry *dentry); -int security_sb_mount(const char *dev_name, struct path *path, +int security_sb_mount(const char *dev_name, const struct path *path, const char *type, unsigned long flags, void *data); int security_sb_umount(struct vfsmount *mnt, int flags); -int security_sb_pivotroot(struct path *old_path, struct path *new_path); +int security_sb_pivotroot(const struct path *old_path, const struct path *new_path); int security_sb_set_mnt_opts(struct super_block *sb, struct security_mnt_opts *opts, unsigned long kern_flags, @@ -462,10 +468,18 @@ static inline int security_syslog(int type) return 0; } +static inline int security_settime64(const struct timespec64 *ts, + const struct timezone *tz) +{ + return cap_settime(ts, tz); +} + static inline int security_settime(const struct timespec *ts, const struct timezone *tz) { - return cap_settime(ts, tz); + struct timespec64 ts64 = timespec_to_timespec64(*ts); + + return cap_settime(&ts64, tz); } static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) @@ -530,7 +544,7 @@ static inline int security_sb_statfs(struct dentry *dentry) return 0; } -static inline int security_sb_mount(const char *dev_name, struct path *path, +static inline int security_sb_mount(const char *dev_name, const struct path *path, const char *type, unsigned long flags, void *data) { @@ -542,8 +556,8 @@ static inline int security_sb_umount(struct vfsmount *mnt, int flags) return 0; } -static inline int security_sb_pivotroot(struct path *old_path, - struct path *new_path) +static inline int security_sb_pivotroot(const struct path *old_path, + const struct path *new_path) { return 0; } @@ -1442,83 +1456,83 @@ static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi #endif /* CONFIG_SECURITY_NETWORK_XFRM */ #ifdef CONFIG_SECURITY_PATH -int security_path_unlink(struct path *dir, struct dentry *dentry); -int security_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode); -int security_path_rmdir(struct path *dir, struct dentry *dentry); -int security_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode, +int security_path_unlink(const struct path *dir, struct dentry *dentry); +int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode); +int security_path_rmdir(const struct path *dir, struct dentry *dentry); +int 
security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev); -int security_path_truncate(struct path *path); -int security_path_symlink(struct path *dir, struct dentry *dentry, +int security_path_truncate(const struct path *path); +int security_path_symlink(const struct path *dir, struct dentry *dentry, const char *old_name); -int security_path_link(struct dentry *old_dentry, struct path *new_dir, +int security_path_link(struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry); -int security_path_rename(struct path *old_dir, struct dentry *old_dentry, - struct path *new_dir, struct dentry *new_dentry, +int security_path_rename(const struct path *old_dir, struct dentry *old_dentry, + const struct path *new_dir, struct dentry *new_dentry, unsigned int flags); -int security_path_chmod(struct path *path, umode_t mode); -int security_path_chown(struct path *path, kuid_t uid, kgid_t gid); -int security_path_chroot(struct path *path); +int security_path_chmod(const struct path *path, umode_t mode); +int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid); +int security_path_chroot(const struct path *path); #else /* CONFIG_SECURITY_PATH */ -static inline int security_path_unlink(struct path *dir, struct dentry *dentry) +static inline int security_path_unlink(const struct path *dir, struct dentry *dentry) { return 0; } -static inline int security_path_mkdir(struct path *dir, struct dentry *dentry, +static inline int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode) { return 0; } -static inline int security_path_rmdir(struct path *dir, struct dentry *dentry) +static inline int security_path_rmdir(const struct path *dir, struct dentry *dentry) { return 0; } -static inline int security_path_mknod(struct path *dir, struct dentry *dentry, +static inline int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev) { return 0; } -static inline int security_path_truncate(struct path *path) +static inline int security_path_truncate(const struct path *path) { return 0; } -static inline int security_path_symlink(struct path *dir, struct dentry *dentry, +static inline int security_path_symlink(const struct path *dir, struct dentry *dentry, const char *old_name) { return 0; } static inline int security_path_link(struct dentry *old_dentry, - struct path *new_dir, + const struct path *new_dir, struct dentry *new_dentry) { return 0; } -static inline int security_path_rename(struct path *old_dir, +static inline int security_path_rename(const struct path *old_dir, struct dentry *old_dentry, - struct path *new_dir, + const struct path *new_dir, struct dentry *new_dentry, unsigned int flags) { return 0; } -static inline int security_path_chmod(struct path *path, umode_t mode) +static inline int security_path_chmod(const struct path *path, umode_t mode) { return 0; } -static inline int security_path_chown(struct path *path, kuid_t uid, kgid_t gid) +static inline int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid) { return 0; } -static inline int security_path_chroot(struct path *path) +static inline int security_path_chroot(const struct path *path) { return 0; } diff --git a/include/linux/selection.h b/include/linux/selection.h index 85193aa8c1e3..8e4624efdb6f 100644 --- a/include/linux/selection.h +++ b/include/linux/selection.h @@ -24,10 +24,10 @@ extern void mouse_report(struct tty_struct * tty, int butt, int mrx, int mry); extern int 
console_blanked; -extern unsigned char color_table[]; -extern int default_red[]; -extern int default_grn[]; -extern int default_blu[]; +extern const unsigned char color_table[]; +extern unsigned char default_red[]; +extern unsigned char default_grn[]; +extern unsigned char default_blu[]; extern unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed); extern u16 screen_glyph(struct vc_data *vc, int offset); diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 434879759725..48ec7651989b 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -36,6 +36,7 @@ struct plat_serial8250_port { void (*set_termios)(struct uart_port *, struct ktermios *new, struct ktermios *old); + unsigned int (*get_mctrl)(struct uart_port *); int (*handle_irq)(struct uart_port *); void (*pm)(struct uart_port *, unsigned int state, unsigned old); @@ -148,6 +149,7 @@ extern int early_serial8250_setup(struct earlycon_device *device, const char *options); extern void serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old); +extern unsigned int serial8250_do_get_mctrl(struct uart_port *port); extern int serial8250_do_startup(struct uart_port *port); extern void serial8250_do_shutdown(struct uart_port *port); extern void serial8250_do_pm(struct uart_port *port, unsigned int state, diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index cbfcf38e220d..a3d7c0d4a03e 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -123,6 +123,7 @@ struct uart_port { void (*set_termios)(struct uart_port *, struct ktermios *new, struct ktermios *old); + unsigned int (*get_mctrl)(struct uart_port *); void (*set_mctrl)(struct uart_port *, unsigned int); int (*startup)(struct uart_port *port); void (*shutdown)(struct uart_port *port); @@ -281,6 +282,8 @@ struct uart_state { enum uart_pm_state pm_state; struct circ_buf xmit; + atomic_t refcount; + wait_queue_head_t remove_wait; struct uart_port *uart_port; }; diff --git a/include/linux/signal.h b/include/linux/signal.h index 3fbe81444d31..b63f63eaa39c 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -28,6 +28,21 @@ struct sigpending { sigset_t signal; }; +#ifndef HAVE_ARCH_COPY_SIGINFO + +#include <linux/string.h> + +static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) +{ + if (from->si_code < 0) + memcpy(to, from, sizeof(*to)); + else + /* _sigchld is currently the largest know union member */ + memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld)); +} + +#endif + /* * Define some primitives to manipulate sigset_t. 
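Example (not part of the diff): one plausible use of the get_mctrl() hook added to serial_8250.h and serial_core.h above, for a board whose DCD/DSR pins are not wired; the forced TIOCM bits and the function name are assumptions.

#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/termios.h>

static unsigned int foo_get_mctrl(struct uart_port *port)
{
        /* let the generic 8250 code read the real modem-status register */
        unsigned int mctrl = serial8250_do_get_mctrl(port);

        /* then report carrier and DSR as always present on this board */
        return mctrl | TIOCM_CAR | TIOCM_DSR;
}

static struct plat_serial8250_port foo_serial_data[] = {
        { .get_mctrl = foo_get_mctrl, /* iobase, irq and friends omitted */ },
        { },
};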
*/ @@ -385,7 +400,9 @@ int unhandled_signal(struct task_struct *tsk, int sig); #else #define rt_sigmask(sig) sigmask(sig) #endif -#define siginmask(sig, mask) (rt_sigmask(sig) & (mask)) + +#define siginmask(sig, mask) \ + ((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask))) #define SIG_KERNEL_ONLY_MASK (\ rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP)) @@ -406,14 +423,10 @@ int unhandled_signal(struct task_struct *tsk, int sig); rt_sigmask(SIGCONT) | rt_sigmask(SIGCHLD) | \ rt_sigmask(SIGWINCH) | rt_sigmask(SIGURG) ) -#define sig_kernel_only(sig) \ - (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_ONLY_MASK)) -#define sig_kernel_coredump(sig) \ - (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_COREDUMP_MASK)) -#define sig_kernel_ignore(sig) \ - (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_IGNORE_MASK)) -#define sig_kernel_stop(sig) \ - (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_STOP_MASK)) +#define sig_kernel_only(sig) siginmask(sig, SIG_KERNEL_ONLY_MASK) +#define sig_kernel_coredump(sig) siginmask(sig, SIG_KERNEL_COREDUMP_MASK) +#define sig_kernel_ignore(sig) siginmask(sig, SIG_KERNEL_IGNORE_MASK) +#define sig_kernel_stop(sig) siginmask(sig, SIG_KERNEL_STOP_MASK) #define sig_user_defined(t, signr) \ (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 15d0df943466..ee38a4127475 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -382,14 +382,10 @@ enum { /* generate software time stamp when entering packet scheduling */ SKBTX_SCHED_TSTAMP = 1 << 6, - - /* generate software timestamp on peer data acknowledgment */ - SKBTX_ACK_TSTAMP = 1 << 7, }; #define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \ - SKBTX_SCHED_TSTAMP | \ - SKBTX_ACK_TSTAMP) + SKBTX_SCHED_TSTAMP) #define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP) /* @@ -465,23 +461,27 @@ enum { /* This indicates the tcp segment has CWR set. 
*/ SKB_GSO_TCP_ECN = 1 << 3, - SKB_GSO_TCPV6 = 1 << 4, + SKB_GSO_TCP_FIXEDID = 1 << 4, + + SKB_GSO_TCPV6 = 1 << 5, - SKB_GSO_FCOE = 1 << 5, + SKB_GSO_FCOE = 1 << 6, - SKB_GSO_GRE = 1 << 6, + SKB_GSO_GRE = 1 << 7, - SKB_GSO_GRE_CSUM = 1 << 7, + SKB_GSO_GRE_CSUM = 1 << 8, - SKB_GSO_IPIP = 1 << 8, + SKB_GSO_IPXIP4 = 1 << 9, - SKB_GSO_SIT = 1 << 9, + SKB_GSO_IPXIP6 = 1 << 10, - SKB_GSO_UDP_TUNNEL = 1 << 10, + SKB_GSO_UDP_TUNNEL = 1 << 11, - SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, + SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12, - SKB_GSO_TUNNEL_REMCSUM = 1 << 12, + SKB_GSO_PARTIAL = 1 << 13, + + SKB_GSO_TUNNEL_REMCSUM = 1 << 14, }; #if BITS_PER_LONG > 32 @@ -1325,6 +1325,16 @@ static inline int skb_header_cloned(const struct sk_buff *skb) return dataref != 1; } +static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri) +{ + might_sleep_if(gfpflags_allow_blocking(pri)); + + if (skb_header_cloned(skb)) + return pskb_expand_head(skb, 0, 0, pri); + + return 0; +} + /** * skb_header_release - release reference to header * @skb: buffer to operate on @@ -2457,7 +2467,7 @@ static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, static inline struct page *dev_alloc_pages(unsigned int order) { - return __dev_alloc_pages(GFP_ATOMIC, order); + return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order); } /** @@ -2475,7 +2485,7 @@ static inline struct page *__dev_alloc_page(gfp_t gfp_mask) static inline struct page *dev_alloc_page(void) { - return __dev_alloc_page(GFP_ATOMIC); + return dev_alloc_pages(0); } /** @@ -2949,7 +2959,12 @@ int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, struct iov_iter *from, int len); int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); void skb_free_datagram(struct sock *sk, struct sk_buff *skb); -void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb); +void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len); +static inline void skb_free_datagram_locked(struct sock *sk, + struct sk_buff *skb) +{ + __skb_free_datagram_locked(sk, skb, 0); +} int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); @@ -2977,6 +2992,8 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb); int skb_ensure_writable(struct sk_buff *skb, int write_len); int skb_vlan_pop(struct sk_buff *skb); int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); +struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, + gfp_t gfp); static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len) { @@ -3584,7 +3601,10 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb) * Keeps track of level of encapsulation of network headers. 
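Example (not part of the diff): the kind of caller the new skb_header_unclone() helper above is meant for, making the header private before rewriting it in place; the field being rewritten is purely illustrative.

#include <linux/if_ether.h>
#include <linux/skbuff.h>

static int foo_rewrite_ethertype(struct sk_buff *skb, __be16 proto)
{
        /* take a private copy of the header if a clone still shares it */
        int err = skb_header_unclone(skb, GFP_ATOMIC);

        if (err)
                return err;

        eth_hdr(skb)->h_proto = proto;
        return 0;
}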
*/ struct skb_gso_cb { - int mac_offset; + union { + int mac_offset; + int data_offset; + }; int encap_level; __wsum csum; __u16 csum_start; diff --git a/include/linux/slab.h b/include/linux/slab.h index 508bd827e6dc..aeb3e6d00a66 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -315,8 +315,8 @@ static __always_inline int kmalloc_index(size_t size) } #endif /* !CONFIG_SLOB */ -void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment; -void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment; +void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc; +void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc; void kmem_cache_free(struct kmem_cache *, void *); /* @@ -339,8 +339,8 @@ static __always_inline void kfree_bulk(size_t size, void **p) } #ifdef CONFIG_NUMA -void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment; -void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment; +void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc; +void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc; #else static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) { @@ -354,12 +354,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f #endif #ifdef CONFIG_TRACING -extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment; +extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc; #ifdef CONFIG_NUMA extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, - int node, size_t size) __assume_slab_alignment; + int node, size_t size) __assume_slab_alignment __malloc; #else static __always_inline void * kmem_cache_alloc_node_trace(struct kmem_cache *s, @@ -392,10 +392,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, } #endif /* CONFIG_TRACING */ -extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment; +extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc; #ifdef CONFIG_TRACING -extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment; +extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc; #else static __always_inline void * kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 9edbbf352340..8694f7a5d92b 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -80,6 +80,10 @@ struct kmem_cache { struct kasan_cache kasan_info; #endif +#ifdef CONFIG_SLAB_FREELIST_RANDOM + void *random_seq; +#endif + struct kmem_cache_node *node[MAX_NUMNODES]; }; diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h index d0cb6d189a0a..cbb0f06c41b2 100644 --- a/include/linux/soc/qcom/smd.h +++ b/include/linux/soc/qcom/smd.h @@ -26,6 +26,8 @@ struct qcom_smd_device { struct qcom_smd_channel *channel; }; +typedef int (*qcom_smd_cb_t)(struct qcom_smd_channel *, const void *, size_t); + /** * struct qcom_smd_driver - smd driver struct * @driver: underlying device driver @@ -42,16 +44,71 @@ struct qcom_smd_driver { int (*probe)(struct qcom_smd_device *dev); void (*remove)(struct qcom_smd_device *dev); - 
int (*callback)(struct qcom_smd_device *, const void *, size_t); + qcom_smd_cb_t callback; }; +#if IS_ENABLED(CONFIG_QCOM_SMD) + int qcom_smd_driver_register(struct qcom_smd_driver *drv); void qcom_smd_driver_unregister(struct qcom_smd_driver *drv); +struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *channel, + const char *name, + qcom_smd_cb_t cb); +void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel); +void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data); +int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len); + + +#else + +static inline int qcom_smd_driver_register(struct qcom_smd_driver *drv) +{ + return -ENXIO; +} + +static inline void qcom_smd_driver_unregister(struct qcom_smd_driver *drv) +{ + /* This shouldn't be possible */ + WARN_ON(1); +} + +static inline struct qcom_smd_channel * +qcom_smd_open_channel(struct qcom_smd_channel *channel, + const char *name, + qcom_smd_cb_t cb) +{ + /* This shouldn't be possible */ + WARN_ON(1); + return NULL; +} + +static inline void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel) +{ + /* This shouldn't be possible */ + WARN_ON(1); + return NULL; +} + +static inline void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data) +{ + /* This shouldn't be possible */ + WARN_ON(1); +} + +static inline int qcom_smd_send(struct qcom_smd_channel *channel, + const void *data, int len) +{ + /* This shouldn't be possible */ + WARN_ON(1); + return -ENXIO; +} + +#endif + #define module_qcom_smd_driver(__smd_driver) \ module_driver(__smd_driver, qcom_smd_driver_register, \ qcom_smd_driver_unregister) -int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len); #endif diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h index f35e1512fcaa..7b88697929e9 100644 --- a/include/linux/soc/qcom/smem_state.h +++ b/include/linux/soc/qcom/smem_state.h @@ -1,12 +1,17 @@ #ifndef __QCOM_SMEM_STATE__ #define __QCOM_SMEM_STATE__ +#include <linux/errno.h> + +struct device_node; struct qcom_smem_state; struct qcom_smem_state_ops { int (*update_bits)(void *, u32, u32); }; +#ifdef CONFIG_QCOM_SMEM_STATE + struct qcom_smem_state *qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit); void qcom_smem_state_put(struct qcom_smem_state *); @@ -15,4 +20,34 @@ int qcom_smem_state_update_bits(struct qcom_smem_state *state, u32 mask, u32 val struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node, const struct qcom_smem_state_ops *ops, void *data); void qcom_smem_state_unregister(struct qcom_smem_state *state); +#else + +static inline struct qcom_smem_state *qcom_smem_state_get(struct device *dev, + const char *con_id, unsigned *bit) +{ + return ERR_PTR(-EINVAL); +} + +static inline void qcom_smem_state_put(struct qcom_smem_state *state) +{ +} + +static inline int qcom_smem_state_update_bits(struct qcom_smem_state *state, + u32 mask, u32 value) +{ + return -EINVAL; +} + +static inline struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node, + const struct qcom_smem_state_ops *ops, void *data) +{ + return ERR_PTR(-EINVAL); +} + +static inline void qcom_smem_state_unregister(struct qcom_smem_state *state) +{ +} + +#endif + #endif diff --git a/include/linux/soc/renesas/rcar-sysc.h b/include/linux/soc/renesas/rcar-sysc.h new file mode 100644 index 000000000000..92fc613ab23d --- /dev/null +++ b/include/linux/soc/renesas/rcar-sysc.h @@ -0,0 +1,16 @@ +#ifndef __LINUX_SOC_RENESAS_RCAR_SYSC_H__ +#define 
__LINUX_SOC_RENESAS_RCAR_SYSC_H__ + +#include <linux/types.h> + +struct rcar_sysc_ch { + u16 chan_offs; + u8 chan_bit; + u8 isr_bit; +}; + +int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch); +int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch); +void __iomem *rcar_sysc_init(phys_addr_t base); + +#endif /* __LINUX_SOC_RENESAS_RCAR_SYSC_H__ */ diff --git a/include/linux/socket.h b/include/linux/socket.h index 73bf6c6a833b..b5cc5a6d7011 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -201,8 +201,9 @@ struct ucred { #define AF_NFC 39 /* NFC sockets */ #define AF_VSOCK 40 /* vSockets */ #define AF_KCM 41 /* Kernel Connection Multiplexor*/ +#define AF_QIPCRTR 42 /* Qualcomm IPC Router */ -#define AF_MAX 42 /* For now.. */ +#define AF_MAX 43 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -249,6 +250,7 @@ struct ucred { #define PF_NFC AF_NFC #define PF_VSOCK AF_VSOCK #define PF_KCM AF_KCM +#define PF_QIPCRTR AF_QIPCRTR #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. */ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 857a9a1d82b5..1f03483f61e5 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -372,6 +372,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @unprepare_message: undo any work done by prepare_message(). * @spi_flash_read: to support spi-controller hardwares that provide * accelerated interface to read from flash devices. + * @flash_read_supported: spi device supports flash read * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS * number. Any individual value may be -ENOENT for CS lines that * are not GPIOs (driven by the SPI controller itself). @@ -529,6 +530,7 @@ struct spi_master { struct spi_message *message); int (*spi_flash_read)(struct spi_device *spi, struct spi_flash_read_message *msg); + bool (*flash_read_supported)(struct spi_device *spi); /* * These hooks are for drivers that use a generic implementation @@ -1158,7 +1160,9 @@ struct spi_flash_read_message { /* SPI core interface for flash read support */ static inline bool spi_flash_read_supported(struct spi_device *spi) { - return spi->master->spi_flash_read ? 
true : false; + return spi->master->spi_flash_read && + (!spi->master->flash_read_supported || + spi->master->flash_read_supported(spi)); } int spi_flash_read(struct spi_device *spi, diff --git a/include/linux/stm.h b/include/linux/stm.h index 1a79ed8e43da..8369d8a8cabd 100644 --- a/include/linux/stm.h +++ b/include/linux/stm.h @@ -50,6 +50,8 @@ struct stm_device; * @sw_end: last STP master available to software * @sw_nchannels: number of STP channels per master * @sw_mmiosz: size of one channel's IO space, for mmap, optional + * @hw_override: masters in the STP stream will not match the ones + * assigned by software, but are up to the STM hardware * @packet: callback that sends an STP packet * @mmio_addr: mmap callback, optional * @link: called when a new stm_source gets linked to us, optional @@ -85,6 +87,7 @@ struct stm_data { unsigned int sw_end; unsigned int sw_nchannels; unsigned int sw_mmiosz; + unsigned int hw_override; ssize_t (*packet)(struct stm_data *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index e6bc30a42a74..ffdaca9c01af 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -137,5 +137,7 @@ struct plat_stmmacenet_data { void (*exit)(struct platform_device *pdev, void *priv); void *bsp_priv; struct stmmac_axi *axi; + int has_gmac4; + bool tso_en; }; #endif diff --git a/include/linux/string.h b/include/linux/string.h index d3993a79a325..26b6f6a66f83 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -119,7 +119,7 @@ char *strreplace(char *s, char old, char new); extern void kfree_const(const void *x); -extern char *kstrdup(const char *s, gfp_t gfp); +extern char *kstrdup(const char *s, gfp_t gfp) __malloc; extern const char *kstrdup_const(const char *s, gfp_t gfp); extern char *kstrndup(const char *s, size_t len, gfp_t gfp); extern void *kmemdup(const void *src, size_t len, gfp_t gfp); diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h index dabe643eb5fa..5ce9538f290e 100644 --- a/include/linux/string_helpers.h +++ b/include/linux/string_helpers.h @@ -3,6 +3,8 @@ #include <linux/types.h> +struct file; + /* Descriptions of the types of units to * print in */ enum string_size_units { @@ -68,4 +70,8 @@ static inline int string_escape_str_any_np(const char *src, char *dst, return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only); } +char *kstrdup_quotable(const char *src, gfp_t gfp); +char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp); +char *kstrdup_quotable_file(struct file *file, gfp_t gfp); + #endif diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 3081339968c3..d6917b896d3a 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -199,7 +199,7 @@ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, struct xdr_buf *rcvbuf); /* svc_rdma_marshal.c */ -extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg *, struct svc_rqst *); +extern int svc_rdma_xdr_decode_req(struct xdr_buf *); extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *, struct rpcrdma_msg *, enum rpcrdma_errcode, __be32 *); diff --git a/include/linux/swap.h b/include/linux/swap.h index ad220359f1b0..0af2bb2028fd 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -316,6 +316,7 @@ extern void lru_cache_add_active_or_unevictable(struct page *page, struct vm_area_struct *vma); /* linux/mm/vmscan.c */ +extern unsigned long zone_reclaimable_pages(struct zone *zone); 
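Example (not part of the diff): how the kstrdup_quotable_cmdline() helper declared in string_helpers.h above might be used to log an escaped command line; the message text is an assumption.

#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

static void foo_log_cmdline(struct task_struct *task)
{
        /* returns an escaped, kmalloc()ed copy, or NULL on failure */
        char *cmdline = kstrdup_quotable_cmdline(task, GFP_KERNEL);

        if (cmdline) {
                pr_info("request issued by \"%s\"\n", cmdline);
                kfree(cmdline);
        }
}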
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); extern int __isolate_lru_page(struct page *page, isolate_mode_t mode); diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h new file mode 100644 index 000000000000..c6ffe8b0725c --- /dev/null +++ b/include/linux/sync_file.h @@ -0,0 +1,57 @@ +/* + * include/linux/sync_file.h + * + * Copyright (C) 2012 Google, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_SYNC_FILE_H +#define _LINUX_SYNC_FILE_H + +#include <linux/types.h> +#include <linux/kref.h> +#include <linux/ktime.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/fence.h> + +struct sync_file_cb { + struct fence_cb cb; + struct fence *fence; + struct sync_file *sync_file; +}; + +/** + * struct sync_file - sync file to export to the userspace + * @file: file representing this fence + * @kref: reference count on fence. + * @name: name of sync_file. Useful for debugging + * @sync_file_list: membership in global file list + * @num_fences: number of sync_pts in the fence + * @wq: wait queue for fence signaling + * @status: 0: signaled, >0:active, <0: error + * @cbs: sync_pts callback information + */ +struct sync_file { + struct file *file; + struct kref kref; + char name[32]; +#ifdef CONFIG_DEBUG_FS + struct list_head sync_file_list; +#endif + int num_fences; + + wait_queue_head_t wq; + atomic_t status; + + struct sync_file_cb cbs[]; +}; + +struct sync_file *sync_file_create(struct fence *fence); + +#endif /* _LINUX_SYNC_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index d795472c54d8..d02239022bd0 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -371,10 +371,10 @@ asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese, size_t sigsetsize); asmlinkage long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t __user *uinfo); -asmlinkage long sys_kill(int pid, int sig); -asmlinkage long sys_tgkill(int tgid, int pid, int sig); -asmlinkage long sys_tkill(int pid, int sig); -asmlinkage long sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo); +asmlinkage long sys_kill(pid_t pid, int sig); +asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig); +asmlinkage long sys_tkill(pid_t pid, int sig); +asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo); asmlinkage long sys_sgetmask(void); asmlinkage long sys_ssetmask(int newmask); asmlinkage long sys_signal(int sig, __sighandler_t handler); diff --git a/include/linux/time64.h b/include/linux/time64.h index 367d5af899e8..7e5d2fa9ac46 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -65,7 +65,6 @@ static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec * # define timespec64_equal timespec_equal # define timespec64_compare timespec_compare # define set_normalized_timespec64 set_normalized_timespec -# define timespec64_add_safe timespec_add_safe # define timespec64_add timespec_add # define timespec64_sub timespec_sub # define timespec64_valid timespec_valid @@ -134,15 +133,6 @@ static inline int timespec64_compare(const struct timespec64 *lhs, const struct extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec); -/* - * timespec64_add_safe assumes 
both values are positive and checks for - * overflow. It will return TIME_T_MAX if the returned value would be - * smaller then either of the arguments. - */ -extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs, - const struct timespec64 rhs); - - static inline struct timespec64 timespec64_add(struct timespec64 lhs, struct timespec64 rhs) { @@ -224,4 +214,11 @@ static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns) #endif +/* + * timespec64_add_safe assumes both values are positive and checks for + * overflow. It will return TIME64_MAX in case of overflow. + */ +extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs, + const struct timespec64 rhs); + #endif /* _LINUX_TIME64_H */ diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 96f37bee3bc1..37dbacf84849 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -1,6 +1,8 @@ #ifndef _LINUX_TIMEKEEPING_H #define _LINUX_TIMEKEEPING_H +#include <asm-generic/errno-base.h> + /* Included from linux/ktime.h */ void timekeeping_init(void); @@ -11,8 +13,19 @@ extern int timekeeping_suspended; */ extern void do_gettimeofday(struct timeval *tv); extern int do_settimeofday64(const struct timespec64 *ts); -extern int do_sys_settimeofday(const struct timespec *tv, - const struct timezone *tz); +extern int do_sys_settimeofday64(const struct timespec64 *tv, + const struct timezone *tz); +static inline int do_sys_settimeofday(const struct timespec *tv, + const struct timezone *tz) +{ + struct timespec64 ts64; + + if (!tv) + return -EINVAL; + + ts64 = timespec_to_timespec64(*tv); + return do_sys_settimeofday64(&ts64, tz); +} /* * Kernel time accessors diff --git a/include/linux/timer.h b/include/linux/timer.h index 61aa61dc410c..20ac746f3eb3 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -145,6 +145,8 @@ static inline void init_timer_on_stack_key(struct timer_list *timer, #define setup_timer(timer, fn, data) \ __setup_timer((timer), (fn), (data), 0) +#define setup_deferrable_timer(timer, fn, data) \ + __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE) #define setup_timer_on_stack(timer, fn, data) \ __setup_timer_on_stack((timer), (fn), (data), 0) #define setup_deferrable_timer_on_stack(timer, fn, data) \ diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 0810f81b6db2..be007610ceb0 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -154,21 +154,6 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer, struct trace_event_file *trace_file, int type, unsigned long len, unsigned long flags, int pc); -struct ring_buffer_event * -trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer, - int type, unsigned long len, - unsigned long flags, int pc); -void trace_buffer_unlock_commit(struct trace_array *tr, - struct ring_buffer *buffer, - struct ring_buffer_event *event, - unsigned long flags, int pc); -void trace_buffer_unlock_commit_regs(struct trace_array *tr, - struct ring_buffer *buffer, - struct ring_buffer_event *event, - unsigned long flags, int pc, - struct pt_regs *regs); -void trace_current_buffer_discard_commit(struct ring_buffer *buffer, - struct ring_buffer_event *event); void tracing_record_cmdline(struct task_struct *tsk); @@ -229,7 +214,6 @@ enum { TRACE_EVENT_FL_NO_SET_FILTER_BIT, TRACE_EVENT_FL_IGNORE_ENABLE_BIT, TRACE_EVENT_FL_WAS_ENABLED_BIT, - TRACE_EVENT_FL_USE_CALL_FILTER_BIT, TRACE_EVENT_FL_TRACEPOINT_BIT, 
TRACE_EVENT_FL_KPROBE_BIT, TRACE_EVENT_FL_UPROBE_BIT, @@ -244,7 +228,6 @@ enum { * WAS_ENABLED - Set and stays set when an event was ever enabled * (used for module unloading, if a module event is enabled, * it is best to clear the buffers that used it). - * USE_CALL_FILTER - For trace internal events, don't use file filter * TRACEPOINT - Event is a tracepoint * KPROBE - Event is a kprobe * UPROBE - Event is a uprobe @@ -255,7 +238,6 @@ enum { TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT), - TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT), TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT), TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT), TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT), @@ -407,16 +389,12 @@ enum event_trigger_type { ETT_SNAPSHOT = (1 << 1), ETT_STACKTRACE = (1 << 2), ETT_EVENT_ENABLE = (1 << 3), + ETT_EVENT_HIST = (1 << 4), + ETT_HIST_ENABLE = (1 << 5), }; extern int filter_match_preds(struct event_filter *filter, void *rec); -extern int filter_check_discard(struct trace_event_file *file, void *rec, - struct ring_buffer *buffer, - struct ring_buffer_event *event); -extern int call_filter_check_discard(struct trace_event_call *call, void *rec, - struct ring_buffer *buffer, - struct ring_buffer_event *event); extern enum event_trigger_type event_triggers_call(struct trace_event_file *file, void *rec); extern void event_triggers_post_call(struct trace_event_file *file, @@ -450,100 +428,6 @@ trace_trigger_soft_disabled(struct trace_event_file *file) return false; } -/* - * Helper function for event_trigger_unlock_commit{_regs}(). - * If there are event triggers attached to this event that requires - * filtering against its fields, then they wil be called as the - * entry already holds the field information of the current event. - * - * It also checks if the event should be discarded or not. - * It is to be discarded if the event is soft disabled and the - * event was only recorded to process triggers, or if the event - * filter is active and this event did not match the filters. - * - * Returns true if the event is discarded, false otherwise. - */ -static inline bool -__event_trigger_test_discard(struct trace_event_file *file, - struct ring_buffer *buffer, - struct ring_buffer_event *event, - void *entry, - enum event_trigger_type *tt) -{ - unsigned long eflags = file->flags; - - if (eflags & EVENT_FILE_FL_TRIGGER_COND) - *tt = event_triggers_call(file, entry); - - if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags)) - ring_buffer_discard_commit(buffer, event); - else if (!filter_check_discard(file, entry, buffer, event)) - return false; - - return true; -} - -/** - * event_trigger_unlock_commit - handle triggers and finish event commit - * @file: The file pointer assoctiated to the event - * @buffer: The ring buffer that the event is being written to - * @event: The event meta data in the ring buffer - * @entry: The event itself - * @irq_flags: The state of the interrupts at the start of the event - * @pc: The state of the preempt count at the start of the event. - * - * This is a helper function to handle triggers that require data - * from the event itself. It also tests the event against filters and - * if the event is soft disabled and should be discarded. 
- */ -static inline void -event_trigger_unlock_commit(struct trace_event_file *file, - struct ring_buffer *buffer, - struct ring_buffer_event *event, - void *entry, unsigned long irq_flags, int pc) -{ - enum event_trigger_type tt = ETT_NONE; - - if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) - trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc); - - if (tt) - event_triggers_post_call(file, tt, entry); -} - -/** - * event_trigger_unlock_commit_regs - handle triggers and finish event commit - * @file: The file pointer assoctiated to the event - * @buffer: The ring buffer that the event is being written to - * @event: The event meta data in the ring buffer - * @entry: The event itself - * @irq_flags: The state of the interrupts at the start of the event - * @pc: The state of the preempt count at the start of the event. - * - * This is a helper function to handle triggers that require data - * from the event itself. It also tests the event against filters and - * if the event is soft disabled and should be discarded. - * - * Same as event_trigger_unlock_commit() but calls - * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit(). - */ -static inline void -event_trigger_unlock_commit_regs(struct trace_event_file *file, - struct ring_buffer *buffer, - struct ring_buffer_event *event, - void *entry, unsigned long irq_flags, int pc, - struct pt_regs *regs) -{ - enum event_trigger_type tt = ETT_NONE; - - if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) - trace_buffer_unlock_commit_regs(file->tr, buffer, event, - irq_flags, pc, regs); - - if (tt) - event_triggers_post_call(file, tt, entry); -} - #ifdef CONFIG_BPF_EVENTS unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx); #else @@ -569,6 +453,7 @@ extern int trace_define_field(struct trace_event_call *call, const char *type, int is_signed, int filter_type); extern int trace_add_event_call(struct trace_event_call *call); extern int trace_remove_event_call(struct trace_event_call *call); +extern int trace_event_get_offsets(struct trace_event_call *call); #define is_signed_type(type) (((type)(-1)) < (type)1) @@ -605,15 +490,20 @@ extern void perf_trace_del(struct perf_event *event, int flags); extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str); extern void ftrace_profile_free_filter(struct perf_event *event); -extern void *perf_trace_buf_prepare(int size, unsigned short type, - struct pt_regs **regs, int *rctxp); +void perf_trace_buf_update(void *record, u16 type); +void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp); + +void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx, + struct trace_event_call *call, u64 count, + struct pt_regs *regs, struct hlist_head *head, + struct task_struct *task); static inline void -perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, +perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type, u64 count, struct pt_regs *regs, void *head, struct task_struct *task) { - perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task); + perf_tp_event(type, count, raw_data, size, regs, head, rctx, task); } #endif diff --git a/include/linux/tty.h b/include/linux/tty.h index 3b09f235db66..40144f382516 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -228,7 +228,8 @@ struct tty_port { int count; /* Usage count */ wait_queue_head_t open_wait; /* Open waiters */ wait_queue_head_t delta_msr_wait; /* Modem status change */ - unsigned 
long flags; /* TTY flags ASY_*/ + unsigned long flags; /* User TTY flags ASYNC_ */ + unsigned long iflags; /* Internal flags TTY_PORT_ */ unsigned char console:1, /* port is a console */ low_latency:1; /* optional: tune for latency */ struct mutex mutex; /* Locking */ @@ -242,6 +243,18 @@ struct tty_port { struct kref kref; /* Ref counter */ }; +/* tty_port::iflags bits -- use atomic bit ops */ +#define TTY_PORT_INITIALIZED 0 /* device is initialized */ +#define TTY_PORT_SUSPENDED 1 /* device is suspended */ +#define TTY_PORT_ACTIVE 2 /* device is open */ + +/* + * uart drivers: use the uart_port::status field and the UPSTAT_* defines + * for s/w-based flow control steering and carrier detection status + */ +#define TTY_PORT_CTS_FLOW 3 /* h/w flow control enabled */ +#define TTY_PORT_CHECK_CD 4 /* carrier detect enabled */ + /* * Where all of the state associated with a tty is kept while the tty * is open. Since the termios state should be kept even if the tty @@ -338,7 +351,6 @@ struct tty_file_private { #define TTY_OTHER_CLOSED 2 /* Other side (if any) has closed */ #define TTY_EXCLUSIVE 3 /* Exclusive open mode */ #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */ -#define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */ #define TTY_LDISC_OPEN 11 /* Line discipline is open */ #define TTY_PTY_LOCK 16 /* pty private */ #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ @@ -360,6 +372,16 @@ static inline void tty_set_flow_change(struct tty_struct *tty, int val) smp_mb(); } +static inline bool tty_io_error(struct tty_struct *tty) +{ + return test_bit(TTY_IO_ERROR, &tty->flags); +} + +static inline bool tty_throttled(struct tty_struct *tty) +{ + return test_bit(TTY_THROTTLED, &tty->flags); +} + #ifdef CONFIG_TTY extern void console_init(void); extern void tty_kref_put(struct tty_struct *tty); @@ -371,6 +393,7 @@ extern void proc_clear_tty(struct task_struct *p); extern struct tty_struct *get_current_tty(void); /* tty_io.c */ extern int __init tty_init(void); +extern const char *tty_name(const struct tty_struct *tty); #else static inline void console_init(void) { } @@ -391,6 +414,8 @@ static inline struct tty_struct *get_current_tty(void) /* tty_io.c */ static inline int __init tty_init(void) { return 0; } +static inline const char *tty_name(const struct tty_struct *tty) +{ return "(none)"; } #endif extern struct ktermios tty_std_termios; @@ -415,7 +440,6 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty) return tty; } -extern const char *tty_name(const struct tty_struct *tty); extern const char *tty_driver_name(const struct tty_struct *tty); extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); extern int __tty_check_change(struct tty_struct *tty, int sig); @@ -457,6 +481,7 @@ extern void tty_buffer_init(struct tty_port *port); extern void tty_buffer_set_lock_subclass(struct tty_port *port); extern bool tty_buffer_restart_work(struct tty_port *port); extern bool tty_buffer_cancel_work(struct tty_port *port); +extern void tty_buffer_flush_work(struct tty_port *port); extern speed_t tty_termios_baud_rate(struct ktermios *termios); extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); extern void tty_termios_encode_baud_rate(struct ktermios *termios, @@ -537,7 +562,67 @@ static inline struct tty_port *tty_port_get(struct tty_port *port) /* If the cts flow control is enabled, return true. 
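Example (not part of the diff): the "use atomic bit ops" rule for the new tty_port::iflags bits above, sketched for a suspend/resume path; the accessor wrappers defined just below do the same thing.

#include <linux/bitops.h>
#include <linux/tty.h>

static void foo_port_suspend(struct tty_port *port)
{
        /* no lock needed; the iflags bits are manipulated atomically */
        set_bit(TTY_PORT_SUSPENDED, &port->iflags);
}

static bool foo_port_resume(struct tty_port *port)
{
        /* returns true if the port was in fact suspended */
        return test_and_clear_bit(TTY_PORT_SUSPENDED, &port->iflags);
}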
*/ static inline bool tty_port_cts_enabled(struct tty_port *port) { - return port->flags & ASYNC_CTS_FLOW; + return test_bit(TTY_PORT_CTS_FLOW, &port->iflags); +} + +static inline void tty_port_set_cts_flow(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_CTS_FLOW, &port->iflags); + else + clear_bit(TTY_PORT_CTS_FLOW, &port->iflags); +} + +static inline bool tty_port_active(struct tty_port *port) +{ + return test_bit(TTY_PORT_ACTIVE, &port->iflags); +} + +static inline void tty_port_set_active(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_ACTIVE, &port->iflags); + else + clear_bit(TTY_PORT_ACTIVE, &port->iflags); +} + +static inline bool tty_port_check_carrier(struct tty_port *port) +{ + return test_bit(TTY_PORT_CHECK_CD, &port->iflags); +} + +static inline void tty_port_set_check_carrier(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_CHECK_CD, &port->iflags); + else + clear_bit(TTY_PORT_CHECK_CD, &port->iflags); +} + +static inline bool tty_port_suspended(struct tty_port *port) +{ + return test_bit(TTY_PORT_SUSPENDED, &port->iflags); +} + +static inline void tty_port_set_suspended(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_SUSPENDED, &port->iflags); + else + clear_bit(TTY_PORT_SUSPENDED, &port->iflags); +} + +static inline bool tty_port_initialized(struct tty_port *port) +{ + return test_bit(TTY_PORT_INITIALIZED, &port->iflags); +} + +static inline void tty_port_set_initialized(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_INITIALIZED, &port->iflags); + else + clear_bit(TTY_PORT_INITIALIZED, &port->iflags); } extern struct tty_struct *tty_port_tty_get(struct tty_port *port); diff --git a/include/linux/types.h b/include/linux/types.h index 70dd3dfde631..baf718324f4a 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -156,7 +156,6 @@ typedef u32 dma_addr_t; typedef unsigned __bitwise__ gfp_t; typedef unsigned __bitwise__ fmode_t; -typedef unsigned __bitwise__ oom_flags_t; #ifdef CONFIG_PHYS_ADDR_T_64BIT typedef u64 phys_addr_t; diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h index df89c9bcba7d..d3a2bb712af3 100644 --- a/include/linux/u64_stats_sync.h +++ b/include/linux/u64_stats_sync.h @@ -89,6 +89,20 @@ static inline void u64_stats_update_end(struct u64_stats_sync *syncp) #endif } +static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + raw_write_seqcount_begin(&syncp->seq); +#endif +} + +static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + raw_write_seqcount_end(&syncp->seq); +#endif +} + static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) diff --git a/include/linux/udp.h b/include/linux/udp.h index 87c094961bd5..d1fd8cd39478 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -71,6 +71,14 @@ struct udp_sock { */ int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); void (*encap_destroy)(struct sock *sk); + + /* GRO functions for UDP socket */ + struct sk_buff ** (*gro_receive)(struct sock *sk, + struct sk_buff **head, + struct sk_buff *skb); + int (*gro_complete)(struct sock *sk, + struct sk_buff *skb, + int nhoff); }; static inline struct udp_sock *udp_sk(const struct sock *sk) @@ -98,11 +106,11 @@ static inline bool udp_get_no_check6_rx(struct sock *sk) return udp_sk(sk)->no_check6_rx; } -#define 
udp_portaddr_for_each_entry(__sk, node, list) \ - hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node) +#define udp_portaddr_for_each_entry(__sk, list) \ + hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node) -#define udp_portaddr_for_each_entry_rcu(__sk, node, list) \ - hlist_nulls_for_each_entry_rcu(__sk, node, list, __sk_common.skc_portaddr_node) +#define udp_portaddr_for_each_entry_rcu(__sk, list) \ + hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node) #define IS_UDPLITE(__sk) (udp_sk(__sk)->pcflag) diff --git a/include/linux/usb.h b/include/linux/usb.h index 6a9a0c28415d..eba1f10e8cfd 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -374,13 +374,12 @@ struct usb_bus { int devnum_next; /* Next open device number in * round-robin allocation */ + struct mutex devnum_next_mutex; /* devnum_next mutex */ struct usb_devmap devmap; /* device address allocation map */ struct usb_device *root_hub; /* Root hub */ struct usb_bus *hs_companion; /* Companion EHCI bus, if any */ - struct mutex usb_address0_mutex; /* unaddressed device mutex */ - int bandwidth_allocated; /* on this bus: how much of the time * reserved for periodic (intr/iso) * requests is used, on average? @@ -720,7 +719,7 @@ extern void usb_enable_ltm(struct usb_device *udev); static inline bool usb_device_supports_ltm(struct usb_device *udev) { - if (udev->speed != USB_SPEED_SUPER || !udev->bos || !udev->bos->ss_cap) + if (udev->speed < USB_SPEED_SUPER || !udev->bos || !udev->bos->ss_cap) return false; return udev->bos->ss_cap->bmAttributes & USB_LTM_SUPPORT; } @@ -1069,7 +1068,7 @@ struct usbdrv_wrap { * for interfaces bound to this driver. * @soft_unbind: if set to 1, the USB core will not kill URBs and disable * endpoints before calling the driver's disconnect method. - * @disable_hub_initiated_lpm: if set to 0, the USB core will not allow hubs + * @disable_hub_initiated_lpm: if set to 1, the USB core will not allow hubs * to initiate lower power link state transitions when an idle timeout * occurs. Device-initiated USB 3.0 link PM will still be allowed. * @@ -1569,7 +1568,7 @@ static inline void usb_fill_bulk_urb(struct urb *urb, * Initializes a interrupt urb with the proper information needed to submit * it to a device. * - * Note that High Speed and SuperSpeed interrupt endpoints use a logarithmic + * Note that High Speed and SuperSpeed(+) interrupt endpoints use a logarithmic * encoding of the endpoint interval, and express polling intervals in * microframes (eight per millisecond) rather than in frames (one per * millisecond). 
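The kernel-doc touched above describes the logarithmic interval encoding used by high-speed and SuperSpeed(+) interrupt endpoints: an interval value of N selects a polling period of 2^(N-1) microframes of 125 us each, so 4 means 8 microframes, i.e. one millisecond. A minimal caller sketch under that encoding follows; the helper name and its parameters are placeholders for illustration and are not part of this patch.

#include <linux/gfp.h>
#include <linux/usb.h>

/* Hypothetical helper: poll an interrupt endpoint once per millisecond. */
static int example_start_int_urb(struct urb *urb, struct usb_device *udev,
				 struct usb_endpoint_descriptor *ep,
				 void *buf, int buf_len,
				 usb_complete_t my_intr_complete, void *my_ctx)
{
	/*
	 * For high-speed and SuperSpeed(+) devices usb_fill_int_urb()
	 * clamps the interval to the 1..16 range (see the next hunk);
	 * 4 -> 2^(4-1) = 8 microframes = 1 ms.
	 */
	usb_fill_int_urb(urb, udev,
			 usb_rcvintpipe(udev, ep->bEndpointAddress),
			 buf, buf_len, my_intr_complete, my_ctx,
			 4 /* 1 ms */);

	return usb_submit_urb(urb, GFP_KERNEL);
}

The hunk below extends the same handling from USB_SPEED_SUPER to any speed at or above it, covering SuperSpeed Plus devices.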
@@ -1595,7 +1594,7 @@ static inline void usb_fill_int_urb(struct urb *urb, urb->complete = complete_fn; urb->context = context; - if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) { + if (dev->speed == USB_SPEED_HIGH || dev->speed >= USB_SPEED_SUPER) { /* make sure interval is within allowed range */ interval = clamp(interval, 1, 16); diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 5d4e151c49bf..457651bf45b0 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -1223,9 +1223,13 @@ int usb_otg_descriptor_init(struct usb_gadget *gadget, /* utility to simplify map/unmap of usb_requests to/from DMA */ +extern int usb_gadget_map_request_by_dev(struct device *dev, + struct usb_request *req, int is_in); extern int usb_gadget_map_request(struct usb_gadget *gadget, struct usb_request *req, int is_in); +extern void usb_gadget_unmap_request_by_dev(struct device *dev, + struct usb_request *req, int is_in); extern void usb_gadget_unmap_request(struct usb_gadget *gadget, struct usb_request *req, int is_in); diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index b98f831dcda3..66fc13705ab7 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -181,6 +181,7 @@ struct usb_hcd { * bandwidth_mutex should be dropped after a successful control message * to the device, or resetting the bandwidth after a failed attempt. */ + struct mutex *address0_mutex; struct mutex *bandwidth_mutex; struct usb_hcd *shared_hcd; struct usb_hcd *primary_hcd; diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h index 24198e16f849..7a0350535cb1 100644 --- a/include/linux/usb/otg-fsm.h +++ b/include/linux/usb/otg-fsm.h @@ -72,37 +72,113 @@ enum otg_fsm_timer { NUM_OTG_FSM_TIMERS, }; -/* OTG state machine according to the OTG spec */ +/** + * struct otg_fsm - OTG state machine according to the OTG spec + * + * OTG hardware Inputs + * + * Common inputs for A and B device + * @id: TRUE for B-device, FALSE for A-device. + * @adp_change: TRUE when current ADP measurement (n) value, compared to the + * ADP measurement taken at n-2, differs by more than CADP_THR + * @power_up: TRUE when the OTG device first powers up its USB system and + * ADP measurement taken if ADP capable + * + * A-Device state inputs + * @a_srp_det: TRUE if the A-device detects SRP + * @a_vbus_vld: TRUE when VBUS voltage is in regulation + * @b_conn: TRUE if the A-device detects connection from the B-device + * @a_bus_resume: TRUE when the B-device detects that the A-device is signaling + * a resume (K state) + * B-Device state inputs + * @a_bus_suspend: TRUE when the B-device detects that the A-device has put the + * bus into suspend + * @a_conn: TRUE if the B-device detects a connection from the A-device + * @b_se0_srp: TRUE when the line has been at SE0 for more than the minimum + * time before generating SRP + * @b_ssend_srp: TRUE when the VBUS has been below VOTG_SESS_VLD for more than + * the minimum time before generating SRP + * @b_sess_vld: TRUE when the B-device detects that the voltage on VBUS is + * above VOTG_SESS_VLD + * @test_device: TRUE when the B-device switches to B-Host and detects an OTG + * test device. This must be set by host/hub driver + * + * Application inputs (A-Device) + * @a_bus_drop: TRUE when A-device application needs to power down the bus + * @a_bus_req: TRUE when A-device application wants to use the bus. 
+ * FALSE to suspend the bus + * + * Application inputs (B-Device) + * @b_bus_req: TRUE during the time that the Application running on the + * B-device wants to use the bus + * + * Auxilary inputs (OTG v1.3 only. Obsolete now.) + * @a_sess_vld: TRUE if the A-device detects that VBUS is above VA_SESS_VLD + * @b_bus_suspend: TRUE when the A-device detects that the B-device has put + * the bus into suspend + * @b_bus_resume: TRUE when the A-device detects that the B-device is signaling + * resume on the bus + * + * OTG Output status. Read only for users. Updated by OTG FSM helpers defined + * in this file + * + * Outputs for Both A and B device + * @drv_vbus: TRUE when A-device is driving VBUS + * @loc_conn: TRUE when the local device has signaled that it is connected + * to the bus + * @loc_sof: TRUE when the local device is generating activity on the bus + * @adp_prb: TRUE when the local device is in the process of doing + * ADP probing + * + * Outputs for B-device state + * @adp_sns: TRUE when the B-device is in the process of carrying out + * ADP sensing + * @data_pulse: TRUE when the B-device is performing data line pulsing + * + * Internal Variables + * + * a_set_b_hnp_en: TRUE when the A-device has successfully set the + * b_hnp_enable bit in the B-device. + * Unused as OTG fsm uses otg->host->b_hnp_enable instead + * b_srp_done: TRUE when the B-device has completed initiating SRP + * b_hnp_enable: TRUE when the B-device has accepted the + * SetFeature(b_hnp_enable) B-device. + * Unused as OTG fsm uses otg->gadget->b_hnp_enable instead + * a_clr_err: Asserted (by application ?) to clear a_vbus_err due to an + * overcurrent condition and causes the A-device to transition + * to a_wait_vfall + */ struct otg_fsm { /* Input */ int id; int adp_change; int power_up; - int test_device; - int a_bus_drop; - int a_bus_req; int a_srp_det; int a_vbus_vld; int b_conn; int a_bus_resume; int a_bus_suspend; int a_conn; - int b_bus_req; int b_se0_srp; int b_ssend_srp; int b_sess_vld; + int test_device; + int a_bus_drop; + int a_bus_req; + int b_bus_req; + /* Auxilary inputs */ int a_sess_vld; int b_bus_resume; int b_bus_suspend; /* Output */ - int data_pulse; int drv_vbus; int loc_conn; int loc_sof; int adp_prb; int adp_sns; + int data_pulse; /* Internal variables */ int a_set_b_hnp_en; @@ -110,7 +186,7 @@ struct otg_fsm { int b_hnp_enable; int a_clr_err; - /* Informative variables */ + /* Informative variables. All unused as of now */ int a_bus_drop_inf; int a_bus_req_inf; int a_clr_err_inf; @@ -134,6 +210,7 @@ struct otg_fsm { struct mutex lock; u8 *host_req_flag; struct delayed_work hnp_polling_work; + bool state_changed; }; struct otg_fsm_ops { diff --git a/include/linux/uuid.h b/include/linux/uuid.h index 6df2509033d7..2d095fc60204 100644 --- a/include/linux/uuid.h +++ b/include/linux/uuid.h @@ -1,7 +1,7 @@ /* * UUID/GUID definition * - * Copyright (C) 2010, Intel Corp. + * Copyright (C) 2010, 2016 Intel Corp. * Huang Ying <ying.huang@intel.com> * * This program is free software; you can redistribute it and/or @@ -12,16 +12,17 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _LINUX_UUID_H_ #define _LINUX_UUID_H_ #include <uapi/linux/uuid.h> +/* + * The length of a UUID string ("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee") + * not including trailing NUL. + */ +#define UUID_STRING_LEN 36 static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2) { @@ -33,7 +34,17 @@ static inline int uuid_be_cmp(const uuid_be u1, const uuid_be u2) return memcmp(&u1, &u2, sizeof(uuid_be)); } +void generate_random_uuid(unsigned char uuid[16]); + extern void uuid_le_gen(uuid_le *u); extern void uuid_be_gen(uuid_be *u); +bool __must_check uuid_is_valid(const char *uuid); + +extern const u8 uuid_le_index[16]; +extern const u8 uuid_be_index[16]; + +int uuid_le_to_bin(const char *uuid, uuid_le *u); +int uuid_be_to_bin(const char *uuid, uuid_be *u); + #endif diff --git a/include/linux/verification.h b/include/linux/verification.h new file mode 100644 index 000000000000..a10549a6c7cd --- /dev/null +++ b/include/linux/verification.h @@ -0,0 +1,49 @@ +/* Signature verification + * + * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_VERIFICATION_H +#define _LINUX_VERIFICATION_H + +/* + * The use to which an asymmetric key is being put. + */ +enum key_being_used_for { + VERIFYING_MODULE_SIGNATURE, + VERIFYING_FIRMWARE_SIGNATURE, + VERIFYING_KEXEC_PE_SIGNATURE, + VERIFYING_KEY_SIGNATURE, + VERIFYING_KEY_SELF_SIGNATURE, + VERIFYING_UNSPECIFIED_SIGNATURE, + NR__KEY_BEING_USED_FOR +}; +extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR]; + +#ifdef CONFIG_SYSTEM_DATA_VERIFICATION + +struct key; + +extern int verify_pkcs7_signature(const void *data, size_t len, + const void *raw_pkcs7, size_t pkcs7_len, + struct key *trusted_keys, + enum key_being_used_for usage, + int (*view_content)(void *ctx, + const void *data, size_t len, + size_t asn1hdrlen), + void *ctx); + +#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION +extern int verify_pefile_signature(const void *pebuf, unsigned pelen, + struct key *trusted_keys, + enum key_being_used_for usage); +#endif + +#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ +#endif /* _LINUX_VERIFY_PEFILE_H */ diff --git a/include/linux/verify_pefile.h b/include/linux/verify_pefile.h deleted file mode 100644 index da2049b5161c..000000000000 --- a/include/linux/verify_pefile.h +++ /dev/null @@ -1,22 +0,0 @@ -/* Signed PE file verification - * - * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
- */ - -#ifndef _LINUX_VERIFY_PEFILE_H -#define _LINUX_VERIFY_PEFILE_H - -#include <crypto/public_key.h> - -extern int verify_pefile_signature(const void *pebuf, unsigned pelen, - struct key *trusted_keyring, - enum key_being_used_for usage, - bool *_trusted); - -#endif /* _LINUX_VERIFY_PEFILE_H */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index d1f1d338af20..3d9d786a943c 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -4,10 +4,12 @@ #include <linux/spinlock.h> #include <linux/init.h> #include <linux/list.h> +#include <linux/llist.h> #include <asm/page.h> /* pgprot_t */ #include <linux/rbtree.h> struct vm_area_struct; /* vma defining user mapping in mm_types.h */ +struct notifier_block; /* in notifier.h */ /* bits in flags of vmalloc's vm_struct below */ #define VM_IOREMAP 0x00000001 /* ioremap() and friends */ @@ -44,7 +46,7 @@ struct vmap_area { unsigned long flags; struct rb_node rb_node; /* address sorted rbtree */ struct list_head list; /* address sorted list */ - struct list_head purge_list; /* "lazy purge" list */ + struct llist_node purge_list; /* "lazy purge" list */ struct vm_struct *vm; struct rcu_head rcu_head; }; @@ -187,4 +189,7 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) #define VMALLOC_TOTAL 0UL #endif +int register_vmap_purge_notifier(struct notifier_block *nb); +int unregister_vmap_purge_notifier(struct notifier_block *nb); + #endif /* _LINUX_VMALLOC_H */ diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 73fae8c4a5fb..d2da8e053210 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -163,12 +163,10 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone, #ifdef CONFIG_NUMA extern unsigned long node_page_state(int node, enum zone_stat_item item); -extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp); #else #define node_page_state(node, item) global_page_state(item) -#define zone_statistics(_zl, _z, gfp) do { } while (0) #endif /* CONFIG_NUMA */ @@ -193,6 +191,10 @@ void quiet_vmstat(void); void cpu_vm_stats_fold(int cpu); void refresh_zone_stat_thresholds(void); +struct ctl_table; +int vmstat_refresh(struct ctl_table *, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); + void drain_zonestat(struct zone *zone, struct per_cpu_pageset *); int calculate_pressure_threshold(struct zone *zone); diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 4457541de3c9..1cc4c578deb9 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -30,7 +30,8 @@ struct xattr_handler { int flags; /* fs private flags */ bool (*list)(struct dentry *dentry); int (*get)(const struct xattr_handler *, struct dentry *dentry, - const char *name, void *buffer, size_t size); + struct inode *inode, const char *name, void *buffer, + size_t size); int (*set)(const struct xattr_handler *, struct dentry *dentry, const char *name, const void *buffer, size_t size, int flags); @@ -51,7 +52,7 @@ int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, i int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); int vfs_removexattr(struct dentry *, const char *); -ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size); +ssize_t generic_getxattr(struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size); ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); int generic_setxattr(struct dentry *dentry, const char *name, const void 
*value, size_t size, int flags); int generic_removexattr(struct dentry *dentry, const char *name); diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 34eb16098a33..57a8e98f2708 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -41,10 +41,10 @@ struct zs_pool_stats { struct zs_pool; -struct zs_pool *zs_create_pool(const char *name, gfp_t flags); +struct zs_pool *zs_create_pool(const char *name); void zs_destroy_pool(struct zs_pool *pool); -unsigned long zs_malloc(struct zs_pool *pool, size_t size); +unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags); void zs_free(struct zs_pool *pool, unsigned long obj); void *zs_map_object(struct zs_pool *pool, unsigned long handle,
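The zsmalloc hunk above moves the gfp_t argument from pool creation to the individual allocation, presumably so callers can choose the allocation context per request rather than fixing it per pool. A minimal sketch of the resulting calling convention follows; the pool name and the helper are hypothetical and only illustrate the new signatures.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/zsmalloc.h>

/* Hypothetical caller exercising the reworked zsmalloc API. */
static int example_zs_alloc(size_t len)
{
	struct zs_pool *pool;
	unsigned long handle;

	pool = zs_create_pool("example");	/* gfp_t argument dropped here */
	if (!pool)
		return -ENOMEM;

	handle = zs_malloc(pool, len, GFP_KERNEL);	/* ...and passed per allocation */
	if (!handle) {				/* zs_malloc() returns 0 on failure */
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}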