| field | value | date |
|---|---|---|
| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2016-12-16 20:31:17 +0300 |
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2016-12-16 20:31:17 +0300 |
| commit | f26e8817b235d8764363bffcc9cbfc61867371f2 (patch) | |
| tree | 6546ea2cf91b78f1ada2161db61e21085c880740 /include/linux | |
| parent | 2425f1808123bf69a8f66d4ec90e0d0e302c2613 (diff) | |
| parent | ebfb0184ef560897fad35005989e82433419202c (diff) | |
| download | linux-f26e8817b235d8764363bffcc9cbfc61867371f2.tar.xz | |
Merge branch 'next' into for-linus
Prepare input updates for 4.10 merge window.
Diffstat (limited to 'include/linux')
519 files changed, 20692 insertions, 6887 deletions
```diff
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 06ed7e54033e..c5eaf2f80a4c 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -190,14 +190,6 @@ static inline int acpi_debugger_notify_command_complete(void)
 }
 #endif

-#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
-void acpi_initrd_override(void *data, size_t size);
-#else
-static inline void acpi_initrd_override(void *data, size_t size)
-{
-}
-#endif
-
 #define BAD_MADT_ENTRY(entry, end) (					\
 	(!entry) || (unsigned long)entry + sizeof(*entry) > end ||	\
 	((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
@@ -239,12 +231,26 @@ int acpi_table_parse_madt(enum acpi_madt_type id,
 int acpi_parse_mcfg (struct acpi_table_header *header);
 void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);

-/* the following four functions are architecture-dependent */
+/* the following numa functions are architecture-dependent */
 void acpi_numa_slit_init (struct acpi_table_slit *slit);
+
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
 void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
+#else
+static inline void
+acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { }
+#endif
+
 void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
+
+#ifdef CONFIG_ARM64
+void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa);
+#else
+static inline void
+acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { }
+#endif
+
 int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
-void acpi_numa_arch_fixup(void);

 #ifndef PHYS_CPUID_INVALID
 typedef u32 phys_cpuid_t;
@@ -278,6 +284,7 @@ void acpi_irq_stats_init(void);
 extern u32 acpi_irq_handled;
 extern u32 acpi_irq_not_handled;
 extern unsigned int acpi_sci_irq;
+extern bool acpi_no_s5;
 #define INVALID_ACPI_IRQ	((unsigned)-1)
 static inline bool acpi_sci_irq_valid(void)
 {
@@ -311,7 +318,6 @@ struct pci_dev;
 int acpi_pci_irq_enable (struct pci_dev *dev);
 void acpi_penalize_isa_irq(int irq, int active);
 bool acpi_isa_irq_available(int irq);
-void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
 void acpi_pci_irq_disable (struct pci_dev *dev);

 extern int ec_read(u8 addr, u8 *val);
@@ -359,7 +365,6 @@ extern bool wmi_has_guid(const char *guid);
 extern char acpi_video_backlight_string[];
 extern long acpi_is_video_device(acpi_handle handle);
 extern int acpi_blacklisted(void);
-extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d);
 extern void acpi_osi_setup(char *str);
 extern bool acpi_osi_is_win8(void);
@@ -452,8 +457,12 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
 #define OSC_SB_HOTPLUG_OST_SUPPORT		0x00000008
 #define OSC_SB_APEI_SUPPORT			0x00000010
 #define OSC_SB_CPC_SUPPORT			0x00000020
+#define OSC_SB_CPCV2_SUPPORT			0x00000040
+#define OSC_SB_PCLPI_SUPPORT			0x00000080
+#define OSC_SB_OSLPI_SUPPORT			0x00000100

 extern bool osc_sb_apei_support_acked;
+extern bool osc_pc_lpi_support_confirmed;

 /* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */
 #define OSC_PCI_EXT_CONFIG_SUPPORT		0x00000001
@@ -540,6 +549,24 @@ void acpi_walk_dep_device_list(acpi_handle handle);
 struct platform_device *acpi_create_platform_device(struct acpi_device *);
 #define ACPI_PTR(_ptr)	(_ptr)

+static inline void acpi_device_set_enumerated(struct acpi_device *adev)
+{
+	adev->flags.visited = true;
+}
+
+static inline void acpi_device_clear_enumerated(struct acpi_device *adev)
+{
+	adev->flags.visited = false;
+}
+
+enum acpi_reconfig_event  {
+	ACPI_RECONFIG_DEVICE_ADD = 0,
+	ACPI_RECONFIG_DEVICE_REMOVE,
+};
+
+int acpi_reconfig_notifier_register(struct notifier_block *nb);
+int acpi_reconfig_notifier_unregister(struct notifier_block *nb);
+
 #else	/* !CONFIG_ACPI */

 #define acpi_disabled 1
@@ -551,6 +578,11 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *);

 struct fwnode_handle;

+static inline bool acpi_dev_found(const char *hid)
+{
+	return false;
+}
+
 static inline bool is_acpi_node(struct fwnode_handle *fwnode)
 {
 	return false;
@@ -576,6 +608,12 @@ static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwn
 	return NULL;
 }

+static inline bool acpi_data_node_match(struct fwnode_handle *fwnode,
+					const char *name)
+{
+	return false;
+}
+
 static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
 {
 	return NULL;
@@ -661,6 +699,14 @@ static inline bool acpi_driver_match_device(struct device *dev,
 	return false;
 }

+static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle,
+						   const u8 *uuid,
+						   int rev, int func,
+						   union acpi_object *argv4)
+{
+	return NULL;
+}
+
 static inline int acpi_device_uevent_modalias(struct device *dev,
 					      struct kobj_uevent_env *env)
 {
@@ -685,6 +731,24 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)

 #define ACPI_PTR(_ptr)	(NULL)

+static inline void acpi_device_set_enumerated(struct acpi_device *adev)
+{
+}
+
+static inline void acpi_device_clear_enumerated(struct acpi_device *adev)
+{
+}
+
+static inline int acpi_reconfig_notifier_register(struct notifier_block *nb)
+{
+	return -EINVAL;
+}
+
+static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb)
+{
+	return -EINVAL;
+}
+
 #endif	/* !CONFIG_ACPI */

 #ifdef CONFIG_ACPI
@@ -992,7 +1056,7 @@ static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
 	return NULL;
 }

-#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, validate, data, fn) \
+#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
 	static const void * __acpi_table_##name[]			\
 		__attribute__((unused))					\
 		 = { (void *) table_id,					\
@@ -1004,4 +1068,10 @@ static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
 #define acpi_probe_device_table(t)	({ int __r = 0; __r;})
 #endif

+#ifdef CONFIG_ACPI_TABLE_UPGRADE
+void acpi_table_upgrade(void);
+#else
+static inline void acpi_table_upgrade(void) { }
+#endif
+
 #endif	/*_LINUX_ACPI_H*/
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index 52f3b7da4f2d..9d8031257a90 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -26,10 +26,10 @@ enum alarmtimer_restart {
  * struct alarm - Alarm timer structure
  * @node:	timerqueue node for adding to the event list this value
  *		also includes the expiration time.
- * @period:	Period for recuring alarms
+ * @timer:	hrtimer used to schedule events while running
  * @function:	Function pointer to be executed when the timer fires.
- * @type:	Alarm type (BOOTTIME/REALTIME)
- * @enabled:	Flag that represents if the alarm is set to fire or not
+ * @type:	Alarm type (BOOTTIME/REALTIME).
+ * @state:	Flag that represents if the alarm is set to fire or not.
  * @data:	Internal data value.
  */
 struct alarm {
```
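The acpi.h hunks above add a reconfiguration notifier chain plus helpers that flag a device as enumerated. A minimal sketch of a consumer, assuming (as the in-tree users do) that the notifier payload is the affected `struct acpi_device`; the handler and names here are illustrative, not part of the patch:

```c
#include <linux/acpi.h>
#include <linux/notifier.h>

/* Hypothetical listener for ACPI table hotplug reconfiguration events. */
static int demo_acpi_reconfig(struct notifier_block *nb,
			      unsigned long event, void *data)
{
	struct acpi_device *adev = data;	/* assumed payload type */

	switch (event) {
	case ACPI_RECONFIG_DEVICE_ADD:
		acpi_device_set_enumerated(adev);	/* flags.visited = true */
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		acpi_device_clear_enumerated(adev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_acpi_nb = {
	.notifier_call = demo_acpi_reconfig,
};

static int __init demo_acpi_init(void)
{
	/* Stubbed to return -EINVAL when CONFIG_ACPI is disabled. */
	return acpi_reconfig_notifier_register(&demo_acpi_nb);
}
```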
```diff
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 10fe2a211c2e..27e9ec8778eb 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -86,7 +86,7 @@ struct pl08x_channel_data {
  * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2
  */
 struct pl08x_platform_data {
-	const struct pl08x_channel_data *slave_channels;
+	struct pl08x_channel_data *slave_channels;
 	unsigned int num_slave_channels;
 	struct pl08x_channel_data memcpy_channel;
 	int (*get_xfer_signal)(const struct pl08x_channel_data *);
diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h
index b2d32e01dfe4..714186de8c36 100644
--- a/include/linux/apple-gmux.h
+++ b/include/linux/apple-gmux.h
@@ -35,7 +35,7 @@
  */
 static inline bool apple_gmux_present(void)
 {
-	return acpi_dev_present(GMUX_ACPI_HID);
+	return acpi_dev_found(GMUX_ACPI_HID);
 }

 #else  /* !CONFIG_APPLE_GMUX */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index c1a2f345cbe6..adbc812c009b 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -46,8 +46,9 @@ enum {
 	ATA_MAX_SECTORS_128	= 128,
 	ATA_MAX_SECTORS		= 256,
 	ATA_MAX_SECTORS_1024	= 1024,
-	ATA_MAX_SECTORS_LBA48	= 65535,/* TODO: 65536? */
+	ATA_MAX_SECTORS_LBA48	= 65535,/* avoid count to be 0000h */
 	ATA_MAX_SECTORS_TAPE	= 65535,
+	ATA_MAX_TRIM_RNUM	= 64, /* 512-byte payload / (6-byte LBA + 2-byte range per entry) */

 	ATA_ID_WORDS		= 256,
 	ATA_ID_CONFIG		= 0,
@@ -243,6 +244,7 @@ enum {
 	ATA_CMD_WRITE_QUEUED_FUA_EXT = 0x3E,
 	ATA_CMD_FPDMA_READ	= 0x60,
 	ATA_CMD_FPDMA_WRITE	= 0x61,
+	ATA_CMD_NCQ_NON_DATA	= 0x63,
 	ATA_CMD_FPDMA_SEND	= 0x64,
 	ATA_CMD_FPDMA_RECV	= 0x65,
 	ATA_CMD_PIO_READ	= 0x20,
@@ -301,19 +303,43 @@ enum {
 	ATA_CMD_CFA_WRITE_MULT_NE = 0xCD,
 	ATA_CMD_REQ_SENSE_DATA  = 0x0B,
 	ATA_CMD_SANITIZE_DEVICE = 0xB4,
+	ATA_CMD_ZAC_MGMT_IN	= 0x4A,
+	ATA_CMD_ZAC_MGMT_OUT	= 0x9F,

 	/* marked obsolete in the ATA/ATAPI-7 spec */
 	ATA_CMD_RESTORE		= 0x10,

+	/* Subcmds for ATA_CMD_FPDMA_RECV */
+	ATA_SUBCMD_FPDMA_RECV_RD_LOG_DMA_EXT = 0x01,
+	ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN    = 0x02,
+
 	/* Subcmds for ATA_CMD_FPDMA_SEND */
 	ATA_SUBCMD_FPDMA_SEND_DSM            = 0x00,
 	ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT = 0x02,

+	/* Subcmds for ATA_CMD_NCQ_NON_DATA */
+	ATA_SUBCMD_NCQ_NON_DATA_ABORT_QUEUE  = 0x00,
+	ATA_SUBCMD_NCQ_NON_DATA_SET_FEATURES = 0x05,
+	ATA_SUBCMD_NCQ_NON_DATA_ZERO_EXT     = 0x06,
+	ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT = 0x07,
+
+	/* Subcmds for ATA_CMD_ZAC_MGMT_IN */
+	ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES = 0x00,
+
+	/* Subcmds for ATA_CMD_ZAC_MGMT_OUT */
+	ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE = 0x01,
+	ATA_SUBCMD_ZAC_MGMT_OUT_FINISH_ZONE = 0x02,
+	ATA_SUBCMD_ZAC_MGMT_OUT_OPEN_ZONE = 0x03,
+	ATA_SUBCMD_ZAC_MGMT_OUT_RESET_WRITE_POINTER = 0x04,
+
 	/* READ_LOG_EXT pages */
+	ATA_LOG_DIRECTORY	= 0x0,
 	ATA_LOG_SATA_NCQ	= 0x10,
+	ATA_LOG_NCQ_NON_DATA	= 0x12,
 	ATA_LOG_NCQ_SEND_RECV	  = 0x13,
 	ATA_LOG_SATA_ID_DEV_DATA  = 0x30,
 	ATA_LOG_SATA_SETTINGS	  = 0x08,
+	ATA_LOG_ZONED_INFORMATION = 0x09,
 	ATA_LOG_DEVSLP_OFFSET	  = 0x30,
 	ATA_LOG_DEVSLP_SIZE	  = 0x08,
 	ATA_LOG_DEVSLP_MDAT	  = 0x00,
@@ -328,8 +354,25 @@ enum {
 	ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET	= 0x04,
 	ATA_LOG_NCQ_SEND_RECV_DSM_TRIM		= (1 << 0),
 	ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET	= 0x08,
+	ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED	= (1 << 0),
 	ATA_LOG_NCQ_SEND_RECV_WR_LOG_OFFSET	= 0x0C,
-	ATA_LOG_NCQ_SEND_RECV_SIZE		= 0x10,
+	ATA_LOG_NCQ_SEND_RECV_WR_LOG_SUPPORTED	= (1 << 0),
+	ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET	= 0x10,
+	ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OUT_SUPPORTED = (1 << 0),
+	ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED = (1 << 1),
+	ATA_LOG_NCQ_SEND_RECV_SIZE		= 0x14,
+
+	/* NCQ Non-Data log */
+	ATA_LOG_NCQ_NON_DATA_SUBCMDS_OFFSET	= 0x00,
+	ATA_LOG_NCQ_NON_DATA_ABORT_OFFSET	= 0x00,
+	ATA_LOG_NCQ_NON_DATA_ABORT_NCQ		= (1 << 0),
+	ATA_LOG_NCQ_NON_DATA_ABORT_ALL		= (1 << 1),
+	ATA_LOG_NCQ_NON_DATA_ABORT_STREAMING	= (1 << 2),
+	ATA_LOG_NCQ_NON_DATA_ABORT_NON_STREAMING = (1 << 3),
+	ATA_LOG_NCQ_NON_DATA_ABORT_SELECTED	= (1 << 4),
+	ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET	= 0x1C,
+	ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT	= (1 << 0),
+	ATA_LOG_NCQ_NON_DATA_SIZE		= 0x40,

 	/* READ/WRITE LONG (obsolete) */
 	ATA_CMD_READ_LONG	= 0x22,
@@ -367,11 +410,15 @@ enum {
 	SETFEATURES_WC_ON	= 0x02, /* Enable write cache */
 	SETFEATURES_WC_OFF	= 0x82, /* Disable write cache */

+	SETFEATURES_RA_ON	= 0xaa, /* Enable read look-ahead */
+	SETFEATURES_RA_OFF	= 0x55, /* Disable read look-ahead */
+
 	/* Enable/Disable Automatic Acoustic Management */
 	SETFEATURES_AAM_ON	= 0x42,
 	SETFEATURES_AAM_OFF	= 0xC2,

-	SETFEATURES_SPINUP	= 0x07, /* Spin-up drive */
+	SETFEATURES_SPINUP		= 0x07, /* Spin-up drive */
+	SETFEATURES_SPINUP_TIMEOUT	= 30000, /* 30s timeout for drive spin-up from PUIS */

 	SETFEATURES_SATA_ENABLE = 0x10, /* Enable use of SATA feature */
 	SETFEATURES_SATA_DISABLE = 0x90, /* Disable use of SATA feature */
@@ -385,6 +432,8 @@ enum {
 	SATA_SSP	= 0x06,	/* Software Settings Preservation */
 	SATA_DEVSLP	= 0x09,	/* Device Sleep */

+	SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
+
 	/* feature values for SET_MAX */
 	ATA_SET_MAX_ADDR = 0x00,
 	ATA_SET_MAX_PASSWD = 0x01,
@@ -474,16 +523,23 @@ enum {
 	SERR_DEV_XCHG		= (1 << 26), /* device exchanged */
 };

-enum ata_tf_protocols {
-	/* ATA taskfile protocols */
-	ATA_PROT_UNKNOWN,	/* unknown/invalid */
-	ATA_PROT_NODATA,	/* no data */
-	ATA_PROT_PIO,		/* PIO data xfer */
-	ATA_PROT_DMA,		/* DMA */
-	ATA_PROT_NCQ,		/* NCQ */
-	ATAPI_PROT_NODATA,	/* packet command, no data */
-	ATAPI_PROT_PIO,		/* packet command, PIO data xfer*/
-	ATAPI_PROT_DMA,		/* packet command with special DMA sauce */
+enum ata_prot_flags {
+	/* protocol flags */
+	ATA_PROT_FLAG_PIO	= (1 << 0), /* is PIO */
+	ATA_PROT_FLAG_DMA	= (1 << 1), /* is DMA */
+	ATA_PROT_FLAG_NCQ	= (1 << 2), /* is NCQ */
+	ATA_PROT_FLAG_ATAPI	= (1 << 3), /* is ATAPI */
+
+	/* taskfile protocols */
+	ATA_PROT_UNKNOWN	= (u8)-1,
+	ATA_PROT_NODATA		= 0,
+	ATA_PROT_PIO		= ATA_PROT_FLAG_PIO,
+	ATA_PROT_DMA		= ATA_PROT_FLAG_DMA,
+	ATA_PROT_NCQ_NODATA	= ATA_PROT_FLAG_NCQ,
+	ATA_PROT_NCQ		= ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ,
+	ATAPI_PROT_NODATA	= ATA_PROT_FLAG_ATAPI,
+	ATAPI_PROT_PIO		= ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO,
+	ATAPI_PROT_DMA		= ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA,
 };

 enum ata_ioctls {
@@ -528,6 +584,8 @@ struct ata_bmdma_prd {
 #define ata_id_cdb_intr(id)	(((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
 #define ata_id_has_da(id)	((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
 #define ata_id_has_devslp(id)	((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
+#define ata_id_has_ncq_autosense(id) \
+	((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))

 static inline bool ata_id_has_hipm(const u16 *id)
 {
@@ -716,6 +774,20 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
 	return false;
 }

+static inline bool ata_id_has_sense_reporting(const u16 *id)
+{
+	if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+		return false;
+	return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
+}
+
+static inline bool ata_id_sense_reporting_enabled(const u16 *id)
+{
+	if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+		return false;
+	return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
+}
+
 /**
  * ata_id_major_version - get ATA level of drive
  * @id: Identify data
@@ -820,6 +892,11 @@ static inline bool ata_id_has_ncq_send_and_recv(const u16 *id)
 	return id[ATA_ID_SATA_CAPABILITY_2] & BIT(6);
 }

+static inline bool ata_id_has_ncq_non_data(const u16 *id)
+{
+	return id[ATA_ID_SATA_CAPABILITY_2] & BIT(5);
+}
+
 static inline bool ata_id_has_trim(const u16 *id)
 {
 	if (ata_id_major_version(id) >= 7 &&
@@ -871,6 +948,11 @@ static inline bool ata_id_is_ssd(const u16 *id)
 	return id[ATA_ID_ROT_SPEED] == 0x01;
 }

+static inline u8 ata_id_zoned_cap(const u16 *id)
+{
+	return (id[ATA_ID_ADDITIONAL_SUPP] & 0x3);
+}
+
 static inline bool ata_id_pio_need_iordy(const u16 *id, const u8 pio)
 {
 	/* CF spec. r4.1 Table 22 says no IORDY on PIO5 and PIO6. */
@@ -995,12 +1077,12 @@ static inline void ata_id_to_hd_driveid(u16 *id)
  * TO NV CACHE PINNED SET.
  */
 static inline unsigned ata_set_lba_range_entries(void *_buffer,
-		unsigned buf_size, u64 sector, unsigned long count)
+		unsigned num, u64 sector, unsigned long count)
 {
 	__le64 *buffer = _buffer;
 	unsigned i = 0, used_bytes;

-	while (i < buf_size / 8 ) { /* 6-byte LBA + 2-byte range per entry */
+	while (i < num) {
 		u64 entry = sector |
 			((u64)(count > 0xffff ? 0xffff : count) << 48);
 		buffer[i++] = __cpu_to_le64(entry);
@@ -1024,13 +1106,13 @@ static inline bool ata_ok(u8 status)
 static inline bool lba_28_ok(u64 block, u32 n_block)
 {
 	/* check the ending block number: must be LESS THAN 0x0fffffff */
-	return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256);
+	return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= ATA_MAX_SECTORS);
 }

 static inline bool lba_48_ok(u64 block, u32 n_block)
 {
 	/* check the ending block number */
-	return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= 65536);
+	return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= ATA_MAX_SECTORS_LBA48);
 }

 #define sata_pmp_gscr_vendor(gscr)	((gscr)[SATA_PMP_GSCR_PROD_ID] & 0xffff)
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h
index 33eb274cd0e6..76860a461ed2 100644
--- a/include/linux/ath9k_platform.h
+++ b/include/linux/ath9k_platform.h
@@ -31,11 +31,16 @@ struct ath9k_platform_data {
 	u32 gpio_mask;
 	u32 gpio_val;

+	u32 bt_active_pin;
+	u32 bt_priority_pin;
+	u32 wlan_active_pin;
+
 	bool endian_check;
 	bool is_clk_25mhz;
 	bool tx_gain_buffalo;
 	bool disable_2ghz;
 	bool disable_5ghz;
+	bool led_active_high;

 	int (*get_mac_revision)(void);
 	int (*external_reset)(void);
```
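The ata.h hunk above changes ata_set_lba_range_entries() to take an entry count rather than a byte size, with the new ATA_MAX_TRIM_RNUM spelling out how many 8-byte entries (6-byte LBA plus 2-byte sector count) fit in one 512-byte DSM TRIM payload. A sketch of a caller under those assumptions; the wrapper itself is hypothetical:

```c
#include <linux/ata.h>
#include <linux/string.h>

/* Fill one 512-byte TRIM payload; returns the number of bytes used. */
static unsigned int demo_fill_trim_payload(void *buf, u64 lba,
					   unsigned long nsect)
{
	memset(buf, 0, 512);
	/* Caps at ATA_MAX_TRIM_RNUM (64) LBA ranges per payload sector. */
	return ata_set_lba_range_entries(buf, ATA_MAX_TRIM_RNUM, lba, nsect);
}
```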
```diff
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 506c3531832e..e71835bf60a9 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -163,206 +163,265 @@
 #endif
 #endif /* atomic_dec_return_relaxed */

-/* atomic_xchg_relaxed */
-#ifndef atomic_xchg_relaxed
-#define atomic_xchg_relaxed		atomic_xchg
-#define atomic_xchg_acquire		atomic_xchg
-#define atomic_xchg_release		atomic_xchg
-#else /* atomic_xchg_relaxed */
+/* atomic_fetch_add_relaxed */
+#ifndef atomic_fetch_add_relaxed
+#define atomic_fetch_add_relaxed	atomic_fetch_add
+#define atomic_fetch_add_acquire	atomic_fetch_add
+#define atomic_fetch_add_release	atomic_fetch_add

-#ifndef atomic_xchg_acquire
-#define atomic_xchg_acquire(...)					\
-	__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
+#else /* atomic_fetch_add_relaxed */
+
+#ifndef atomic_fetch_add_acquire
+#define atomic_fetch_add_acquire(...)					\
+	__atomic_op_acquire(atomic_fetch_add, __VA_ARGS__)
 #endif

-#ifndef atomic_xchg_release
-#define atomic_xchg_release(...)					\
-	__atomic_op_release(atomic_xchg, __VA_ARGS__)
+#ifndef atomic_fetch_add_release
+#define atomic_fetch_add_release(...)					\
+	__atomic_op_release(atomic_fetch_add, __VA_ARGS__)
 #endif

-#ifndef atomic_xchg
-#define atomic_xchg(...)						\
-	__atomic_op_fence(atomic_xchg, __VA_ARGS__)
+#ifndef atomic_fetch_add
+#define atomic_fetch_add(...)						\
+	__atomic_op_fence(atomic_fetch_add, __VA_ARGS__)
+#endif
+#endif /* atomic_fetch_add_relaxed */
+
+/* atomic_fetch_inc_relaxed */
+#ifndef atomic_fetch_inc_relaxed
+
+#ifndef atomic_fetch_inc
+#define atomic_fetch_inc(v)		atomic_fetch_add(1, (v))
+#define atomic_fetch_inc_relaxed(v)	atomic_fetch_add_relaxed(1, (v))
+#define atomic_fetch_inc_acquire(v)	atomic_fetch_add_acquire(1, (v))
+#define atomic_fetch_inc_release(v)	atomic_fetch_add_release(1, (v))
+#else /* atomic_fetch_inc */
+#define atomic_fetch_inc_relaxed	atomic_fetch_inc
+#define atomic_fetch_inc_acquire	atomic_fetch_inc
+#define atomic_fetch_inc_release	atomic_fetch_inc
+#endif /* atomic_fetch_inc */
+
+#else /* atomic_fetch_inc_relaxed */
+
+#ifndef atomic_fetch_inc_acquire
+#define atomic_fetch_inc_acquire(...)					\
+	__atomic_op_acquire(atomic_fetch_inc, __VA_ARGS__)
 #endif
-#endif /* atomic_xchg_relaxed */

-/* atomic_cmpxchg_relaxed */
-#ifndef atomic_cmpxchg_relaxed
-#define atomic_cmpxchg_relaxed		atomic_cmpxchg
-#define atomic_cmpxchg_acquire		atomic_cmpxchg
-#define atomic_cmpxchg_release		atomic_cmpxchg
+#ifndef atomic_fetch_inc_release
+#define atomic_fetch_inc_release(...)					\
+	__atomic_op_release(atomic_fetch_inc, __VA_ARGS__)
+#endif

-#else /* atomic_cmpxchg_relaxed */
+#ifndef atomic_fetch_inc
+#define atomic_fetch_inc(...)						\
+	__atomic_op_fence(atomic_fetch_inc, __VA_ARGS__)
+#endif
+#endif /* atomic_fetch_inc_relaxed */

-#ifndef atomic_cmpxchg_acquire
-#define atomic_cmpxchg_acquire(...)					\
-	__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
+/* atomic_fetch_sub_relaxed */
+#ifndef atomic_fetch_sub_relaxed
+#define atomic_fetch_sub_relaxed	atomic_fetch_sub
+#define atomic_fetch_sub_acquire	atomic_fetch_sub
+#define atomic_fetch_sub_release	atomic_fetch_sub
+
+#else /* atomic_fetch_sub_relaxed */
+
+#ifndef atomic_fetch_sub_acquire
+#define atomic_fetch_sub_acquire(...)					\
+	__atomic_op_acquire(atomic_fetch_sub, __VA_ARGS__)
 #endif

-#ifndef atomic_cmpxchg_release
-#define atomic_cmpxchg_release(...)					\
-	__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
+#ifndef atomic_fetch_sub_release
+#define atomic_fetch_sub_release(...)					\
+	__atomic_op_release(atomic_fetch_sub, __VA_ARGS__)
 #endif

-#ifndef atomic_cmpxchg
-#define atomic_cmpxchg(...)						\
-	__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
+#ifndef atomic_fetch_sub
+#define atomic_fetch_sub(...)						\
+	__atomic_op_fence(atomic_fetch_sub, __VA_ARGS__)
+#endif
+#endif /* atomic_fetch_sub_relaxed */
+
+/* atomic_fetch_dec_relaxed */
+#ifndef atomic_fetch_dec_relaxed
+
+#ifndef atomic_fetch_dec
+#define atomic_fetch_dec(v)		atomic_fetch_sub(1, (v))
+#define atomic_fetch_dec_relaxed(v)	atomic_fetch_sub_relaxed(1, (v))
+#define atomic_fetch_dec_acquire(v)	atomic_fetch_sub_acquire(1, (v))
+#define atomic_fetch_dec_release(v)	atomic_fetch_sub_release(1, (v))
+#else /* atomic_fetch_dec */
+#define atomic_fetch_dec_relaxed	atomic_fetch_dec
+#define atomic_fetch_dec_acquire	atomic_fetch_dec
+#define atomic_fetch_dec_release	atomic_fetch_dec
+#endif /* atomic_fetch_dec */
+
+#else /* atomic_fetch_dec_relaxed */
+
+#ifndef atomic_fetch_dec_acquire
+#define atomic_fetch_dec_acquire(...)					\
+	__atomic_op_acquire(atomic_fetch_dec, __VA_ARGS__)
 #endif
-#endif /* atomic_cmpxchg_relaxed */

-#ifndef atomic64_read_acquire
-#define atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
+#ifndef atomic_fetch_dec_release
+#define atomic_fetch_dec_release(...)					\
+	__atomic_op_release(atomic_fetch_dec, __VA_ARGS__)
 #endif

-#ifndef atomic64_set_release
-#define atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
+#ifndef atomic_fetch_dec
+#define atomic_fetch_dec(...)						\
+	__atomic_op_fence(atomic_fetch_dec, __VA_ARGS__)
 #endif
+#endif /* atomic_fetch_dec_relaxed */

-/* atomic64_add_return_relaxed */
-#ifndef atomic64_add_return_relaxed
-#define atomic64_add_return_relaxed	atomic64_add_return
-#define atomic64_add_return_acquire	atomic64_add_return
-#define atomic64_add_return_release	atomic64_add_return
+/* atomic_fetch_or_relaxed */
+#ifndef atomic_fetch_or_relaxed
+#define atomic_fetch_or_relaxed	atomic_fetch_or
+#define atomic_fetch_or_acquire	atomic_fetch_or
+#define atomic_fetch_or_release	atomic_fetch_or

-#else /* atomic64_add_return_relaxed */
+#else /* atomic_fetch_or_relaxed */

-#ifndef atomic64_add_return_acquire
-#define atomic64_add_return_acquire(...)				\
-	__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
+#ifndef atomic_fetch_or_acquire
+#define atomic_fetch_or_acquire(...)					\
+	__atomic_op_acquire(atomic_fetch_or, __VA_ARGS__)
 #endif

-#ifndef atomic64_add_return_release
-#define atomic64_add_return_release(...)				\
-	__atomic_op_release(atomic64_add_return, __VA_ARGS__)
+#ifndef atomic_fetch_or_release
+#define atomic_fetch_or_release(...)					\
+	__atomic_op_release(atomic_fetch_or, __VA_ARGS__)
 #endif

-#ifndef atomic64_add_return
-#define atomic64_add_return(...)					\
-	__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
+#ifndef atomic_fetch_or
+#define atomic_fetch_or(...)						\
+	__atomic_op_fence(atomic_fetch_or, __VA_ARGS__)
 #endif
-#endif /* atomic64_add_return_relaxed */
+#endif /* atomic_fetch_or_relaxed */

-/* atomic64_inc_return_relaxed */
-#ifndef atomic64_inc_return_relaxed
-#define atomic64_inc_return_relaxed	atomic64_inc_return
-#define atomic64_inc_return_acquire	atomic64_inc_return
-#define atomic64_inc_return_release	atomic64_inc_return
+/* atomic_fetch_and_relaxed */
+#ifndef atomic_fetch_and_relaxed
+#define atomic_fetch_and_relaxed	atomic_fetch_and
+#define atomic_fetch_and_acquire	atomic_fetch_and
+#define atomic_fetch_and_release	atomic_fetch_and

-#else /* atomic64_inc_return_relaxed */
+#else /* atomic_fetch_and_relaxed */

-#ifndef atomic64_inc_return_acquire
-#define atomic64_inc_return_acquire(...)				\
-	__atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
+#ifndef atomic_fetch_and_acquire
+#define atomic_fetch_and_acquire(...)					\
+	__atomic_op_acquire(atomic_fetch_and, __VA_ARGS__)
 #endif

-#ifndef atomic64_inc_return_release
-#define atomic64_inc_return_release(...)				\
-	__atomic_op_release(atomic64_inc_return, __VA_ARGS__)
+#ifndef atomic_fetch_and_release
+#define atomic_fetch_and_release(...)					\
+	__atomic_op_release(atomic_fetch_and, __VA_ARGS__)
 #endif

-#ifndef atomic64_inc_return
-#define atomic64_inc_return(...)					\
-	__atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
+#ifndef atomic_fetch_and
+#define atomic_fetch_and(...)						\
+	__atomic_op_fence(atomic_fetch_and, __VA_ARGS__)
 #endif
-#endif /* atomic64_inc_return_relaxed */
-
+#endif /* atomic_fetch_and_relaxed */

-/* atomic64_sub_return_relaxed */
-#ifndef atomic64_sub_return_relaxed
-#define atomic64_sub_return_relaxed	atomic64_sub_return
-#define atomic64_sub_return_acquire	atomic64_sub_return
-#define atomic64_sub_return_release	atomic64_sub_return
+#ifdef atomic_andnot
+/* atomic_fetch_andnot_relaxed */
+#ifndef atomic_fetch_andnot_relaxed
+#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot
+#define atomic_fetch_andnot_acquire	atomic_fetch_andnot
+#define atomic_fetch_andnot_release	atomic_fetch_andnot

-#else /* atomic64_sub_return_relaxed */
+#else /* atomic_fetch_andnot_relaxed */

-#ifndef atomic64_sub_return_acquire
-#define atomic64_sub_return_acquire(...)				\
-	__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
+#ifndef atomic_fetch_andnot_acquire
+#define atomic_fetch_andnot_acquire(...)				\
+	__atomic_op_acquire(atomic_fetch_andnot, __VA_ARGS__)
 #endif

-#ifndef atomic64_sub_return_release
-#define atomic64_sub_return_release(...)				\
-	__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
+#ifndef atomic_fetch_andnot_release
+#define atomic_fetch_andnot_release(...)				\
+	__atomic_op_release(atomic_fetch_andnot, __VA_ARGS__)
 #endif

-#ifndef atomic64_sub_return
-#define atomic64_sub_return(...)					\
-	__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
+#ifndef atomic_fetch_andnot
+#define atomic_fetch_andnot(...)					\
+	__atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__)
 #endif
-#endif /* atomic64_sub_return_relaxed */
+#endif /* atomic_fetch_andnot_relaxed */
+#endif /* atomic_andnot */

-/* atomic64_dec_return_relaxed */
-#ifndef atomic64_dec_return_relaxed
-#define atomic64_dec_return_relaxed	atomic64_dec_return
-#define atomic64_dec_return_acquire	atomic64_dec_return
-#define atomic64_dec_return_release	atomic64_dec_return
+/* atomic_fetch_xor_relaxed */
+#ifndef atomic_fetch_xor_relaxed
+#define atomic_fetch_xor_relaxed	atomic_fetch_xor
+#define atomic_fetch_xor_acquire	atomic_fetch_xor
+#define atomic_fetch_xor_release	atomic_fetch_xor

-#else /* atomic64_dec_return_relaxed */
+#else /* atomic_fetch_xor_relaxed */

-#ifndef atomic64_dec_return_acquire
-#define atomic64_dec_return_acquire(...)				\
-	__atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
+#ifndef atomic_fetch_xor_acquire
+#define atomic_fetch_xor_acquire(...)					\
+	__atomic_op_acquire(atomic_fetch_xor, __VA_ARGS__)
 #endif

-#ifndef atomic64_dec_return_release
-#define atomic64_dec_return_release(...)				\
-	__atomic_op_release(atomic64_dec_return, __VA_ARGS__)
+#ifndef atomic_fetch_xor_release
+#define atomic_fetch_xor_release(...)					\
+	__atomic_op_release(atomic_fetch_xor, __VA_ARGS__)
 #endif

-#ifndef atomic64_dec_return
-#define atomic64_dec_return(...)					\
-	__atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
+#ifndef atomic_fetch_xor
+#define atomic_fetch_xor(...)						\
+	__atomic_op_fence(atomic_fetch_xor, __VA_ARGS__)
 #endif
-#endif /* atomic64_dec_return_relaxed */
+#endif /* atomic_fetch_xor_relaxed */

-/* atomic64_xchg_relaxed */
-#ifndef atomic64_xchg_relaxed
-#define atomic64_xchg_relaxed		atomic64_xchg
-#define atomic64_xchg_acquire		atomic64_xchg
-#define atomic64_xchg_release		atomic64_xchg
-#else /* atomic64_xchg_relaxed */
+
+/* atomic_xchg_relaxed */
+#ifndef atomic_xchg_relaxed
+#define atomic_xchg_relaxed		atomic_xchg
+#define atomic_xchg_acquire		atomic_xchg
+#define atomic_xchg_release		atomic_xchg

-#ifndef atomic64_xchg_acquire
-#define atomic64_xchg_acquire(...)					\
-	__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
+#else /* atomic_xchg_relaxed */
+
+#ifndef atomic_xchg_acquire
+#define atomic_xchg_acquire(...)					\
+	__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
 #endif

-#ifndef atomic64_xchg_release
-#define atomic64_xchg_release(...)					\
-	__atomic_op_release(atomic64_xchg, __VA_ARGS__)
+#ifndef atomic_xchg_release
+#define atomic_xchg_release(...)					\
+	__atomic_op_release(atomic_xchg, __VA_ARGS__)
 #endif

-#ifndef atomic64_xchg
-#define atomic64_xchg(...)						\
-	__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
+#ifndef atomic_xchg
+#define atomic_xchg(...)						\
+	__atomic_op_fence(atomic_xchg, __VA_ARGS__)
 #endif
-#endif /* atomic64_xchg_relaxed */
+#endif /* atomic_xchg_relaxed */

-/* atomic64_cmpxchg_relaxed */
-#ifndef atomic64_cmpxchg_relaxed
-#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg
-#define atomic64_cmpxchg_acquire	atomic64_cmpxchg
-#define atomic64_cmpxchg_release	atomic64_cmpxchg
+/* atomic_cmpxchg_relaxed */
+#ifndef atomic_cmpxchg_relaxed
+#define atomic_cmpxchg_relaxed		atomic_cmpxchg
+#define atomic_cmpxchg_acquire		atomic_cmpxchg
+#define atomic_cmpxchg_release		atomic_cmpxchg

-#else /* atomic64_cmpxchg_relaxed */
+#else /* atomic_cmpxchg_relaxed */

-#ifndef atomic64_cmpxchg_acquire
-#define atomic64_cmpxchg_acquire(...)					\
-	__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
+#ifndef atomic_cmpxchg_acquire
+#define atomic_cmpxchg_acquire(...)					\
+	__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
 #endif

-#ifndef atomic64_cmpxchg_release
-#define atomic64_cmpxchg_release(...)					\
-	__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
+#ifndef atomic_cmpxchg_release
+#define atomic_cmpxchg_release(...)					\
+	__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
 #endif

-#ifndef atomic64_cmpxchg
-#define atomic64_cmpxchg(...)						\
-	__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
+#ifndef atomic_cmpxchg
+#define atomic_cmpxchg(...)						\
+	__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
 #endif
-#endif /* atomic64_cmpxchg_relaxed */
+#endif /* atomic_cmpxchg_relaxed */

 /* cmpxchg_relaxed */
 #ifndef cmpxchg_relaxed
@@ -463,18 +522,28 @@ static inline void atomic_andnot(int i, atomic_t *v)
 {
 	atomic_and(~i, v);
 }
-#endif

-static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+static inline int atomic_fetch_andnot(int i, atomic_t *v)
 {
-	atomic_andnot(mask, v);
+	return atomic_fetch_and(~i, v);
 }

-static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v)
 {
-	atomic_or(mask, v);
+	return atomic_fetch_and_relaxed(~i, v);
 }

+static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+	return atomic_fetch_and_acquire(~i, v);
+}
+
+static inline int atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+	return atomic_fetch_and_release(~i, v);
+}
+#endif
+
 /**
  * atomic_inc_not_zero_hint - increment if not null
  * @v: pointer of type atomic_t
@@ -558,36 +627,400 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 }
 #endif

-/**
- * atomic_fetch_or - perform *p |= mask and return old value of *p
- * @p: pointer to atomic_t
- * @mask: mask to OR on the atomic_t
- */
-#ifndef atomic_fetch_or
-static inline int atomic_fetch_or(atomic_t *p, int mask)
-{
-	int old, val = atomic_read(p);
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif

-	for (;;) {
-		old = atomic_cmpxchg(p, val, val | mask);
-		if (old == val)
-			break;
-		val = old;
-	}
+#ifndef atomic64_read_acquire
+#define atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
+#endif

-	return old;
-}
+#ifndef atomic64_set_release
+#define atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
 #endif

-#ifdef CONFIG_GENERIC_ATOMIC64
-#include <asm-generic/atomic64.h>
+/* atomic64_add_return_relaxed */
+#ifndef atomic64_add_return_relaxed
+#define atomic64_add_return_relaxed	atomic64_add_return
+#define atomic64_add_return_acquire	atomic64_add_return
+#define atomic64_add_return_release	atomic64_add_return
+
+#else /* atomic64_add_return_relaxed */
+
+#ifndef atomic64_add_return_acquire
+#define atomic64_add_return_acquire(...)				\
+	__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return_release
+#define atomic64_add_return_release(...)				\
+	__atomic_op_release(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return
+#define atomic64_add_return(...)					\
+	__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_add_return_relaxed */
+
+/* atomic64_inc_return_relaxed */
+#ifndef atomic64_inc_return_relaxed
+#define atomic64_inc_return_relaxed	atomic64_inc_return
+#define atomic64_inc_return_acquire	atomic64_inc_return
+#define atomic64_inc_return_release	atomic64_inc_return
+
+#else /* atomic64_inc_return_relaxed */
+
+#ifndef atomic64_inc_return_acquire
+#define atomic64_inc_return_acquire(...)				\
+	__atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_inc_return_release
+#define atomic64_inc_return_release(...)				\
+	__atomic_op_release(atomic64_inc_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_inc_return
+#define atomic64_inc_return(...)					\
+	__atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_inc_return_relaxed */
+
+
+/* atomic64_sub_return_relaxed */
+#ifndef atomic64_sub_return_relaxed
+#define atomic64_sub_return_relaxed	atomic64_sub_return
+#define atomic64_sub_return_acquire	atomic64_sub_return
+#define atomic64_sub_return_release	atomic64_sub_return
+
+#else /* atomic64_sub_return_relaxed */
+
+#ifndef atomic64_sub_return_acquire
+#define atomic64_sub_return_acquire(...)				\
+	__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
 #endif

+#ifndef atomic64_sub_return_release
+#define atomic64_sub_return_release(...)				\
+	__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_sub_return
+#define atomic64_sub_return(...)					\
+	__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_sub_return_relaxed */
+
+/* atomic64_dec_return_relaxed */
+#ifndef atomic64_dec_return_relaxed
+#define atomic64_dec_return_relaxed	atomic64_dec_return
+#define atomic64_dec_return_acquire	atomic64_dec_return
+#define atomic64_dec_return_release	atomic64_dec_return
+
+#else /* atomic64_dec_return_relaxed */
+
+#ifndef atomic64_dec_return_acquire
+#define atomic64_dec_return_acquire(...)				\
+	__atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_dec_return_release
+#define atomic64_dec_return_release(...)				\
+	__atomic_op_release(atomic64_dec_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_dec_return
+#define atomic64_dec_return(...)					\
+	__atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_dec_return_relaxed */
+
+
+/* atomic64_fetch_add_relaxed */
+#ifndef atomic64_fetch_add_relaxed
+#define atomic64_fetch_add_relaxed	atomic64_fetch_add
+#define atomic64_fetch_add_acquire	atomic64_fetch_add
+#define atomic64_fetch_add_release	atomic64_fetch_add
+
+#else /* atomic64_fetch_add_relaxed */
+
+#ifndef atomic64_fetch_add_acquire
+#define atomic64_fetch_add_acquire(...)					\
+	__atomic_op_acquire(atomic64_fetch_add, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_add_release
+#define atomic64_fetch_add_release(...)					\
+	__atomic_op_release(atomic64_fetch_add, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_add
+#define atomic64_fetch_add(...)						\
+	__atomic_op_fence(atomic64_fetch_add, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_add_relaxed */
+
+/* atomic64_fetch_inc_relaxed */
+#ifndef atomic64_fetch_inc_relaxed
+
+#ifndef atomic64_fetch_inc
+#define atomic64_fetch_inc(v)		atomic64_fetch_add(1, (v))
+#define atomic64_fetch_inc_relaxed(v)	atomic64_fetch_add_relaxed(1, (v))
+#define atomic64_fetch_inc_acquire(v)	atomic64_fetch_add_acquire(1, (v))
+#define atomic64_fetch_inc_release(v)	atomic64_fetch_add_release(1, (v))
+#else /* atomic64_fetch_inc */
+#define atomic64_fetch_inc_relaxed	atomic64_fetch_inc
+#define atomic64_fetch_inc_acquire	atomic64_fetch_inc
+#define atomic64_fetch_inc_release	atomic64_fetch_inc
+#endif /* atomic64_fetch_inc */
+
+#else /* atomic64_fetch_inc_relaxed */
+
+#ifndef atomic64_fetch_inc_acquire
+#define atomic64_fetch_inc_acquire(...)					\
+	__atomic_op_acquire(atomic64_fetch_inc, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_inc_release
+#define atomic64_fetch_inc_release(...)					\
+	__atomic_op_release(atomic64_fetch_inc, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_inc
+#define atomic64_fetch_inc(...)						\
+	__atomic_op_fence(atomic64_fetch_inc, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_inc_relaxed */
+
+/* atomic64_fetch_sub_relaxed */
+#ifndef atomic64_fetch_sub_relaxed
+#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub
+#define atomic64_fetch_sub_acquire	atomic64_fetch_sub
+#define atomic64_fetch_sub_release	atomic64_fetch_sub
+
+#else /* atomic64_fetch_sub_relaxed */
+
+#ifndef atomic64_fetch_sub_acquire
+#define atomic64_fetch_sub_acquire(...)					\
+	__atomic_op_acquire(atomic64_fetch_sub, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_sub_release
+#define atomic64_fetch_sub_release(...)					\
+	__atomic_op_release(atomic64_fetch_sub, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_sub
+#define atomic64_fetch_sub(...)						\
+	__atomic_op_fence(atomic64_fetch_sub, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_sub_relaxed */
+
+/* atomic64_fetch_dec_relaxed */
+#ifndef atomic64_fetch_dec_relaxed
+
+#ifndef atomic64_fetch_dec
+#define atomic64_fetch_dec(v)		atomic64_fetch_sub(1, (v))
+#define atomic64_fetch_dec_relaxed(v)	atomic64_fetch_sub_relaxed(1, (v))
+#define atomic64_fetch_dec_acquire(v)	atomic64_fetch_sub_acquire(1, (v))
+#define atomic64_fetch_dec_release(v)	atomic64_fetch_sub_release(1, (v))
+#else /* atomic64_fetch_dec */
+#define atomic64_fetch_dec_relaxed	atomic64_fetch_dec
+#define atomic64_fetch_dec_acquire	atomic64_fetch_dec
+#define atomic64_fetch_dec_release	atomic64_fetch_dec
+#endif /* atomic64_fetch_dec */
+
+#else /* atomic64_fetch_dec_relaxed */
+
+#ifndef atomic64_fetch_dec_acquire
+#define atomic64_fetch_dec_acquire(...)					\
+	__atomic_op_acquire(atomic64_fetch_dec, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_dec_release
+#define atomic64_fetch_dec_release(...)					\
+	__atomic_op_release(atomic64_fetch_dec, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_dec
+#define atomic64_fetch_dec(...)						\
+	__atomic_op_fence(atomic64_fetch_dec, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_dec_relaxed */
+
+/* atomic64_fetch_or_relaxed */
+#ifndef atomic64_fetch_or_relaxed
+#define atomic64_fetch_or_relaxed	atomic64_fetch_or
+#define atomic64_fetch_or_acquire	atomic64_fetch_or
+#define atomic64_fetch_or_release	atomic64_fetch_or
+
+#else /* atomic64_fetch_or_relaxed */
+
+#ifndef atomic64_fetch_or_acquire
+#define atomic64_fetch_or_acquire(...)					\
+	__atomic_op_acquire(atomic64_fetch_or, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_or_release
+#define atomic64_fetch_or_release(...)					\
+	__atomic_op_release(atomic64_fetch_or, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_or
+#define atomic64_fetch_or(...)						\
+	__atomic_op_fence(atomic64_fetch_or, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_or_relaxed */
+
+/* atomic64_fetch_and_relaxed */
+#ifndef atomic64_fetch_and_relaxed
+#define atomic64_fetch_and_relaxed	atomic64_fetch_and
+#define atomic64_fetch_and_acquire	atomic64_fetch_and
+#define atomic64_fetch_and_release	atomic64_fetch_and
+
+#else /* atomic64_fetch_and_relaxed */
+
+#ifndef atomic64_fetch_and_acquire
+#define atomic64_fetch_and_acquire(...)					\
+	__atomic_op_acquire(atomic64_fetch_and, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_and_release
+#define atomic64_fetch_and_release(...)					\
+	__atomic_op_release(atomic64_fetch_and, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_and
+#define atomic64_fetch_and(...)						\
+	__atomic_op_fence(atomic64_fetch_and, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_and_relaxed */
+
+#ifdef atomic64_andnot
+/* atomic64_fetch_andnot_relaxed */
+#ifndef atomic64_fetch_andnot_relaxed
+#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot
+#define atomic64_fetch_andnot_acquire	atomic64_fetch_andnot
+#define atomic64_fetch_andnot_release	atomic64_fetch_andnot
+
+#else /* atomic64_fetch_andnot_relaxed */
+
+#ifndef atomic64_fetch_andnot_acquire
+#define atomic64_fetch_andnot_acquire(...)				\
+	__atomic_op_acquire(atomic64_fetch_andnot, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_andnot_release
+#define atomic64_fetch_andnot_release(...)				\
+	__atomic_op_release(atomic64_fetch_andnot, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_andnot
+#define atomic64_fetch_andnot(...)					\
+	__atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_andnot_relaxed */
+#endif /* atomic64_andnot */
+
+/* atomic64_fetch_xor_relaxed */
+#ifndef atomic64_fetch_xor_relaxed
+#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor
+#define atomic64_fetch_xor_acquire	atomic64_fetch_xor
+#define atomic64_fetch_xor_release	atomic64_fetch_xor
+
+#else /* atomic64_fetch_xor_relaxed */
+
+#ifndef atomic64_fetch_xor_acquire
+#define atomic64_fetch_xor_acquire(...)					\
+	__atomic_op_acquire(atomic64_fetch_xor, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_xor_release
+#define atomic64_fetch_xor_release(...)					\
+	__atomic_op_release(atomic64_fetch_xor, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_xor
+#define atomic64_fetch_xor(...)						\
+	__atomic_op_fence(atomic64_fetch_xor, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_xor_relaxed */
+
+
+/* atomic64_xchg_relaxed */
+#ifndef atomic64_xchg_relaxed
+#define atomic64_xchg_relaxed		atomic64_xchg
+#define atomic64_xchg_acquire		atomic64_xchg
+#define atomic64_xchg_release		atomic64_xchg
+
+#else /* atomic64_xchg_relaxed */
+
+#ifndef atomic64_xchg_acquire
+#define atomic64_xchg_acquire(...)					\
+	__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg_release
+#define atomic64_xchg_release(...)					\
+	__atomic_op_release(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg
+#define atomic64_xchg(...)						\
+	__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_xchg_relaxed */
+
+/* atomic64_cmpxchg_relaxed */
+#ifndef atomic64_cmpxchg_relaxed
+#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg
+#define atomic64_cmpxchg_acquire	atomic64_cmpxchg
+#define atomic64_cmpxchg_release	atomic64_cmpxchg
+
+#else /* atomic64_cmpxchg_relaxed */
+
+#ifndef atomic64_cmpxchg_acquire
+#define atomic64_cmpxchg_acquire(...)					\
+	__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg_release
+#define atomic64_cmpxchg_release(...)					\
+	__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg
+#define atomic64_cmpxchg(...)						\
+	__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_cmpxchg_relaxed */
+
 #ifndef atomic64_andnot
 static inline void atomic64_andnot(long long i, atomic64_t *v)
 {
 	atomic64_and(~i, v);
 }
+
+static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v)
+{
+	return atomic64_fetch_and(~i, v);
+}
+
+static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v)
+{
+	return atomic64_fetch_and_relaxed(~i, v);
+}
+
+static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v)
+{
+	return atomic64_fetch_and_acquire(~i, v);
+}
+
+static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v)
+{
+	return atomic64_fetch_and_release(~i, v);
+}
 #endif

 #include <asm-generic/atomic-long.h>
diff --git a/include/linux/audit.h b/include/linux/audit.h
index e38e3fc13ea8..9d4443f93db6 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -163,8 +163,6 @@ extern void audit_log_task_info(struct audit_buffer *ab,
 extern int audit_update_lsm_rules(void);

 /* Private API (for audit.c only) */
-extern int audit_filter_user(int type);
-extern int audit_filter_type(int type);
 extern int audit_rule_change(int type, __u32 portid, int seq,
 				void *data, size_t datasz);
 extern int audit_list_rules_send(struct sk_buff *request_skb, int seq);
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 3f103076d0bf..c357f27d5483 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -163,6 +163,7 @@ struct backing_dev_info {
 	wait_queue_head_t wb_waitq;

 	struct device *dev;
+	struct device *owner;

 	struct timer_list laptop_mode_wb_timer;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index c82794f20110..43b93a947e61 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -24,6 +24,7 @@ __printf(3, 4)
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		const char *fmt, ...);
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
+int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
 void bdi_unregister(struct backing_dev_info *bdi);
 int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
@@ -197,7 +198,7 @@ static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
 }

 long congestion_wait(int sync, long timeout);
-long wait_iff_congested(struct zone *zone, int sync, long timeout);
+long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout);
 int pdflush_proc_obsolete(struct ctl_table *table, int write,
 			void __user *buffer, size_t *lenp, loff_t *ppos);
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 1e7a69adbe6f..5f2fd61ef4fb 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -141,9 +141,10 @@ extern void devm_backlight_device_unregister(struct device *dev,
 					struct backlight_device *bd);
 extern void backlight_force_update(struct backlight_device *bd,
 				   enum backlight_update_reason reason);
-extern bool backlight_device_registered(enum backlight_type type);
 extern int backlight_register_notifier(struct notifier_block *nb);
 extern int backlight_unregister_notifier(struct notifier_block *nb);
+extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type);
+extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness);

 #define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
```
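The atomic_fetch_*() family introduced in the atomic.h rework above returns the value the counter held *before* the read-modify-write, which is what enables lock-free "claim" idioms. A small sketch of that usage (the flag name is invented for illustration):

```c
#include <linux/atomic.h>

#define DEMO_FLAG_BUSY	0x1

static atomic_t demo_flags = ATOMIC_INIT(0);

/* True iff this caller is the one that set the busy bit. */
static bool demo_try_claim(void)
{
	/* atomic_fetch_or() returns the old value: the claim succeeded
	 * exactly when the bit was still clear before our RMW. */
	return !(atomic_fetch_or(DEMO_FLAG_BUSY, &demo_flags) & DEMO_FLAG_BUSY);
}
```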
```diff
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 9b0a15d06a4f..79542b2698ec 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -48,6 +48,7 @@
 #include <linux/migrate.h>
 #include <linux/gfp.h>
 #include <linux/err.h>
+#include <linux/fs.h>

 /*
  * Balloon device information descriptor.
@@ -62,6 +63,7 @@ struct balloon_dev_info {
 	struct list_head pages;		/* Pages enqueued & handled to Host */
 	int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
 			struct page *page, enum migrate_mode mode);
+	struct inode *inode;
 };

 extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
@@ -73,45 +75,19 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
 	spin_lock_init(&balloon->pages_lock);
 	INIT_LIST_HEAD(&balloon->pages);
 	balloon->migratepage = NULL;
+	balloon->inode = NULL;
 }

 #ifdef CONFIG_BALLOON_COMPACTION
-extern bool balloon_page_isolate(struct page *page);
+extern const struct address_space_operations balloon_aops;
+extern bool balloon_page_isolate(struct page *page,
+				isolate_mode_t mode);
 extern void balloon_page_putback(struct page *page);
-extern int balloon_page_migrate(struct page *newpage,
+extern int balloon_page_migrate(struct address_space *mapping,
+				struct page *newpage,
 				struct page *page, enum migrate_mode mode);

 /*
- * __is_movable_balloon_page - helper to perform @page PageBalloon tests
- */
-static inline bool __is_movable_balloon_page(struct page *page)
-{
-	return PageBalloon(page);
-}
-
-/*
- * balloon_page_movable - test PageBalloon to identify balloon pages
- *			  and PagePrivate to check that the page is not
- *			  isolated and can be moved by compaction/migration.
- *
- * As we might return false positives in the case of a balloon page being just
- * released under us, this need to be re-tested later, under the page lock.
- */
-static inline bool balloon_page_movable(struct page *page)
-{
-	return PageBalloon(page) && PagePrivate(page);
-}
-
-/*
- * isolated_balloon_page - identify an isolated balloon page on private
- *			   compaction/migration page lists.
- */
-static inline bool isolated_balloon_page(struct page *page)
-{
-	return PageBalloon(page);
-}
-
-/*
  * balloon_page_insert - insert a page into the balloon's page list and make
  *			 the page->private assignment accordingly.
  * @balloon : pointer to balloon device
@@ -124,7 +100,7 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
 				       struct page *page)
 {
 	__SetPageBalloon(page);
-	SetPagePrivate(page);
+	__SetPageMovable(page, balloon->inode->i_mapping);
 	set_page_private(page, (unsigned long)balloon);
 	list_add(&page->lru, &balloon->pages);
 }
@@ -140,11 +116,14 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
 static inline void balloon_page_delete(struct page *page)
 {
 	__ClearPageBalloon(page);
+	__ClearPageMovable(page);
 	set_page_private(page, 0);
-	if (PagePrivate(page)) {
-		ClearPagePrivate(page);
+
+	/*
+	 * No touch page.lru field once @page has been isolated
+	 * because VM is using the field.
+	 */
+	if (!PageIsolated(page))
 		list_del(&page->lru);
-	}
 }

 /*
diff --git a/include/linux/bcm47xx_sprom.h b/include/linux/bcm47xx_sprom.h
new file
index 000000000000..c06b47c84e1a
--- /dev/null
+++ b/include/linux/bcm47xx_sprom.h
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __BCM47XX_SPROM_H
+#define __BCM47XX_SPROM_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+
+#ifdef CONFIG_BCM47XX_SPROM
+int bcm47xx_sprom_register_fallbacks(void);
+#else
+static inline int bcm47xx_sprom_register_fallbacks(void)
+{
+	return -ENOTSUPP;
+};
+#endif
+
+#endif /* __BCM47XX_SPROM_H */
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 0367c63f5960..3db25df396cb 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -4,6 +4,7 @@
 #include <linux/pci.h>
 #include <linux/mod_devicetable.h>

+#include <linux/bcma/bcma_driver_arm_c9.h>
 #include <linux/bcma/bcma_driver_chipcommon.h>
 #include <linux/bcma/bcma_driver_pci.h>
 #include <linux/bcma/bcma_driver_pcie2.h>
@@ -158,6 +159,7 @@ struct bcma_host_ops {
 #define BCMA_CORE_DEFAULT		0xFFF

 #define BCMA_MAX_NR_CORES		16
+#define BCMA_CORE_SIZE			0x1000

 /* Chip IDs of PCIe devices */
 #define BCMA_CHIP_ID_BCM4313	0x4313
diff --git a/include/linux/bcma/bcma_driver_arm_c9.h b/include/linux/bcma/bcma_driver_arm_c9.h
new file
index 000000000000..93bd73d670d5
--- /dev/null
+++ b/include/linux/bcma/bcma_driver_arm_c9.h
@@ -0,0 +1,15 @@
+#ifndef LINUX_BCMA_DRIVER_ARM_C9_H_
+#define LINUX_BCMA_DRIVER_ARM_C9_H_
+
+/* DMU (Device Management Unit) */
+#define BCMA_DMU_CRU_USB2_CONTROL			0x0164
+#define  BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_MASK	0x00000FFC
+#define  BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_SHIFT	2
+#define  BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_MASK	0x00007000
+#define  BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_SHIFT	12
+#define BCMA_DMU_CRU_CLKSET_KEY				0x0180
+#define BCMA_DMU_CRU_STRAPS_CTRL			0x02A0
+#define  BCMA_DMU_CRU_STRAPS_CTRL_USB3			0x00000010
+#define  BCMA_DMU_CRU_STRAPS_CTRL_4BYTE			0x00008000
+
+#endif /* LINUX_BCMA_DRIVER_ARM_C9_H_ */
```
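balloon_page_insert() above now tags balloon pages as movable against the mapping of the balloon device's inode, so a balloon driver must populate balloon_dev_info::inode before inflating. A rough sketch of that wiring; where the inode comes from (virtio_balloon uses a dedicated pseudo-filesystem mount for it) is outside this header and assumed here:

```c
#include <linux/balloon_compaction.h>

static struct balloon_dev_info demo_balloon;

/* @inode must outlive the balloon; its i_mapping backs __SetPageMovable(). */
static void demo_balloon_init(struct inode *inode)
{
	balloon_devinfo_init(&demo_balloon);	/* leaves inode == NULL */
	demo_balloon.inode = inode;
}

static void demo_balloon_inflate_one(void)
{
	/* Allocates a page, marks it movable and queues it on .pages. */
	struct page *page = balloon_page_enqueue(&demo_balloon);

	if (!page)
		return;		/* allocation failure is possible */
}
```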
*/ #define BCMA_CHIPCTL_4331_BT_COEXIST BIT(0) /* 0 disable */ #define BCMA_CHIPCTL_4331_SECI BIT(1) /* 0 SECI is disabled (JATG functional) */ @@ -587,7 +590,6 @@ struct mtd_info; struct bcma_sflash { bool present; - u32 window; u32 blocksize; u16 numblocks; u32 size; diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 576e4639ca60..1303b570b18c 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -65,6 +65,7 @@ struct coredump_params { unsigned long limit; unsigned long mm_flags; loff_t written; + loff_t pos; }; /* @@ -112,6 +113,8 @@ extern int suid_dumpable; extern int setup_arg_pages(struct linux_binprm * bprm, unsigned long stack_top, int executable_stack); +extern int transfer_args_to_stack(struct linux_binprm *bprm, + unsigned long *sp_location); extern int bprm_change_interp(char *interp, struct linux_binprm *bprm); extern int copy_strings_kernel(int argc, const char *const *argv, struct linux_binprm *bprm); diff --git a/include/linux/bio.h b/include/linux/bio.h index 6b7481f62218..23ddf4b46a9b 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -41,44 +41,9 @@ #endif #define BIO_MAX_PAGES 256 -#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_SHIFT) -#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9) -/* - * upper 16 bits of bi_rw define the io priority of this bio - */ -#define BIO_PRIO_SHIFT (8 * sizeof(unsigned long) - IOPRIO_BITS) -#define bio_prio(bio) ((bio)->bi_rw >> BIO_PRIO_SHIFT) -#define bio_prio_valid(bio) ioprio_valid(bio_prio(bio)) - -#define bio_set_prio(bio, prio) do { \ - WARN_ON(prio >= (1 << IOPRIO_BITS)); \ - (bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1); \ - (bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT); \ -} while (0) - -/* - * various member access, note that bio_data should of course not be used - * on highmem page vectors - */ -#define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx]) - -#define bvec_iter_page(bvec, iter) \ - (__bvec_iter_bvec((bvec), (iter))->bv_page) - -#define bvec_iter_len(bvec, iter) \ - min((iter).bi_size, \ - __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done) - -#define bvec_iter_offset(bvec, iter) \ - (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done) - -#define bvec_iter_bvec(bvec, iter) \ -((struct bio_vec) { \ - .bv_page = bvec_iter_page((bvec), (iter)), \ - .bv_len = bvec_iter_len((bvec), (iter)), \ - .bv_offset = bvec_iter_offset((bvec), (iter)), \ -}) +#define bio_prio(bio) (bio)->bi_ioprio +#define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio) #define bio_iter_iovec(bio, iter) \ bvec_iter_bvec((bio)->bi_io_vec, (iter)) @@ -106,18 +71,26 @@ static inline bool bio_has_data(struct bio *bio) { if (bio && bio->bi_iter.bi_size && - !(bio->bi_rw & REQ_DISCARD)) + bio_op(bio) != REQ_OP_DISCARD && + bio_op(bio) != REQ_OP_SECURE_ERASE) return true; return false; } +static inline bool bio_no_advance_iter(struct bio *bio) +{ + return bio_op(bio) == REQ_OP_DISCARD || + bio_op(bio) == REQ_OP_SECURE_ERASE || + bio_op(bio) == REQ_OP_WRITE_SAME; +} + static inline bool bio_is_rw(struct bio *bio) { if (!bio_has_data(bio)) return false; - if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK) + if (bio_no_advance_iter(bio)) return false; return true; @@ -125,7 +98,7 @@ static inline bool bio_is_rw(struct bio *bio) static inline bool bio_mergeable(struct bio *bio) { - if (bio->bi_rw & REQ_NOMERGE_FLAGS) + if (bio->bi_opf & REQ_NOMERGE_FLAGS) return false; return true; @@ -193,39 +166,12 @@ static inline void *bio_data(struct bio *bio) #define bio_for_each_segment_all(bvl, 
bio, i) \ for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++) -static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter, - unsigned bytes) -{ - WARN_ONCE(bytes > iter->bi_size, - "Attempted to advance past end of bvec iter\n"); - - while (bytes) { - unsigned len = min(bytes, bvec_iter_len(bv, *iter)); - - bytes -= len; - iter->bi_size -= len; - iter->bi_bvec_done += len; - - if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) { - iter->bi_bvec_done = 0; - iter->bi_idx++; - } - } -} - -#define for_each_bvec(bvl, bio_vec, iter, start) \ - for (iter = (start); \ - (iter).bi_size && \ - ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ - bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) - - static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, unsigned bytes) { iter->bi_sector += bytes >> 9; - if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK) + if (bio_no_advance_iter(bio)) iter->bi_size -= bytes; else bvec_iter_advance(bio->bi_io_vec, iter, bytes); @@ -253,10 +199,13 @@ static inline unsigned bio_segments(struct bio *bio) * differently: */ - if (bio->bi_rw & REQ_DISCARD) + if (bio_op(bio) == REQ_OP_DISCARD) return 1; - if (bio->bi_rw & REQ_WRITE_SAME) + if (bio_op(bio) == REQ_OP_SECURE_ERASE) + return 1; + + if (bio_op(bio) == REQ_OP_WRITE_SAME) return 1; bio_for_each_segment(bv, bio, iter) @@ -375,7 +324,7 @@ struct bio_integrity_payload { static inline struct bio_integrity_payload *bio_integrity(struct bio *bio) { - if (bio->bi_rw & REQ_INTEGRITY) + if (bio->bi_opf & REQ_INTEGRITY) return bio->bi_integrity; return NULL; @@ -473,7 +422,7 @@ static inline void bio_io_error(struct bio *bio) struct request_queue; extern int bio_phys_segments(struct request_queue *, struct bio *); -extern int submit_bio_wait(int rw, struct bio *bio); +extern int submit_bio_wait(struct bio *bio); extern void bio_advance(struct bio *, unsigned); extern void bio_init(struct bio *); @@ -527,11 +476,14 @@ extern unsigned int bvec_nr_vecs(unsigned short idx); int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); int bio_associate_current(struct bio *bio); void bio_disassociate_task(struct bio *bio); +void bio_clone_blkcg_association(struct bio *dst, struct bio *src); #else /* CONFIG_BLK_CGROUP */ static inline int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css) { return 0; } static inline int bio_associate_current(struct bio *bio) { return -ENOENT; } static inline void bio_disassociate_task(struct bio *bio) { } +static inline void bio_clone_blkcg_association(struct bio *dst, + struct bio *src) { } #endif /* CONFIG_BLK_CGROUP */ #ifdef CONFIG_HIGHMEM @@ -703,14 +655,23 @@ static inline struct bio *bio_list_get(struct bio_list *bl) } /* + * Increment chain count for the bio. Make sure the CHAIN flag update + * is visible before the raised count. + */ +static inline void bio_inc_remaining(struct bio *bio) +{ + bio_set_flag(bio, BIO_CHAIN); + smp_mb__before_atomic(); + atomic_inc(&bio->__bi_remaining); +} + +/* * bio_set is used to allow other portions of the IO system to * allocate their own private memory pools for bio and iovec structures. * These memory pools in turn all allocate from the bio_slab * and the bvec_slabs[]. 
*/ #define BIO_POOL_SIZE 2 -#define BIOVEC_NR_POOLS 6 -#define BIOVEC_MAX_IDX (BIOVEC_NR_POOLS - 1) struct bio_set { struct kmem_cache *bio_slab; diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index e9b0b9ab07e5..598bc999f4c2 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -266,9 +266,12 @@ static inline int bitmap_equal(const unsigned long *src1, const unsigned long *src2, unsigned int nbits) { if (small_const_nbits(nbits)) - return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); - else - return __bitmap_equal(src1, src2, nbits); + return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); +#ifdef CONFIG_S390 + if (__builtin_constant_p(nbits) && (nbits % BITS_PER_LONG) == 0) + return !memcmp(src1, src2, nbits / 8); +#endif + return __bitmap_equal(src1, src2, nbits); } static inline int bitmap_intersects(const unsigned long *src1, diff --git a/include/linux/bitops.h b/include/linux/bitops.h index defeaac0745f..299e76b59fe9 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -227,6 +227,22 @@ static inline unsigned long __ffs64(u64 word) }) #endif +#ifndef bit_clear_unless +#define bit_clear_unless(ptr, _clear, _test) \ +({ \ + const typeof(*ptr) clear = (_clear), test = (_test); \ + typeof(*ptr) old, new; \ + \ + do { \ + old = ACCESS_ONCE(*ptr); \ + new = old & ~clear; \ + } while (!(old & test) && \ + cmpxchg(ptr, old, new) != old); \ + \ + !(old & test); \ +}) +#endif + #ifndef find_last_bit /** * find_last_bit - find the last set bit in a memory region diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index c02e669945e9..10648e300c93 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -590,25 +590,26 @@ static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat) /** * blkg_rwstat_add - add a value to a blkg_rwstat * @rwstat: target blkg_rwstat - * @rw: mask of REQ_{WRITE|SYNC} + * @op: REQ_OP + * @op_flags: rq_flag_bits * @val: value to add * * Add @val to @rwstat. The counters are chosen according to @op and @op_flags. The * caller is responsible for synchronizing calls to this function.
*/ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, - int rw, uint64_t val) + int op, int op_flags, uint64_t val) { struct percpu_counter *cnt; - if (rw & REQ_WRITE) + if (op_is_write(op)) cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE]; else cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ]; __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH); - if (rw & REQ_SYNC) + if (op_flags & REQ_SYNC) cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC]; else cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC]; @@ -713,9 +714,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, if (!throtl) { blkg = blkg ?: q->root_blkg; - blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw, + blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_size); - blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1); + blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_opf, 1); } rcu_read_unlock(); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 9ac9799b702b..e43bbffb5b7a 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -96,6 +96,7 @@ typedef int (init_request_fn)(void *, struct request *, unsigned int, unsigned int, unsigned int); typedef void (exit_request_fn)(void *, struct request *, unsigned int, unsigned int); +typedef int (reinit_request_fn)(void *, struct request *); typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, bool); @@ -145,6 +146,7 @@ struct blk_mq_ops { */ init_request_fn *init_request; exit_request_fn *exit_request; + reinit_request_fn *reinit_request; }; enum { @@ -196,6 +198,8 @@ enum { struct request *blk_mq_alloc_request(struct request_queue *q, int rw, unsigned int flags); +struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op, + unsigned int flags, unsigned int hctx_idx); struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags); @@ -238,11 +242,12 @@ void blk_mq_start_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void blk_mq_run_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); -void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, - void *priv); +void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, + busy_tag_iter_fn *fn, void *priv); void blk_mq_freeze_queue(struct request_queue *q); void blk_mq_unfreeze_queue(struct request_queue *q); void blk_mq_freeze_queue_start(struct request_queue *q); +int blk_mq_reinit_tagset(struct blk_mq_tag_set *set); void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 86a38ea1823f..436f43f87da9 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -6,6 +6,7 @@ #define __LINUX_BLK_TYPES_H #include <linux/types.h> +#include <linux/bvec.h> struct bio_set; struct bio; @@ -17,28 +18,7 @@ struct cgroup_subsys_state; typedef void (bio_end_io_t) (struct bio *); typedef void (bio_destructor_t) (struct bio *); -/* - * was unsigned short, but we might as well be ready for > 64kB I/O pages - */ -struct bio_vec { - struct page *bv_page; - unsigned int bv_len; - unsigned int bv_offset; -}; - #ifdef CONFIG_BLOCK - -struct bvec_iter { - sector_t bi_sector; /* device address in 512 byte - sectors */ - unsigned int bi_size; /* residual I/O count */ - - unsigned int bi_idx; /* current index into bvl_vec */ - - unsigned int 
bi_bvec_done; /* number of bytes completed in - current bvec */ -}; - /* * main unit of I/O for the block layer and lower layers (ie drivers and * stacking drivers) @@ -46,11 +26,13 @@ struct bvec_iter { struct bio { struct bio *bi_next; /* request queue link */ struct block_device *bi_bdev; - unsigned int bi_flags; /* status, command, etc */ int bi_error; - unsigned long bi_rw; /* bottom bits READ/WRITE, - * top bits priority + unsigned int bi_opf; /* bottom bits req flags, + * top bits REQ_OP. Use + * accessors. */ + unsigned short bi_flags; /* status, command, etc */ + unsigned short bi_ioprio; struct bvec_iter bi_iter; @@ -107,6 +89,16 @@ struct bio { struct bio_vec bi_inline_vecs[0]; }; +#define BIO_OP_SHIFT (8 * sizeof(unsigned int) - REQ_OP_BITS) +#define bio_op(bio) ((bio)->bi_opf >> BIO_OP_SHIFT) + +#define bio_set_op_attrs(bio, op, op_flags) do { \ + WARN_ON(op >= (1 << REQ_OP_BITS)); \ + (bio)->bi_opf &= ((1 << BIO_OP_SHIFT) - 1); \ + (bio)->bi_opf |= ((unsigned int) (op) << BIO_OP_SHIFT); \ + (bio)->bi_opf |= op_flags; \ +} while (0) + #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) /* @@ -123,29 +115,34 @@ struct bio { /* * Flags starting here get preserved by bio_reset() - this includes - * BIO_POOL_IDX() + * BVEC_POOL_IDX() + */ +#define BIO_RESET_BITS 10 + +/* + * We support 6 different bvec pools, the last one is magic in that it + * is backed by a mempool. */ -#define BIO_RESET_BITS 13 -#define BIO_OWNS_VEC 13 /* bio_free() should free bvec */ +#define BVEC_POOL_NR 6 +#define BVEC_POOL_MAX (BVEC_POOL_NR - 1) /* - * top 4 bits of bio flags indicate the pool this bio came from + * Top 4 bits of bio flags indicate the pool the bvecs came from. We add + * 1 to the actual index so that 0 indicates that there are no bvecs to be + * freed. */ -#define BIO_POOL_BITS (4) -#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1) -#define BIO_POOL_OFFSET (32 - BIO_POOL_BITS) -#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET) -#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET) +#define BVEC_POOL_BITS (4) +#define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS) +#define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET) #endif /* CONFIG_BLOCK */ /* * Request flags. For use in the cmd_flags field of struct request, and in - * bi_rw of struct bio. Note that some flags are only valid in either one. + * bi_opf of struct bio. Note that some flags are only valid in either one. */ enum rq_flag_bits { /* common flags */ - __REQ_WRITE, /* not set, read. 
set, write */ __REQ_FAILFAST_DEV, /* no driver retries of device errors */ __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ @@ -153,14 +150,11 @@ enum rq_flag_bits { __REQ_SYNC, /* request is sync (sync write or read) */ __REQ_META, /* metadata io request */ __REQ_PRIO, /* boost priority in cfq */ - __REQ_DISCARD, /* request to discard sectors */ - __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */ - __REQ_WRITE_SAME, /* write same block many times */ __REQ_NOIDLE, /* don't anticipate more IO after this one */ __REQ_INTEGRITY, /* I/O includes block integrity payload */ __REQ_FUA, /* forced unit access */ - __REQ_FLUSH, /* request for cache flush */ + __REQ_PREFLUSH, /* request for cache flush */ /* bio only flags */ __REQ_RAHEAD, /* read ahead, can fail anytime */ @@ -191,31 +185,25 @@ enum rq_flag_bits { __REQ_NR_BITS, /* stops here */ }; -#define REQ_WRITE (1ULL << __REQ_WRITE) #define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV) #define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT) #define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER) #define REQ_SYNC (1ULL << __REQ_SYNC) #define REQ_META (1ULL << __REQ_META) #define REQ_PRIO (1ULL << __REQ_PRIO) -#define REQ_DISCARD (1ULL << __REQ_DISCARD) -#define REQ_WRITE_SAME (1ULL << __REQ_WRITE_SAME) #define REQ_NOIDLE (1ULL << __REQ_NOIDLE) #define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY) #define REQ_FAILFAST_MASK \ (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) #define REQ_COMMON_MASK \ - (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \ - REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \ - REQ_SECURE | REQ_INTEGRITY) + (REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \ + REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE) #define REQ_CLONE_MASK REQ_COMMON_MASK -#define BIO_NO_ADVANCE_ITER_MASK (REQ_DISCARD|REQ_WRITE_SAME) - /* This mask is used for both bio and request merge checking */ #define REQ_NOMERGE_FLAGS \ - (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ) + (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ) #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) #define REQ_THROTTLED (1ULL << __REQ_THROTTLED) @@ -233,15 +221,25 @@ enum rq_flag_bits { #define REQ_PREEMPT (1ULL << __REQ_PREEMPT) #define REQ_ALLOCED (1ULL << __REQ_ALLOCED) #define REQ_COPY_USER (1ULL << __REQ_COPY_USER) -#define REQ_FLUSH (1ULL << __REQ_FLUSH) +#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) #define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ) #define REQ_IO_STAT (1ULL << __REQ_IO_STAT) #define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE) -#define REQ_SECURE (1ULL << __REQ_SECURE) #define REQ_PM (1ULL << __REQ_PM) #define REQ_HASHED (1ULL << __REQ_HASHED) #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) +enum req_op { + REQ_OP_READ, + REQ_OP_WRITE, + REQ_OP_DISCARD, /* request to discard sectors */ + REQ_OP_SECURE_ERASE, /* request to securely erase sectors */ + REQ_OP_WRITE_SAME, /* write same block many times */ + REQ_OP_FLUSH, /* request for cache flush */ +}; + +#define REQ_OP_BITS 3 + typedef unsigned int blk_qc_t; #define BLK_QC_T_NONE -1U #define BLK_QC_T_SHIFT 16 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 669e419d6234..e79055c8b577 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -47,7 +47,6 @@ struct pr_ops; */ #define BLKCG_MAX_POLS 2 -struct request; typedef void 
(rq_end_io_fn)(struct request *, int); #define BLK_RL_SYNCFULL (1U << 0) @@ -90,18 +89,17 @@ struct request { struct list_head queuelist; union { struct call_single_data csd; - unsigned long fifo_time; + u64 fifo_time; }; struct request_queue *q; struct blk_mq_ctx *mq_ctx; - u64 cmd_flags; + int cpu; unsigned cmd_type; + u64 cmd_flags; unsigned long atomic_flags; - int cpu; - /* the following two fields are internal, NEVER access directly */ unsigned int __data_len; /* total data len */ sector_t __sector; /* sector cursor */ @@ -200,6 +198,20 @@ struct request { struct request *next_rq; }; +#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS) +#define req_op(req) ((req)->cmd_flags >> REQ_OP_SHIFT) + +#define req_set_op(req, op) do { \ + WARN_ON(op >= (1 << REQ_OP_BITS)); \ + (req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1); \ + (req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT); \ +} while (0) + +#define req_set_op_attrs(req, op, flags) do { \ + req_set_op(req, op); \ + (req)->cmd_flags |= flags; \ +} while (0) + static inline unsigned short req_get_ioprio(struct request *req) { return req->ioprio; @@ -433,8 +445,6 @@ struct request_queue { /* * for flush operations */ - unsigned int flush_flags; - unsigned int flush_not_queueable:1; struct blk_flush_queue *fq; struct list_head requeue_list; @@ -485,12 +495,16 @@ struct request_queue { #define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */ #define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */ #define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */ -#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */ +#define QUEUE_FLAG_SECERASE 17 /* supports secure erase */ #define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ #define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ #define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ #define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ #define QUEUE_FLAG_POLL 22 /* IO polling enabled if set */ +#define QUEUE_FLAG_WC 23 /* Write back caching */ +#define QUEUE_FLAG_FUA 24 /* device supports FUA writes */ +#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueable */ +#define QUEUE_FLAG_DAX 26 /* device supports DAX */ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_STACKABLE) | \ @@ -578,8 +592,9 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) #define blk_queue_stackable(q) \ test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) -#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \ - test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags)) +#define blk_queue_secure_erase(q) \ + (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags)) +#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags) #define blk_noretry_request(rq) \ ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ @@ -596,7 +611,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) -#define rq_data_dir(rq) ((int)((rq)->cmd_flags & 1)) +#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
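The request-side accessors above mirror the bio-side ones: the operation now lives in the top REQ_OP_BITS of cmd_flags and is meant to be read and written through req_op()/req_set_op_attrs() rather than by poking cmd_flags directly. A minimal sketch of the intended call pattern (the helper name example_prep_write is illustrative, not part of the patch):

static void example_prep_write(struct request *rq)
{
	/* encode the op and the flag bits in one step */
	req_set_op_attrs(rq, REQ_OP_WRITE, REQ_SYNC);

	/* readers use the accessors instead of the old (cmd_flags & 1) test */
	WARN_ON(req_op(rq) != REQ_OP_WRITE);
	WARN_ON(rq_data_dir(rq) != WRITE);
}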
/* * Driver can handle struct request, if it either has an old style @@ -615,14 +630,14 @@ static inline unsigned int blk_queue_cluster(struct request_queue *q) /* * We regard a request as sync if it is either a read or a sync write */ -static inline bool rw_is_sync(unsigned int rw_flags) +static inline bool rw_is_sync(int op, unsigned int rw_flags) { - return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC); + return op == REQ_OP_READ || (rw_flags & REQ_SYNC); } static inline bool rq_is_sync(struct request *rq) { - return rw_is_sync(rq->cmd_flags); + return rw_is_sync(req_op(rq), rq->cmd_flags); } static inline bool blk_rl_full(struct request_list *rl, bool sync) @@ -651,22 +666,10 @@ static inline bool rq_mergeable(struct request *rq) if (rq->cmd_type != REQ_TYPE_FS) return false; - if (rq->cmd_flags & REQ_NOMERGE_FLAGS) + if (req_op(rq) == REQ_OP_FLUSH) return false; - return true; -} - -static inline bool blk_check_merge_flags(unsigned int flags1, - unsigned int flags2) -{ - if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD)) - return false; - - if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE)) - return false; - - if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME)) + if (rq->cmd_flags & REQ_NOMERGE_FLAGS) return false; return true; @@ -767,6 +770,17 @@ static inline void rq_flush_dcache_pages(struct request *rq) } #endif +#ifdef CONFIG_PRINTK +#define vfs_msg(sb, level, fmt, ...) \ + __vfs_msg(sb, level, fmt, ##__VA_ARGS__) +#else +#define vfs_msg(sb, level, fmt, ...) \ +do { \ + no_printk(fmt, ##__VA_ARGS__); \ + __vfs_msg(sb, "", " "); \ +} while (0) +#endif + extern int blk_register_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk); extern blk_qc_t generic_make_request(struct bio *bio); @@ -774,12 +788,10 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq); extern void blk_put_request(struct request *); extern void __blk_put_request(struct request_queue *, struct request *); extern struct request *blk_get_request(struct request_queue *, int, gfp_t); -extern struct request *blk_make_request(struct request_queue *, struct bio *, - gfp_t); extern void blk_rq_set_block_pc(struct request *); extern void blk_requeue_request(struct request_queue *, struct request *); extern void blk_add_request_payload(struct request *rq, struct page *page, - unsigned int len); + int offset, unsigned int len); extern int blk_lld_busy(struct request_queue *q); extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, struct bio_set *bs, gfp_t gfp_mask, @@ -788,6 +800,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, extern void blk_rq_unprep_clone(struct request *rq); extern int blk_insert_cloned_request(struct request_queue *q, struct request *rq); +extern int blk_rq_append_bio(struct request *rq, struct bio *bio); extern void blk_delay_queue(struct request_queue *, unsigned long); extern void blk_queue_split(struct request_queue *, struct bio **, struct bio_set *); @@ -867,12 +880,12 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq) } static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, - unsigned int cmd_flags) + int op) { - if (unlikely(cmd_flags & REQ_DISCARD)) + if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)) return min(q->limits.max_discard_sectors, UINT_MAX >> 9); - if (unlikely(cmd_flags & REQ_WRITE_SAME)) + if (unlikely(op == REQ_OP_WRITE_SAME)) return q->limits.max_write_same_sectors; return q->limits.max_sectors;
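With blk_queue_get_max_sectors() now keyed on the op rather than on the REQ_DISCARD/REQ_WRITE_SAME flag bits, callers that build bios are expected to encode the op with bio_set_op_attrs() from the blk_types.h hunk above and submit with the flag-less submit_bio_wait(). A sketch of issuing a discard under the new scheme, loosely modeled on what blkdev_issue_discard() does (the function name and the trimmed error handling are illustrative; bio_alloc()/bio_put() are the usual bio.h allocators):

static int example_issue_discard(struct block_device *bdev,
				 sector_t sector, sector_t nr_sects)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 0);
	int ret;

	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = nr_sects << 9;

	/* op in the top REQ_OP_BITS of bi_opf, flags in the rest */
	bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);

	ret = submit_bio_wait(bio);	/* no rw argument anymore */
	bio_put(bio);
	return ret;
}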
@@ -892,18 +905,21 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q, (offset & (q->limits.chunk_sectors - 1)); } -static inline unsigned int blk_rq_get_max_sectors(struct request *rq) +static inline unsigned int blk_rq_get_max_sectors(struct request *rq, + sector_t offset) { struct request_queue *q = rq->q; if (unlikely(rq->cmd_type != REQ_TYPE_FS)) return q->limits.max_hw_sectors; - if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD)) - return blk_queue_get_max_sectors(q, rq->cmd_flags); + if (!q->limits.chunk_sectors || + req_op(rq) == REQ_OP_DISCARD || + req_op(rq) == REQ_OP_SECURE_ERASE) + return blk_queue_get_max_sectors(q, req_op(rq)); - return min(blk_max_size_offset(q, blk_rq_pos(rq)), - blk_queue_get_max_sectors(q, rq->cmd_flags)); + return min(blk_max_size_offset(q, offset), + blk_queue_get_max_sectors(q, req_op(rq))); } static inline unsigned int blk_rq_count_bios(struct request *rq) @@ -1007,8 +1023,8 @@ extern void blk_queue_update_dma_alignment(struct request_queue *, int); extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); -extern void blk_queue_flush(struct request_queue *q, unsigned int flush); extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); +extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); @@ -1123,11 +1139,16 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, return bqt->tag_index[tag]; } -#define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */ + +#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */ +#define BLKDEV_DISCARD_ZERO (1 << 1) /* must reliably zero data */ extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); +extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, int flags, + struct bio **biop); extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct page *page); extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, @@ -1363,7 +1384,7 @@ static inline unsigned int block_size(struct block_device *bdev) static inline bool queue_flush_queueable(struct request_queue *q) { - return !q->flush_not_queueable; + return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags); } typedef struct {struct page *v;} Sector; @@ -1645,7 +1666,7 @@ static inline bool integrity_req_gap_front_merge(struct request *req, */ struct blk_dax_ctl { sector_t sector; - void __pmem *addr; + void *addr; long size; pfn_t pfn; }; @@ -1653,11 +1674,11 @@ struct blk_dax_ctl { struct block_device_operations { int (*open) (struct block_device *, fmode_t); void (*release) (struct gendisk *, fmode_t); - int (*rw_page)(struct block_device *, sector_t, struct page *, int rw); + int (*rw_page)(struct block_device *, sector_t, struct page *, bool); int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); - long 
(*direct_access)(struct block_device *, sector_t, void __pmem **, - pfn_t *); + long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *, + long); unsigned int (*check_events) (struct gendisk *disk, unsigned int clearing); /* ->media_changed() is DEPRECATED, use ->check_events() instead */ @@ -1677,6 +1698,8 @@ extern int bdev_read_page(struct block_device *, sector_t, struct page *); extern int bdev_write_page(struct block_device *, sector_t, struct page *, struct writeback_control *); extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *); +extern int bdev_dax_supported(struct super_block *, int); +extern bool bdev_dax_capable(struct block_device *); #else /* CONFIG_BLOCK */ struct block_device; diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index afc1343df3c7..cceb72f9e29f 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -57,6 +57,14 @@ void __trace_note_message(struct blk_trace *, const char *fmt, ...); } while (0) #define BLK_TN_MAX_MSG 128 +static inline bool blk_trace_note_message_enabled(struct request_queue *q) +{ + struct blk_trace *bt = q->blk_trace; + if (likely(!bt)) + return false; + return bt->act_mask & BLK_TC_NOTIFY; +} + extern void blk_add_driver_data(struct request_queue *q, struct request *rq, void *data, size_t len); extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, @@ -79,6 +87,7 @@ extern struct attribute_group blk_trace_attr_group; # define blk_trace_remove(q) (-ENOTTY) # define blk_add_trace_msg(q, fmt, ...) do { } while (0) # define blk_trace_remove_sysfs(dev) do { } while (0) +# define blk_trace_note_message_enabled(q) (false) static inline int blk_trace_init_sysfs(struct device *dev) { return 0; @@ -109,7 +118,7 @@ static inline int blk_cmd_buf_len(struct request *rq) } extern void blk_dump_cmd(char *buf, struct request *rq); -extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); +extern void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes); #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 35b22f94d2d2..f9be32691718 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -83,34 +83,34 @@ extern void *__alloc_bootmem(unsigned long size, unsigned long goal); extern void *__alloc_bootmem_nopanic(unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; extern void *__alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; void *__alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal, - unsigned long limit); + unsigned long limit) __malloc; extern void *__alloc_bootmem_low(unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; void *__alloc_bootmem_low_nopanic(unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; #ifdef CONFIG_NO_BOOTMEM /* We are using top down, so it 
is safe to use 0 here */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f1d5c5acc8dd..11134238417d 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -11,14 +11,17 @@ #include <linux/workqueue.h> #include <linux/file.h> #include <linux/percpu.h> +#include <linux/err.h> +struct perf_event; struct bpf_map; /* map is generic key/value storage optionally accessible by eBPF programs */ struct bpf_map_ops { /* funcs callable from userspace (via syscall) */ struct bpf_map *(*map_alloc)(union bpf_attr *attr); - void (*map_free)(struct bpf_map *); + void (*map_release)(struct bpf_map *map, struct file *map_file); + void (*map_free)(struct bpf_map *map); int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); /* funcs callable from userspace and from eBPF programs */ @@ -27,8 +30,9 @@ struct bpf_map_ops { int (*map_delete_elem)(struct bpf_map *map, void *key); /* funcs called by prog_array and perf_event_array map */ - void *(*map_fd_get_ptr) (struct bpf_map *map, int fd); - void (*map_fd_put_ptr) (void *ptr); + void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, + int fd); + void (*map_fd_put_ptr)(void *ptr); }; struct bpf_map { @@ -66,6 +70,11 @@ enum bpf_arg_type { * functions that access data on eBPF program stack */ ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */ + ARG_PTR_TO_RAW_STACK, /* any pointer to eBPF program stack, area does not + * need to be initialized, helper function must fill + * all bytes or clear them in error case. + */ + ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */ ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */ @@ -106,6 +115,31 @@ enum bpf_access_type { BPF_WRITE = 2 }; +/* types of values stored in eBPF registers */ +enum bpf_reg_type { + NOT_INIT = 0, /* nothing was written into register */ + UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */ + PTR_TO_CTX, /* reg points to bpf_context */ + CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ + PTR_TO_MAP_VALUE, /* reg points to map element value */ + PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */ + FRAME_PTR, /* reg == frame_pointer */ + PTR_TO_STACK, /* reg == frame_pointer + imm */ + CONST_IMM, /* constant integer value */ + + /* PTR_TO_PACKET represents: * skb->data * skb->data + imm * skb->data + (u16) var * skb->data + (u16) var + imm * if (range > 0) then [ptr, ptr + range - off) is safe to access * if (id > 0) means that some 'var' was added * if (off > 0) means that 'imm' was added */ + PTR_TO_PACKET, + PTR_TO_PACKET_END, /* skb->data + headlen */ +}; + struct bpf_prog; struct bpf_verifier_ops { @@ -115,7 +149,8 @@ struct bpf_verifier_ops { /* return true if 'size' wide access at offset 'off' within bpf_context * with 'type' (read or write) is allowed */ - bool (*is_valid_access)(int off, int size, enum bpf_access_type type); + bool (*is_valid_access)(int off, int size, enum bpf_access_type type, + enum bpf_reg_type *reg_type); u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg, int src_reg, int ctx_off, @@ -131,6 +166,7 @@ struct bpf_prog_type_list { struct bpf_prog_aux { atomic_t refcnt; u32 used_map_cnt; + u32 max_ctx_offset; const struct bpf_verifier_ops *ops; struct bpf_map **used_maps; struct bpf_prog *prog; @@ -157,13 +193,29 @@ struct bpf_array { void __percpu *pptrs[0] __aligned(8); }; }; + #define MAX_TAIL_CALL_CNT 32 +struct bpf_event_entry { + struct perf_event *event; + struct file *perf_file; + struct file *map_file; + struct rcu_head rcu; +};
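The extra enum bpf_reg_type out-parameter on is_valid_access() lets a program type tell the verifier that a context load yields a pointer it must range-check rather than a plain scalar. A hedged sketch of such a callback; the context layout (example_ctx) and field choices are hypothetical, not taken from any real program type:

struct example_ctx {
	__u32 len;
	__u32 data;	/* loads yield a packet pointer */
	__u32 data_end;	/* loads yield the packet end bound */
};

static bool example_is_valid_access(int off, int size,
				    enum bpf_access_type type,
				    enum bpf_reg_type *reg_type)
{
	if (type != BPF_READ || size != sizeof(__u32))
		return false;
	if (off < 0 || off >= sizeof(struct example_ctx))
		return false;

	switch (off) {
	case offsetof(struct example_ctx, data):
		*reg_type = PTR_TO_PACKET;	/* verifier tracks its range */
		break;
	case offsetof(struct example_ctx, data_end):
		*reg_type = PTR_TO_PACKET_END;	/* usable only as a bound */
		break;
	}
	return true;
}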
+ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); -void bpf_fd_array_map_clear(struct bpf_map *map); +u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); + bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); + const struct bpf_func_proto *bpf_get_trace_printk_proto(void); +typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, + unsigned long off, unsigned long len); + +u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, + void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); + #ifdef CONFIG_BPF_SYSCALL DECLARE_PER_CPU(int, bpf_prog_active); @@ -171,9 +223,10 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl); void bpf_register_map_type(struct bpf_map_type_list *tl); struct bpf_prog *bpf_prog_get(u32 ufd); +struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type); +struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i); struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog); void bpf_prog_put(struct bpf_prog *prog); -void bpf_prog_put_rcu(struct bpf_prog *prog); struct bpf_map *bpf_map_get_with_uref(u32 ufd); struct bpf_map *__bpf_map_get(struct fd f); @@ -196,8 +249,13 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, u64 flags); int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, u64 flags); + int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); +int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, + void *key, void *value, u64 map_flags); +void bpf_fd_array_map_clear(struct bpf_map *map); + /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and * forced to use 'long' read/writes to try to atomically copy long counters. * Best-effort only.
No barriers here, since it _will_ race with concurrent @@ -226,6 +284,16 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd) return ERR_PTR(-EOPNOTSUPP); } +static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, + enum bpf_prog_type type) +{ + return ERR_PTR(-EOPNOTSUPP); +} +static inline struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i) +{ + return ERR_PTR(-EOPNOTSUPP); +} + static inline void bpf_prog_put(struct bpf_prog *prog) { } diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index d48daa3f6f20..ebbacd14d450 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -187,12 +187,13 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); void free_buffer_head(struct buffer_head * bh); void unlock_buffer(struct buffer_head *bh); void __lock_buffer(struct buffer_head *bh); -void ll_rw_block(int, int, struct buffer_head * bh[]); +void ll_rw_block(int, int, int, struct buffer_head * bh[]); int sync_dirty_buffer(struct buffer_head *bh); -int __sync_dirty_buffer(struct buffer_head *bh, int rw); -void write_dirty_buffer(struct buffer_head *bh, int rw); -int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags); -int submit_bh(int, struct buffer_head *); +int __sync_dirty_buffer(struct buffer_head *bh, int op_flags); +void write_dirty_buffer(struct buffer_head *bh, int op_flags); +int _submit_bh(int op, int op_flags, struct buffer_head *bh, + unsigned long bio_flags); +int submit_bh(int, int, struct buffer_head *); void write_boundary_block(struct block_device *bdev, sector_t bblock, unsigned blocksize); int bh_uptodate_or_lock(struct buffer_head *bh); @@ -208,6 +209,9 @@ void block_invalidatepage(struct page *page, unsigned int offset, unsigned int length); int block_write_full_page(struct page *page, get_block_t *get_block, struct writeback_control *wbc); +int __block_write_full_page(struct inode *inode, struct page *page, + get_block_t *get_block, struct writeback_control *wbc, + bh_end_io_t *handler); int block_read_full_page(struct page*, get_block_t*); int block_is_partially_uptodate(struct page *page, unsigned long from, unsigned long count); diff --git a/include/linux/bvec.h b/include/linux/bvec.h new file mode 100644 index 000000000000..89b65b82d98f --- /dev/null +++ b/include/linux/bvec.h @@ -0,0 +1,97 @@ +/* + * bvec iterator + * + * Copyright (C) 2001 Ming Lei <ming.lei@canonical.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- + */ +#ifndef __LINUX_BVEC_ITER_H +#define __LINUX_BVEC_ITER_H + +#include <linux/kernel.h> +#include <linux/bug.h> + +/* + * was unsigned short, but we might as well be ready for > 64kB I/O pages + */ +struct bio_vec { + struct page *bv_page; + unsigned int bv_len; + unsigned int bv_offset; +}; + +struct bvec_iter { + sector_t bi_sector; /* device address in 512 byte sectors */ + unsigned int bi_size; /* residual I/O count */ + + unsigned int bi_idx; /* current index into bvl_vec */ + + unsigned int bi_bvec_done; /* number of bytes completed in current bvec */ +}; + +/* + * various member access, note that bio_data should of course not be used + * on highmem page vectors + */ +#define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx]) + +#define bvec_iter_page(bvec, iter) \ + (__bvec_iter_bvec((bvec), (iter))->bv_page) + +#define bvec_iter_len(bvec, iter) \ + min((iter).bi_size, \ + __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done) + +#define bvec_iter_offset(bvec, iter) \ + (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done) + +#define bvec_iter_bvec(bvec, iter) \ +((struct bio_vec) { \ + .bv_page = bvec_iter_page((bvec), (iter)), \ + .bv_len = bvec_iter_len((bvec), (iter)), \ + .bv_offset = bvec_iter_offset((bvec), (iter)), \ +}) + +static inline void bvec_iter_advance(const struct bio_vec *bv, + struct bvec_iter *iter, + unsigned bytes) +{ + WARN_ONCE(bytes > iter->bi_size, + "Attempted to advance past end of bvec iter\n"); + + while (bytes) { + unsigned iter_len = bvec_iter_len(bv, *iter); + unsigned len = min(bytes, iter_len); + + bytes -= len; + iter->bi_size -= len; + iter->bi_bvec_done += len; + + if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) { + iter->bi_bvec_done = 0; + iter->bi_idx++; + } + } +} + +#define for_each_bvec(bvl, bio_vec, iter, start) \ + for (iter = (start); \ + (iter).bi_size && \ + ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ + bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) + +#endif /* __LINUX_BVEC_ITER_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 735f9f8c4e43..5f5270941ba0 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -32,6 +32,7 @@ enum can_mode { * CAN common private data */ struct can_priv { + struct net_device *dev; struct can_device_stats can_stats; struct can_bittiming bittiming, data_bittiming; @@ -40,11 +41,14 @@ struct can_priv { struct can_clock clock; enum can_state state; - u32 ctrlmode; - u32 ctrlmode_supported; + + /* CAN controller features - see include/uapi/linux/can/netlink.h */ + u32 ctrlmode; /* current options setting */ + u32 ctrlmode_supported; /* options that can be modified by netlink */ + u32 ctrlmode_static; /* static enabled options for driver/hardware */ int restart_ms; - struct timer_list restart_timer; + struct delayed_work restart_work; int (*do_set_bittiming)(struct net_device *dev); int (*do_set_data_bittiming)(struct net_device *dev); @@ -108,6 +112,21 @@ static inline bool can_is_canfd_skb(const struct sk_buff *skb) return skb->len == CANFD_MTU; } +/* helper to define static CAN controller features at device creation time */ +static inline void can_set_static_ctrlmode(struct net_device *dev, + u32 static_mode) +{ + struct can_priv *priv = netdev_priv(dev); + + /* alloc_candev() succeeded => netdev_priv() is valid at this point */ + priv->ctrlmode = static_mode; + priv->ctrlmode_static = static_mode; + + /* override MTU which was set by default in can_setup()? */ + if (static_mode & CAN_CTRLMODE_FD) + dev->mtu = CANFD_MTU; +}
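A sketch of how a driver would use the new helper at probe time; the function is illustrative (real drivers embed a larger priv struct and configure bittiming), but alloc_candev(), register_candev() and free_candev() are the existing can/dev.h entry points:

static struct net_device *example_candev_create(void)
{
	struct net_device *dev;

	dev = alloc_candev(sizeof(struct can_priv), 1);
	if (!dev)
		return NULL;

	/* controller hard-wired to CAN FD: declare the mode static,
	 * which also bumps dev->mtu to CANFD_MTU via the helper above */
	can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD);

	if (register_candev(dev)) {
		free_candev(dev);
		return NULL;
	}
	return dev;
}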
+ /* get data length from can_dlc with sanitized can_dlc */ u8 can_dlc2len(u8 can_dlc); diff --git a/include/linux/capability.h b/include/linux/capability.h index 00690ff92edf..dbc21c719ce6 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -38,6 +38,7 @@ struct cpu_vfs_cap_data { struct file; struct inode; struct dentry; +struct task_struct; struct user_namespace; extern const kernel_cap_t __cap_empty_set; @@ -206,6 +207,7 @@ extern bool has_ns_capability_noaudit(struct task_struct *t, struct user_namespace *ns, int cap); extern bool capable(int cap); extern bool ns_capable(struct user_namespace *ns, int cap); +extern bool ns_capable_noaudit(struct user_namespace *ns, int cap); #else static inline bool has_capability(struct task_struct *t, int cap) { @@ -233,6 +235,10 @@ static inline bool ns_capable(struct user_namespace *ns, int cap) { return true; } +static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap) +{ + return true; +} #endif /* CONFIG_MULTIUSER */ extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); diff --git a/include/linux/ccp.h b/include/linux/ccp.h index 915af3095b39..7c2bb27c067c 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -1,9 +1,10 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2013 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> + * Author: Gary R Hook <gary.hook@amd.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -381,6 +382,35 @@ struct ccp_passthru_engine { u32 final; }; +/** + * struct ccp_passthru_nomap_engine - CCP pass-through operation + * without performing DMA mapping + * @bit_mod: bitwise operation to perform + * @byte_swap: byteswap operation to perform + * @mask: mask to be applied to data + * @mask_len: length in bytes of mask + * @src_dma: data to be used for this operation + * @dst_dma: data produced by this operation + * @src_len: length in bytes of data used for this operation + * @final: indicate final pass-through operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - bit_mod, byte_swap, src_dma, dst_dma, src_len + * - mask, mask_len if bit_mod is not CCP_PASSTHRU_BITWISE_NOOP + */ +struct ccp_passthru_nomap_engine { + enum ccp_passthru_bitwise bit_mod; + enum ccp_passthru_byteswap byte_swap; + + dma_addr_t mask; + u32 mask_len; /* In bytes */ + + dma_addr_t src_dma, dst_dma; + u64 src_len; /* In bytes */ + + u32 final; +}; + /***** ECC engine *****/ #define CCP_ECC_MODULUS_BYTES 48 /* 384-bits */ #define CCP_ECC_MAX_OPERANDS 6 @@ -522,7 +552,8 @@ enum ccp_engine { }; /* Flag values for flags member of ccp_cmd */ -#define CCP_CMD_MAY_BACKLOG 0x00000001 +#define CCP_CMD_MAY_BACKLOG 0x00000001 +#define CCP_CMD_PASSTHRU_NO_DMA_MAP 0x00000002 /** * struct ccp_cmd - CCP operation request @@ -562,6 +593,7 @@ struct ccp_cmd { struct ccp_sha_engine sha; struct ccp_rsa_engine rsa; struct ccp_passthru_engine passthru; + struct ccp_passthru_nomap_engine passthru_nomap; struct ccp_ecc_engine ecc; } u; diff --git a/include/linux/cec-funcs.h
b/include/linux/cec-funcs.h new file mode 100644 index 000000000000..138bbf721e70 --- /dev/null +++ b/include/linux/cec-funcs.h @@ -0,0 +1,1971 @@ +/* + * cec - HDMI Consumer Electronics Control message functions + * + * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * Alternatively you can redistribute this file under the terms of the + * BSD license as stated below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. The names of its contributors may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * Note: this framework is still in staging and it is likely the API + * will change before it goes out of staging. + * + * Once it is moved out of staging this header will move to uapi. + */ +#ifndef _CEC_UAPI_FUNCS_H +#define _CEC_UAPI_FUNCS_H + +#include <linux/cec.h> + +/* One Touch Play Feature */ +static inline void cec_msg_active_source(struct cec_msg *msg, __u16 phys_addr) +{ + msg->len = 4; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_ACTIVE_SOURCE; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; +} + +static inline void cec_ops_active_source(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + +static inline void cec_msg_image_view_on(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_IMAGE_VIEW_ON; +} + +static inline void cec_msg_text_view_on(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_TEXT_VIEW_ON; +} + + +/* Routing Control Feature */ +static inline void cec_msg_inactive_source(struct cec_msg *msg, + __u16 phys_addr) +{ + msg->len = 4; + msg->msg[1] = CEC_MSG_INACTIVE_SOURCE; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; +} + +static inline void cec_ops_inactive_source(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + +static inline void cec_msg_request_active_source(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_REQUEST_ACTIVE_SOURCE; + msg->reply = reply ? 
CEC_MSG_ACTIVE_SOURCE : 0; +} + +static inline void cec_msg_routing_information(struct cec_msg *msg, + __u16 phys_addr) +{ + msg->len = 4; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_ROUTING_INFORMATION; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; +} + +static inline void cec_ops_routing_information(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + +static inline void cec_msg_routing_change(struct cec_msg *msg, + bool reply, + __u16 orig_phys_addr, + __u16 new_phys_addr) +{ + msg->len = 6; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_ROUTING_CHANGE; + msg->msg[2] = orig_phys_addr >> 8; + msg->msg[3] = orig_phys_addr & 0xff; + msg->msg[4] = new_phys_addr >> 8; + msg->msg[5] = new_phys_addr & 0xff; + msg->reply = reply ? CEC_MSG_ROUTING_INFORMATION : 0; +} + +static inline void cec_ops_routing_change(const struct cec_msg *msg, + __u16 *orig_phys_addr, + __u16 *new_phys_addr) +{ + *orig_phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *new_phys_addr = (msg->msg[4] << 8) | msg->msg[5]; +} + +static inline void cec_msg_set_stream_path(struct cec_msg *msg, __u16 phys_addr) +{ + msg->len = 4; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_SET_STREAM_PATH; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; +} + +static inline void cec_ops_set_stream_path(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + + +/* Standby Feature */ +static inline void cec_msg_standby(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_STANDBY; +} + + +/* One Touch Record Feature */ +static inline void cec_msg_record_off(struct cec_msg *msg, bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_RECORD_OFF; + msg->reply = reply ? 
CEC_MSG_RECORD_STATUS : 0; +} + +struct cec_op_arib_data { + __u16 transport_id; + __u16 service_id; + __u16 orig_network_id; +}; + +struct cec_op_atsc_data { + __u16 transport_id; + __u16 program_number; +}; + +struct cec_op_dvb_data { + __u16 transport_id; + __u16 service_id; + __u16 orig_network_id; +}; + +struct cec_op_channel_data { + __u8 channel_number_fmt; + __u16 major; + __u16 minor; +}; + +struct cec_op_digital_service_id { + __u8 service_id_method; + __u8 dig_bcast_system; + union { + struct cec_op_arib_data arib; + struct cec_op_atsc_data atsc; + struct cec_op_dvb_data dvb; + struct cec_op_channel_data channel; + }; +}; + +struct cec_op_record_src { + __u8 type; + union { + struct cec_op_digital_service_id digital; + struct { + __u8 ana_bcast_type; + __u16 ana_freq; + __u8 bcast_system; + } analog; + struct { + __u8 plug; + } ext_plug; + struct { + __u16 phys_addr; + } ext_phys_addr; + }; +}; + +static inline void cec_set_digital_service_id(__u8 *msg, + const struct cec_op_digital_service_id *digital) +{ + *msg++ = (digital->service_id_method << 7) | digital->dig_bcast_system; + if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) { + *msg++ = (digital->channel.channel_number_fmt << 2) | + (digital->channel.major >> 8); + *msg++ = digital->channel.major & 0xff; + *msg++ = digital->channel.minor >> 8; + *msg++ = digital->channel.minor & 0xff; + *msg++ = 0; + *msg++ = 0; + return; + } + switch (digital->dig_bcast_system) { + case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN: + case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE: + case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT: + case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T: + *msg++ = digital->atsc.transport_id >> 8; + *msg++ = digital->atsc.transport_id & 0xff; + *msg++ = digital->atsc.program_number >> 8; + *msg++ = digital->atsc.program_number & 0xff; + *msg++ = 0; + *msg++ = 0; + break; + default: + *msg++ = digital->dvb.transport_id >> 8; + *msg++ = digital->dvb.transport_id & 0xff; + *msg++ = digital->dvb.service_id >> 8; + *msg++ = digital->dvb.service_id & 0xff; + *msg++ = digital->dvb.orig_network_id >> 8; + *msg++ = digital->dvb.orig_network_id & 0xff; + break; + } +} + +static inline void cec_get_digital_service_id(const __u8 *msg, + struct cec_op_digital_service_id *digital) +{ + digital->service_id_method = msg[0] >> 7; + digital->dig_bcast_system = msg[0] & 0x7f; + if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) { + digital->channel.channel_number_fmt = msg[1] >> 2; + digital->channel.major = ((msg[1] & 3) << 6) | msg[2]; + digital->channel.minor = (msg[3] << 8) | msg[4]; + return; + } + digital->dvb.transport_id = (msg[1] << 8) | msg[2]; + digital->dvb.service_id = (msg[3] << 8) | msg[4]; + digital->dvb.orig_network_id = (msg[5] << 8) | msg[6]; +} + +static inline void cec_msg_record_on_own(struct cec_msg *msg) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_RECORD_ON; + msg->msg[2] = CEC_OP_RECORD_SRC_OWN; +} + +static inline void cec_msg_record_on_digital(struct cec_msg *msg, + const struct cec_op_digital_service_id *digital) +{ + msg->len = 10; + msg->msg[1] = CEC_MSG_RECORD_ON; + msg->msg[2] = CEC_OP_RECORD_SRC_DIGITAL; + cec_set_digital_service_id(msg->msg + 3, digital); +} + +static inline void cec_msg_record_on_analog(struct cec_msg *msg, + __u8 ana_bcast_type, + __u16 ana_freq, + __u8 bcast_system) +{ + msg->len = 7; + msg->msg[1] = CEC_MSG_RECORD_ON; + msg->msg[2] = CEC_OP_RECORD_SRC_ANALOG; + msg->msg[3] = ana_bcast_type; + msg->msg[4] = ana_freq >> 8; + msg->msg[5] = ana_freq 
& 0xff; + msg->msg[6] = bcast_system; +} + +static inline void cec_msg_record_on_plug(struct cec_msg *msg, + __u8 plug) +{ + msg->len = 4; + msg->msg[1] = CEC_MSG_RECORD_ON; + msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PLUG; + msg->msg[3] = plug; +} + +static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg, + __u16 phys_addr) +{ + msg->len = 5; + msg->msg[1] = CEC_MSG_RECORD_ON; + msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PHYS_ADDR; + msg->msg[3] = phys_addr >> 8; + msg->msg[4] = phys_addr & 0xff; +} + +static inline void cec_msg_record_on(struct cec_msg *msg, + bool reply, + const struct cec_op_record_src *rec_src) +{ + switch (rec_src->type) { + case CEC_OP_RECORD_SRC_OWN: + cec_msg_record_on_own(msg); + break; + case CEC_OP_RECORD_SRC_DIGITAL: + cec_msg_record_on_digital(msg, &rec_src->digital); + break; + case CEC_OP_RECORD_SRC_ANALOG: + cec_msg_record_on_analog(msg, + rec_src->analog.ana_bcast_type, + rec_src->analog.ana_freq, + rec_src->analog.bcast_system); + break; + case CEC_OP_RECORD_SRC_EXT_PLUG: + cec_msg_record_on_plug(msg, rec_src->ext_plug.plug); + break; + case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR: + cec_msg_record_on_phys_addr(msg, + rec_src->ext_phys_addr.phys_addr); + break; + } + msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0; +} + +static inline void cec_ops_record_on(const struct cec_msg *msg, + struct cec_op_record_src *rec_src) +{ + rec_src->type = msg->msg[2]; + switch (rec_src->type) { + case CEC_OP_RECORD_SRC_OWN: + break; + case CEC_OP_RECORD_SRC_DIGITAL: + cec_get_digital_service_id(msg->msg + 3, &rec_src->digital); + break; + case CEC_OP_RECORD_SRC_ANALOG: + rec_src->analog.ana_bcast_type = msg->msg[3]; + rec_src->analog.ana_freq = + (msg->msg[4] << 8) | msg->msg[5]; + rec_src->analog.bcast_system = msg->msg[6]; + break; + case CEC_OP_RECORD_SRC_EXT_PLUG: + rec_src->ext_plug.plug = msg->msg[3]; + break; + case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR: + rec_src->ext_phys_addr.phys_addr = + (msg->msg[3] << 8) | msg->msg[4]; + break; + } +} + +static inline void cec_msg_record_status(struct cec_msg *msg, __u8 rec_status) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_RECORD_STATUS; + msg->msg[2] = rec_status; +} + +static inline void cec_ops_record_status(const struct cec_msg *msg, + __u8 *rec_status) +{ + *rec_status = msg->msg[2]; +} + +static inline void cec_msg_record_tv_screen(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_RECORD_TV_SCREEN; + msg->reply = reply ? CEC_MSG_RECORD_ON : 0; +} + + +/* Timer Programming Feature */ +static inline void cec_msg_timer_status(struct cec_msg *msg, + __u8 timer_overlap_warning, + __u8 media_info, + __u8 prog_info, + __u8 prog_error, + __u8 duration_hr, + __u8 duration_min) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_TIMER_STATUS; + msg->msg[2] = (timer_overlap_warning << 7) | + (media_info << 5) | + (prog_info ? 0x10 : 0) | + (prog_info ? 
prog_info : prog_error); + if (prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE || + prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE || + prog_error == CEC_OP_PROG_ERROR_DUPLICATE) { + msg->len += 2; + msg->msg[3] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[4] = ((duration_min / 10) << 4) | (duration_min % 10); + } +} + +static inline void cec_ops_timer_status(const struct cec_msg *msg, + __u8 *timer_overlap_warning, + __u8 *media_info, + __u8 *prog_info, + __u8 *prog_error, + __u8 *duration_hr, + __u8 *duration_min) +{ + *timer_overlap_warning = msg->msg[2] >> 7; + *media_info = (msg->msg[2] >> 5) & 3; + if (msg->msg[2] & 0x10) { + *prog_info = msg->msg[2] & 0xf; + *prog_error = 0; + } else { + *prog_info = 0; + *prog_error = msg->msg[2] & 0xf; + } + if (*prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE || + *prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE || + *prog_error == CEC_OP_PROG_ERROR_DUPLICATE) { + *duration_hr = (msg->msg[3] >> 4) * 10 + (msg->msg[3] & 0xf); + *duration_min = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + } else { + *duration_hr = *duration_min = 0; + } +} + +static inline void cec_msg_timer_cleared_status(struct cec_msg *msg, + __u8 timer_cleared_status) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_TIMER_CLEARED_STATUS; + msg->msg[2] = timer_cleared_status; +} + +static inline void cec_ops_timer_cleared_status(const struct cec_msg *msg, + __u8 *timer_cleared_status) +{ + *timer_cleared_status = msg->msg[2]; +} + +static inline void cec_msg_clear_analogue_timer(struct cec_msg *msg, + bool reply, + __u8 day, + __u8 month, + __u8 start_hr, + __u8 start_min, + __u8 duration_hr, + __u8 duration_min, + __u8 recording_seq, + __u8 ana_bcast_type, + __u16 ana_freq, + __u8 bcast_system) +{ + msg->len = 13; + msg->msg[1] = CEC_MSG_CLEAR_ANALOGUE_TIMER; + msg->msg[2] = day; + msg->msg[3] = month; + /* Hours and minutes are in BCD format */ + msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); + msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); + msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); + msg->msg[8] = recording_seq; + msg->msg[9] = ana_bcast_type; + msg->msg[10] = ana_freq >> 8; + msg->msg[11] = ana_freq & 0xff; + msg->msg[12] = bcast_system; + msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0; +} + +static inline void cec_ops_clear_analogue_timer(const struct cec_msg *msg, + __u8 *day, + __u8 *month, + __u8 *start_hr, + __u8 *start_min, + __u8 *duration_hr, + __u8 *duration_min, + __u8 *recording_seq, + __u8 *ana_bcast_type, + __u16 *ana_freq, + __u8 *bcast_system) +{ + *day = msg->msg[2]; + *month = msg->msg[3]; + /* Hours and minutes are in BCD format */ + *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); + *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); + *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); + *recording_seq = msg->msg[8]; + *ana_bcast_type = msg->msg[9]; + *ana_freq = (msg->msg[10] << 8) | msg->msg[11]; + *bcast_system = msg->msg[12]; +} + +static inline void cec_msg_clear_digital_timer(struct cec_msg *msg, + bool reply, + __u8 day, + __u8 month, + __u8 start_hr, + __u8 start_min, + __u8 duration_hr, + __u8 duration_min, + __u8 recording_seq, + const struct cec_op_digital_service_id *digital) +{ + msg->len = 16; + msg->reply = reply ? 
CEC_MSG_TIMER_CLEARED_STATUS : 0; + msg->msg[1] = CEC_MSG_CLEAR_DIGITAL_TIMER; + msg->msg[2] = day; + msg->msg[3] = month; + /* Hours and minutes are in BCD format */ + msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); + msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); + msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); + msg->msg[8] = recording_seq; + cec_set_digital_service_id(msg->msg + 9, digital); +} + +static inline void cec_ops_clear_digital_timer(const struct cec_msg *msg, + __u8 *day, + __u8 *month, + __u8 *start_hr, + __u8 *start_min, + __u8 *duration_hr, + __u8 *duration_min, + __u8 *recording_seq, + struct cec_op_digital_service_id *digital) +{ + *day = msg->msg[2]; + *month = msg->msg[3]; + /* Hours and minutes are in BCD format */ + *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); + *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); + *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); + *recording_seq = msg->msg[8]; + cec_get_digital_service_id(msg->msg + 9, digital); +} + +static inline void cec_msg_clear_ext_timer(struct cec_msg *msg, + bool reply, + __u8 day, + __u8 month, + __u8 start_hr, + __u8 start_min, + __u8 duration_hr, + __u8 duration_min, + __u8 recording_seq, + __u8 ext_src_spec, + __u8 plug, + __u16 phys_addr) +{ + msg->len = 13; + msg->msg[1] = CEC_MSG_CLEAR_EXT_TIMER; + msg->msg[2] = day; + msg->msg[3] = month; + /* Hours and minutes are in BCD format */ + msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); + msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); + msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); + msg->msg[8] = recording_seq; + msg->msg[9] = ext_src_spec; + msg->msg[10] = plug; + msg->msg[11] = phys_addr >> 8; + msg->msg[12] = phys_addr & 0xff; + msg->reply = reply ? 
CEC_MSG_TIMER_CLEARED_STATUS : 0; +} + +static inline void cec_ops_clear_ext_timer(const struct cec_msg *msg, + __u8 *day, + __u8 *month, + __u8 *start_hr, + __u8 *start_min, + __u8 *duration_hr, + __u8 *duration_min, + __u8 *recording_seq, + __u8 *ext_src_spec, + __u8 *plug, + __u16 *phys_addr) +{ + *day = msg->msg[2]; + *month = msg->msg[3]; + /* Hours and minutes are in BCD format */ + *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); + *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); + *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); + *recording_seq = msg->msg[8]; + *ext_src_spec = msg->msg[9]; + *plug = msg->msg[10]; + *phys_addr = (msg->msg[11] << 8) | msg->msg[12]; +} + +static inline void cec_msg_set_analogue_timer(struct cec_msg *msg, + bool reply, + __u8 day, + __u8 month, + __u8 start_hr, + __u8 start_min, + __u8 duration_hr, + __u8 duration_min, + __u8 recording_seq, + __u8 ana_bcast_type, + __u16 ana_freq, + __u8 bcast_system) +{ + msg->len = 13; + msg->msg[1] = CEC_MSG_SET_ANALOGUE_TIMER; + msg->msg[2] = day; + msg->msg[3] = month; + /* Hours and minutes are in BCD format */ + msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); + msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); + msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); + msg->msg[8] = recording_seq; + msg->msg[9] = ana_bcast_type; + msg->msg[10] = ana_freq >> 8; + msg->msg[11] = ana_freq & 0xff; + msg->msg[12] = bcast_system; + msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0; +} + +static inline void cec_ops_set_analogue_timer(const struct cec_msg *msg, + __u8 *day, + __u8 *month, + __u8 *start_hr, + __u8 *start_min, + __u8 *duration_hr, + __u8 *duration_min, + __u8 *recording_seq, + __u8 *ana_bcast_type, + __u16 *ana_freq, + __u8 *bcast_system) +{ + *day = msg->msg[2]; + *month = msg->msg[3]; + /* Hours and minutes are in BCD format */ + *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); + *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); + *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); + *recording_seq = msg->msg[8]; + *ana_bcast_type = msg->msg[9]; + *ana_freq = (msg->msg[10] << 8) | msg->msg[11]; + *bcast_system = msg->msg[12]; +} + +static inline void cec_msg_set_digital_timer(struct cec_msg *msg, + bool reply, + __u8 day, + __u8 month, + __u8 start_hr, + __u8 start_min, + __u8 duration_hr, + __u8 duration_min, + __u8 recording_seq, + const struct cec_op_digital_service_id *digital) +{ + msg->len = 16; + msg->reply = reply ? 
CEC_MSG_TIMER_STATUS : 0; + msg->msg[1] = CEC_MSG_SET_DIGITAL_TIMER; + msg->msg[2] = day; + msg->msg[3] = month; + /* Hours and minutes are in BCD format */ + msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); + msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); + msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); + msg->msg[8] = recording_seq; + cec_set_digital_service_id(msg->msg + 9, digital); +} + +static inline void cec_ops_set_digital_timer(const struct cec_msg *msg, + __u8 *day, + __u8 *month, + __u8 *start_hr, + __u8 *start_min, + __u8 *duration_hr, + __u8 *duration_min, + __u8 *recording_seq, + struct cec_op_digital_service_id *digital) +{ + *day = msg->msg[2]; + *month = msg->msg[3]; + /* Hours and minutes are in BCD format */ + *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); + *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); + *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); + *recording_seq = msg->msg[8]; + cec_get_digital_service_id(msg->msg + 9, digital); +} + +static inline void cec_msg_set_ext_timer(struct cec_msg *msg, + bool reply, + __u8 day, + __u8 month, + __u8 start_hr, + __u8 start_min, + __u8 duration_hr, + __u8 duration_min, + __u8 recording_seq, + __u8 ext_src_spec, + __u8 plug, + __u16 phys_addr) +{ + msg->len = 13; + msg->msg[1] = CEC_MSG_SET_EXT_TIMER; + msg->msg[2] = day; + msg->msg[3] = month; + /* Hours and minutes are in BCD format */ + msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); + msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); + msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); + msg->msg[8] = recording_seq; + msg->msg[9] = ext_src_spec; + msg->msg[10] = plug; + msg->msg[11] = phys_addr >> 8; + msg->msg[12] = phys_addr & 0xff; + msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0; +} + +static inline void cec_ops_set_ext_timer(const struct cec_msg *msg, + __u8 *day, + __u8 *month, + __u8 *start_hr, + __u8 *start_min, + __u8 *duration_hr, + __u8 *duration_min, + __u8 *recording_seq, + __u8 *ext_src_spec, + __u8 *plug, + __u16 *phys_addr) +{ + *day = msg->msg[2]; + *month = msg->msg[3]; + /* Hours and minutes are in BCD format */ + *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); + *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); + *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); + *recording_seq = msg->msg[8]; + *ext_src_spec = msg->msg[9]; + *plug = msg->msg[10]; + *phys_addr = (msg->msg[11] << 8) | msg->msg[12]; +} + +static inline void cec_msg_set_timer_program_title(struct cec_msg *msg, + const char *prog_title) +{ + unsigned int len = strlen(prog_title); + + if (len > 14) + len = 14; + msg->len = 2 + len; + msg->msg[1] = CEC_MSG_SET_TIMER_PROGRAM_TITLE; + memcpy(msg->msg + 2, prog_title, len); +} + +static inline void cec_ops_set_timer_program_title(const struct cec_msg *msg, + char *prog_title) +{ + unsigned int len = msg->len > 2 ? 
msg->len - 2 : 0; + + if (len > 14) + len = 14; + memcpy(prog_title, msg->msg + 2, len); + prog_title[len] = '\0'; +} + +/* System Information Feature */ +static inline void cec_msg_cec_version(struct cec_msg *msg, __u8 cec_version) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_CEC_VERSION; + msg->msg[2] = cec_version; +} + +static inline void cec_ops_cec_version(const struct cec_msg *msg, + __u8 *cec_version) +{ + *cec_version = msg->msg[2]; +} + +static inline void cec_msg_get_cec_version(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GET_CEC_VERSION; + msg->reply = reply ? CEC_MSG_CEC_VERSION : 0; +} + +static inline void cec_msg_report_physical_addr(struct cec_msg *msg, + __u16 phys_addr, __u8 prim_devtype) +{ + msg->len = 5; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_REPORT_PHYSICAL_ADDR; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; + msg->msg[4] = prim_devtype; +} + +static inline void cec_ops_report_physical_addr(const struct cec_msg *msg, + __u16 *phys_addr, __u8 *prim_devtype) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *prim_devtype = msg->msg[4]; +} + +static inline void cec_msg_give_physical_addr(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GIVE_PHYSICAL_ADDR; + msg->reply = reply ? CEC_MSG_REPORT_PHYSICAL_ADDR : 0; +} + +static inline void cec_msg_set_menu_language(struct cec_msg *msg, + const char *language) +{ + msg->len = 5; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_SET_MENU_LANGUAGE; + memcpy(msg->msg + 2, language, 3); +} + +static inline void cec_ops_set_menu_language(const struct cec_msg *msg, + char *language) +{ + memcpy(language, msg->msg + 2, 3); + language[3] = '\0'; +} + +static inline void cec_msg_get_menu_language(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GET_MENU_LANGUAGE; + msg->reply = reply ? CEC_MSG_SET_MENU_LANGUAGE : 0; +} + +/* + * Assumes a single RC Profile byte and a single Device Features byte, + * i.e. no extended features are supported by this helper function. + * + * As of CEC 2.0 no extended features are defined, should those be added + * in the future, then this function needs to be adapted or a new function + * should be added. + */ +static inline void cec_msg_report_features(struct cec_msg *msg, + __u8 cec_version, __u8 all_device_types, + __u8 rc_profile, __u8 dev_features) +{ + msg->len = 6; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_REPORT_FEATURES; + msg->msg[2] = cec_version; + msg->msg[3] = all_device_types; + msg->msg[4] = rc_profile; + msg->msg[5] = dev_features; +} + +static inline void cec_ops_report_features(const struct cec_msg *msg, + __u8 *cec_version, __u8 *all_device_types, + const __u8 **rc_profile, const __u8 **dev_features) +{ + const __u8 *p = &msg->msg[4]; + + *cec_version = msg->msg[2]; + *all_device_types = msg->msg[3]; + *rc_profile = p; + while (p < &msg->msg[14] && (*p & CEC_OP_FEAT_EXT)) + p++; + if (!(*p & CEC_OP_FEAT_EXT)) { + *dev_features = p + 1; + while (p < &msg->msg[15] && (*p & CEC_OP_FEAT_EXT)) + p++; + } + if (*p & CEC_OP_FEAT_EXT) + *rc_profile = *dev_features = NULL; +} + +static inline void cec_msg_give_features(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GIVE_FEATURES; + msg->reply = reply ? 
CEC_MSG_REPORT_FEATURES : 0; +} + +/* Deck Control Feature */ +static inline void cec_msg_deck_control(struct cec_msg *msg, + __u8 deck_control_mode) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_DECK_CONTROL; + msg->msg[2] = deck_control_mode; +} + +static inline void cec_ops_deck_control(const struct cec_msg *msg, + __u8 *deck_control_mode) +{ + *deck_control_mode = msg->msg[2]; +} + +static inline void cec_msg_deck_status(struct cec_msg *msg, + __u8 deck_info) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_DECK_STATUS; + msg->msg[2] = deck_info; +} + +static inline void cec_ops_deck_status(const struct cec_msg *msg, + __u8 *deck_info) +{ + *deck_info = msg->msg[2]; +} + +static inline void cec_msg_give_deck_status(struct cec_msg *msg, + bool reply, + __u8 status_req) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_GIVE_DECK_STATUS; + msg->msg[2] = status_req; + msg->reply = reply ? CEC_MSG_DECK_STATUS : 0; +} + +static inline void cec_ops_give_deck_status(const struct cec_msg *msg, + __u8 *status_req) +{ + *status_req = msg->msg[2]; +} + +static inline void cec_msg_play(struct cec_msg *msg, + __u8 play_mode) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_PLAY; + msg->msg[2] = play_mode; +} + +static inline void cec_ops_play(const struct cec_msg *msg, + __u8 *play_mode) +{ + *play_mode = msg->msg[2]; +} + + +/* Tuner Control Feature */ +struct cec_op_tuner_device_info { + __u8 rec_flag; + __u8 tuner_display_info; + bool is_analog; + union { + struct cec_op_digital_service_id digital; + struct { + __u8 ana_bcast_type; + __u16 ana_freq; + __u8 bcast_system; + } analog; + }; +}; + +static inline void cec_msg_tuner_device_status_analog(struct cec_msg *msg, + __u8 rec_flag, + __u8 tuner_display_info, + __u8 ana_bcast_type, + __u16 ana_freq, + __u8 bcast_system) +{ + msg->len = 7; + msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS; + msg->msg[2] = (rec_flag << 7) | tuner_display_info; + msg->msg[3] = ana_bcast_type; + msg->msg[4] = ana_freq >> 8; + msg->msg[5] = ana_freq & 0xff; + msg->msg[6] = bcast_system; +} + +static inline void cec_msg_tuner_device_status_digital(struct cec_msg *msg, + __u8 rec_flag, __u8 tuner_display_info, + const struct cec_op_digital_service_id *digital) +{ + msg->len = 10; + msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS; + msg->msg[2] = (rec_flag << 7) | tuner_display_info; + cec_set_digital_service_id(msg->msg + 3, digital); +} + +static inline void cec_msg_tuner_device_status(struct cec_msg *msg, + const struct cec_op_tuner_device_info *tuner_dev_info) +{ + if (tuner_dev_info->is_analog) + cec_msg_tuner_device_status_analog(msg, + tuner_dev_info->rec_flag, + tuner_dev_info->tuner_display_info, + tuner_dev_info->analog.ana_bcast_type, + tuner_dev_info->analog.ana_freq, + tuner_dev_info->analog.bcast_system); + else + cec_msg_tuner_device_status_digital(msg, + tuner_dev_info->rec_flag, + tuner_dev_info->tuner_display_info, + &tuner_dev_info->digital); +} + +static inline void cec_ops_tuner_device_status(const struct cec_msg *msg, + struct cec_op_tuner_device_info *tuner_dev_info) +{ + tuner_dev_info->is_analog = msg->len < 10; + tuner_dev_info->rec_flag = msg->msg[2] >> 7; + tuner_dev_info->tuner_display_info = msg->msg[2] & 0x7f; + if (tuner_dev_info->is_analog) { + tuner_dev_info->analog.ana_bcast_type = msg->msg[3]; + tuner_dev_info->analog.ana_freq = (msg->msg[4] << 8) | msg->msg[5]; + tuner_dev_info->analog.bcast_system = msg->msg[6]; + return; + } + cec_get_digital_service_id(msg->msg + 3, &tuner_dev_info->digital); +} + +static inline void cec_msg_give_tuner_device_status(struct cec_msg 
*msg, + bool reply, + __u8 status_req) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_GIVE_TUNER_DEVICE_STATUS; + msg->msg[2] = status_req; + msg->reply = reply ? CEC_MSG_TUNER_DEVICE_STATUS : 0; +} + +static inline void cec_ops_give_tuner_device_status(const struct cec_msg *msg, + __u8 *status_req) +{ + *status_req = msg->msg[2]; +} + +static inline void cec_msg_select_analogue_service(struct cec_msg *msg, + __u8 ana_bcast_type, + __u16 ana_freq, + __u8 bcast_system) +{ + msg->len = 6; + msg->msg[1] = CEC_MSG_SELECT_ANALOGUE_SERVICE; + msg->msg[2] = ana_bcast_type; + msg->msg[3] = ana_freq >> 8; + msg->msg[4] = ana_freq & 0xff; + msg->msg[5] = bcast_system; +} + +static inline void cec_ops_select_analogue_service(const struct cec_msg *msg, + __u8 *ana_bcast_type, + __u16 *ana_freq, + __u8 *bcast_system) +{ + *ana_bcast_type = msg->msg[2]; + *ana_freq = (msg->msg[3] << 8) | msg->msg[4]; + *bcast_system = msg->msg[5]; +} + +static inline void cec_msg_select_digital_service(struct cec_msg *msg, + const struct cec_op_digital_service_id *digital) +{ + msg->len = 9; + msg->msg[1] = CEC_MSG_SELECT_DIGITAL_SERVICE; + cec_set_digital_service_id(msg->msg + 2, digital); +} + +static inline void cec_ops_select_digital_service(const struct cec_msg *msg, + struct cec_op_digital_service_id *digital) +{ + cec_get_digital_service_id(msg->msg + 2, digital); +} + +static inline void cec_msg_tuner_step_decrement(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_TUNER_STEP_DECREMENT; +} + +static inline void cec_msg_tuner_step_increment(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_TUNER_STEP_INCREMENT; +} + + +/* Vendor Specific Commands Feature */ +static inline void cec_msg_device_vendor_id(struct cec_msg *msg, __u32 vendor_id) +{ + msg->len = 5; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_DEVICE_VENDOR_ID; + msg->msg[2] = vendor_id >> 16; + msg->msg[3] = (vendor_id >> 8) & 0xff; + msg->msg[4] = vendor_id & 0xff; +} + +static inline void cec_ops_device_vendor_id(const struct cec_msg *msg, + __u32 *vendor_id) +{ + *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4]; +} + +static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GIVE_DEVICE_VENDOR_ID; + msg->reply = reply ? 
CEC_MSG_DEVICE_VENDOR_ID : 0; +} + +static inline void cec_msg_vendor_command(struct cec_msg *msg, + __u8 size, const __u8 *vendor_cmd) +{ + if (size > 14) + size = 14; + msg->len = 2 + size; + msg->msg[1] = CEC_MSG_VENDOR_COMMAND; + memcpy(msg->msg + 2, vendor_cmd, size); +} + +static inline void cec_ops_vendor_command(const struct cec_msg *msg, + __u8 *size, + const __u8 **vendor_cmd) +{ + *size = msg->len - 2; + + if (*size > 14) + *size = 14; + *vendor_cmd = msg->msg + 2; +} + +static inline void cec_msg_vendor_command_with_id(struct cec_msg *msg, + __u32 vendor_id, __u8 size, + const __u8 *vendor_cmd) +{ + if (size > 11) + size = 11; + msg->len = 5 + size; + msg->msg[1] = CEC_MSG_VENDOR_COMMAND_WITH_ID; + msg->msg[2] = vendor_id >> 16; + msg->msg[3] = (vendor_id >> 8) & 0xff; + msg->msg[4] = vendor_id & 0xff; + memcpy(msg->msg + 5, vendor_cmd, size); +} + +static inline void cec_ops_vendor_command_with_id(const struct cec_msg *msg, + __u32 *vendor_id, __u8 *size, + const __u8 **vendor_cmd) +{ + *size = msg->len - 5; + + if (*size > 11) + *size = 11; + *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4]; + *vendor_cmd = msg->msg + 5; +} + +static inline void cec_msg_vendor_remote_button_down(struct cec_msg *msg, + __u8 size, + const __u8 *rc_code) +{ + if (size > 14) + size = 14; + msg->len = 2 + size; + msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN; + memcpy(msg->msg + 2, rc_code, size); +} + +static inline void cec_ops_vendor_remote_button_down(const struct cec_msg *msg, + __u8 *size, + const __u8 **rc_code) +{ + *size = msg->len - 2; + + if (*size > 14) + *size = 14; + *rc_code = msg->msg + 2; +} + +static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_UP; +} + + +/* OSD Display Feature */ +static inline void cec_msg_set_osd_string(struct cec_msg *msg, + __u8 disp_ctl, + const char *osd) +{ + unsigned int len = strlen(osd); + + if (len > 13) + len = 13; + msg->len = 3 + len; + msg->msg[1] = CEC_MSG_SET_OSD_STRING; + msg->msg[2] = disp_ctl; + memcpy(msg->msg + 3, osd, len); +} + +static inline void cec_ops_set_osd_string(const struct cec_msg *msg, + __u8 *disp_ctl, + char *osd) +{ + unsigned int len = msg->len > 3 ? msg->len - 3 : 0; + + *disp_ctl = msg->msg[2]; + if (len > 13) + len = 13; + memcpy(osd, msg->msg + 3, len); + osd[len] = '\0'; +} + + +/* Device OSD Transfer Feature */ +static inline void cec_msg_set_osd_name(struct cec_msg *msg, const char *name) +{ + unsigned int len = strlen(name); + + if (len > 14) + len = 14; + msg->len = 2 + len; + msg->msg[1] = CEC_MSG_SET_OSD_NAME; + memcpy(msg->msg + 2, name, len); +} + +static inline void cec_ops_set_osd_name(const struct cec_msg *msg, + char *name) +{ + unsigned int len = msg->len > 2 ? msg->len - 2 : 0; + + if (len > 14) + len = 14; + memcpy(name, msg->msg + 2, len); + name[len] = '\0'; +} + +static inline void cec_msg_give_osd_name(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GIVE_OSD_NAME; + msg->reply = reply ? 
CEC_MSG_SET_OSD_NAME : 0;
+}
+
+
+/* Device Menu Control Feature */
+static inline void cec_msg_menu_status(struct cec_msg *msg,
+				       __u8 menu_state)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_MENU_STATUS;
+	msg->msg[2] = menu_state;
+}
+
+static inline void cec_ops_menu_status(const struct cec_msg *msg,
+				       __u8 *menu_state)
+{
+	*menu_state = msg->msg[2];
+}
+
+static inline void cec_msg_menu_request(struct cec_msg *msg,
+					bool reply,
+					__u8 menu_req)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_MENU_REQUEST;
+	msg->msg[2] = menu_req;
+	msg->reply = reply ? CEC_MSG_MENU_STATUS : 0;
+}
+
+static inline void cec_ops_menu_request(const struct cec_msg *msg,
+					__u8 *menu_req)
+{
+	*menu_req = msg->msg[2];
+}
+
+struct cec_op_ui_command {
+	__u8 ui_cmd;
+	bool has_opt_arg;
+	union {
+		struct cec_op_channel_data channel_identifier;
+		__u8 ui_broadcast_type;
+		__u8 ui_sound_presentation_control;
+		__u8 play_mode;
+		__u8 ui_function_media;
+		__u8 ui_function_select_av_input;
+		__u8 ui_function_select_audio_input;
+	};
+};
+
+static inline void cec_msg_user_control_pressed(struct cec_msg *msg,
+						const struct cec_op_ui_command *ui_cmd)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_USER_CONTROL_PRESSED;
+	msg->msg[2] = ui_cmd->ui_cmd;
+	if (!ui_cmd->has_opt_arg)
+		return;
+	switch (ui_cmd->ui_cmd) {
+	case 0x56:
+	case 0x57:
+	case 0x60:
+	case 0x68:
+	case 0x69:
+	case 0x6a:
+		/* The optional operand is one byte for all these ui commands */
+		msg->len++;
+		msg->msg[3] = ui_cmd->play_mode;
+		break;
+	case 0x67:
+		msg->len += 4;
+		msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) |
+			      (ui_cmd->channel_identifier.major >> 8);
+		msg->msg[4] = ui_cmd->channel_identifier.major & 0xff;
+		msg->msg[5] = ui_cmd->channel_identifier.minor >> 8;
+		msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff;
+		break;
+	}
+}
+
+static inline void cec_ops_user_control_pressed(const struct cec_msg *msg,
+						struct cec_op_ui_command *ui_cmd)
+{
+	ui_cmd->ui_cmd = msg->msg[2];
+	ui_cmd->has_opt_arg = false;
+	if (msg->len == 3)
+		return;
+	switch (ui_cmd->ui_cmd) {
+	case 0x56:
+	case 0x57:
+	case 0x60:
+	case 0x68:
+	case 0x69:
+	case 0x6a:
+		/* The optional operand is one byte for all these ui commands */
+		ui_cmd->play_mode = msg->msg[3];
+		ui_cmd->has_opt_arg = true;
+		break;
+	case 0x67:
+		if (msg->len < 7)
+			break;
+		ui_cmd->has_opt_arg = true;
+		ui_cmd->channel_identifier.channel_number_fmt = msg->msg[3] >> 2;
+		/* the major channel number is 10 bits wide, so shift by 8 */
+		ui_cmd->channel_identifier.major = ((msg->msg[3] & 3) << 8) | msg->msg[4];
+		ui_cmd->channel_identifier.minor = (msg->msg[5] << 8) | msg->msg[6];
+		break;
+	}
+}
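+
+/*
+ * A minimal usage sketch (illustrative only) of the UI command that
+ * carries a channel identifier (0x67), matching the packing above:
+ *
+ *	struct cec_op_ui_command ui_cmd = {
+ *		.ui_cmd = 0x67,
+ *		.has_opt_arg = true,
+ *		.channel_identifier = {
+ *			.channel_number_fmt = CEC_OP_CHANNEL_NUMBER_FMT_2_PART,
+ *			.major = 13,
+ *			.minor = 1,
+ *		},
+ *	};
+ *
+ *	cec_msg_init(&msg, CEC_LOG_ADDR_TV, CEC_LOG_ADDR_PLAYBACK_1);
+ *	cec_msg_user_control_pressed(&msg, &ui_cmd);
+ *
+ * followed by a <User Control Released> once the button is released.
+ */
+static inline void cec_msg_user_control_released(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_USER_CONTROL_RELEASED;
+}
+
+/* Remote Control Passthrough Feature */
+
+/* Power Status Feature */
+static inline void cec_msg_report_power_status(struct cec_msg *msg,
+					       __u8 pwr_state)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_REPORT_POWER_STATUS;
+	msg->msg[2] = pwr_state;
+}
+
+static inline void cec_ops_report_power_status(const struct cec_msg *msg,
+					       __u8 *pwr_state)
+{
+	*pwr_state = msg->msg[2];
+}
+
+static inline void cec_msg_give_device_power_status(struct cec_msg *msg,
+						    bool reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GIVE_DEVICE_POWER_STATUS;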
+	msg->reply = reply ? CEC_MSG_REPORT_POWER_STATUS : 0;
+}
+
+/* General Protocol Messages */
+static inline void cec_msg_feature_abort(struct cec_msg *msg,
+					 __u8 abort_msg, __u8 reason)
+{
+	msg->len = 4;
+	msg->msg[1] = CEC_MSG_FEATURE_ABORT;
+	msg->msg[2] = abort_msg;
+	msg->msg[3] = reason;
+}
+
+static inline void cec_ops_feature_abort(const struct cec_msg *msg,
+					 __u8 *abort_msg, __u8 *reason)
+{
+	*abort_msg = msg->msg[2];
+	*reason = msg->msg[3];
+}
+
+/* This changes the current message into a feature abort message */
+static inline void cec_msg_reply_feature_abort(struct cec_msg *msg, __u8 reason)
+{
+	cec_msg_set_reply_to(msg, msg);
+	msg->len = 4;
+	msg->msg[2] = msg->msg[1];
+	msg->msg[3] = reason;
+	msg->msg[1] = CEC_MSG_FEATURE_ABORT;
+}
+
+static inline void cec_msg_abort(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_ABORT;
+}
+
+
+/* System Audio Control Feature */
+static inline void cec_msg_report_audio_status(struct cec_msg *msg,
+					       __u8 aud_mute_status,
+					       __u8 aud_vol_status)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_REPORT_AUDIO_STATUS;
+	msg->msg[2] = (aud_mute_status << 7) | (aud_vol_status & 0x7f);
+}
+
+static inline void cec_ops_report_audio_status(const struct cec_msg *msg,
+					       __u8 *aud_mute_status,
+					       __u8 *aud_vol_status)
+{
+	*aud_mute_status = msg->msg[2] >> 7;
+	*aud_vol_status = msg->msg[2] & 0x7f;
+}
+
+static inline void cec_msg_give_audio_status(struct cec_msg *msg,
+					     bool reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GIVE_AUDIO_STATUS;
+	msg->reply = reply ? CEC_MSG_REPORT_AUDIO_STATUS : 0;
+}
+
+static inline void cec_msg_set_system_audio_mode(struct cec_msg *msg,
+						 __u8 sys_aud_status)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_SET_SYSTEM_AUDIO_MODE;
+	msg->msg[2] = sys_aud_status;
+}
+
+static inline void cec_ops_set_system_audio_mode(const struct cec_msg *msg,
+						 __u8 *sys_aud_status)
+{
+	*sys_aud_status = msg->msg[2];
+}
+
+static inline void cec_msg_system_audio_mode_request(struct cec_msg *msg,
+						     bool reply,
+						     __u16 phys_addr)
+{
+	msg->len = phys_addr == 0xffff ? 2 : 4;
+	msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST;
+	msg->msg[2] = phys_addr >> 8;
+	msg->msg[3] = phys_addr & 0xff;
+	msg->reply = reply ? CEC_MSG_SET_SYSTEM_AUDIO_MODE : 0;
+}
+
+static inline void cec_ops_system_audio_mode_request(const struct cec_msg *msg,
+						     __u16 *phys_addr)
+{
+	if (msg->len < 4)
+		*phys_addr = 0xffff;
+	else
+		*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_system_audio_mode_status(struct cec_msg *msg,
+						    __u8 sys_aud_status)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_STATUS;
+	msg->msg[2] = sys_aud_status;
+}
+
+static inline void cec_ops_system_audio_mode_status(const struct cec_msg *msg,
+						    __u8 *sys_aud_status)
+{
+	*sys_aud_status = msg->msg[2];
+}
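+
+/*
+ * A minimal sketch (illustrative; assumes CEC_OP_SYS_AUD_STATUS_ON, one of
+ * the sys_aud_status operands defined alongside these messages): an audio
+ * system granting a received <System Audio Mode Request> in place:
+ *
+ *	__u16 phys_addr;
+ *
+ *	cec_ops_system_audio_mode_request(&msg, &phys_addr);
+ *	cec_msg_set_reply_to(&msg, &msg);
+ *	cec_msg_set_system_audio_mode(&msg, CEC_OP_SYS_AUD_STATUS_ON);
+ */
+static inline void cec_msg_give_system_audio_mode_status(struct cec_msg *msg,
+							  bool reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS;
+	msg->reply = reply ?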
CEC_MSG_SYSTEM_AUDIO_MODE_STATUS : 0; +} + +static inline void cec_msg_report_short_audio_descriptor(struct cec_msg *msg, + __u8 num_descriptors, + const __u32 *descriptors) +{ + unsigned int i; + + if (num_descriptors > 4) + num_descriptors = 4; + msg->len = 2 + num_descriptors * 3; + msg->msg[1] = CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR; + for (i = 0; i < num_descriptors; i++) { + msg->msg[2 + i * 3] = (descriptors[i] >> 16) & 0xff; + msg->msg[3 + i * 3] = (descriptors[i] >> 8) & 0xff; + msg->msg[4 + i * 3] = descriptors[i] & 0xff; + } +} + +static inline void cec_ops_report_short_audio_descriptor(const struct cec_msg *msg, + __u8 *num_descriptors, + __u32 *descriptors) +{ + unsigned int i; + + *num_descriptors = (msg->len - 2) / 3; + if (*num_descriptors > 4) + *num_descriptors = 4; + for (i = 0; i < *num_descriptors; i++) + descriptors[i] = (msg->msg[2 + i * 3] << 16) | + (msg->msg[3 + i * 3] << 8) | + msg->msg[4 + i * 3]; +} + +static inline void cec_msg_request_short_audio_descriptor(struct cec_msg *msg, + bool reply, + __u8 num_descriptors, + const __u8 *audio_format_id, + const __u8 *audio_format_code) +{ + unsigned int i; + + if (num_descriptors > 4) + num_descriptors = 4; + msg->len = 2 + num_descriptors; + msg->msg[1] = CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR; + msg->reply = reply ? CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR : 0; + for (i = 0; i < num_descriptors; i++) + msg->msg[2 + i] = (audio_format_id[i] << 6) | + (audio_format_code[i] & 0x3f); +} + +static inline void cec_ops_request_short_audio_descriptor(const struct cec_msg *msg, + __u8 *num_descriptors, + __u8 *audio_format_id, + __u8 *audio_format_code) +{ + unsigned int i; + + *num_descriptors = msg->len - 2; + if (*num_descriptors > 4) + *num_descriptors = 4; + for (i = 0; i < *num_descriptors; i++) { + audio_format_id[i] = msg->msg[2 + i] >> 6; + audio_format_code[i] = msg->msg[2 + i] & 0x3f; + } +} + + +/* Audio Rate Control Feature */ +static inline void cec_msg_set_audio_rate(struct cec_msg *msg, + __u8 audio_rate) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_SET_AUDIO_RATE; + msg->msg[2] = audio_rate; +} + +static inline void cec_ops_set_audio_rate(const struct cec_msg *msg, + __u8 *audio_rate) +{ + *audio_rate = msg->msg[2]; +} + + +/* Audio Return Channel Control Feature */ +static inline void cec_msg_report_arc_initiated(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_REPORT_ARC_INITIATED; +} + +static inline void cec_msg_initiate_arc(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_INITIATE_ARC; + msg->reply = reply ? CEC_MSG_REPORT_ARC_INITIATED : 0; +} + +static inline void cec_msg_request_arc_initiation(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_REQUEST_ARC_INITIATION; + msg->reply = reply ? CEC_MSG_INITIATE_ARC : 0; +} + +static inline void cec_msg_report_arc_terminated(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_REPORT_ARC_TERMINATED; +} + +static inline void cec_msg_terminate_arc(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_TERMINATE_ARC; + msg->reply = reply ? CEC_MSG_REPORT_ARC_TERMINATED : 0; +} + +static inline void cec_msg_request_arc_termination(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_REQUEST_ARC_TERMINATION; + msg->reply = reply ? 
CEC_MSG_TERMINATE_ARC : 0; +} + + +/* Dynamic Audio Lipsync Feature */ +/* Only for CEC 2.0 and up */ +static inline void cec_msg_report_current_latency(struct cec_msg *msg, + __u16 phys_addr, + __u8 video_latency, + __u8 low_latency_mode, + __u8 audio_out_compensated, + __u8 audio_out_delay) +{ + msg->len = 7; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; + msg->msg[4] = video_latency; + msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated; + msg->msg[6] = audio_out_delay; +} + +static inline void cec_ops_report_current_latency(const struct cec_msg *msg, + __u16 *phys_addr, + __u8 *video_latency, + __u8 *low_latency_mode, + __u8 *audio_out_compensated, + __u8 *audio_out_delay) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *video_latency = msg->msg[4]; + *low_latency_mode = (msg->msg[5] >> 2) & 1; + *audio_out_compensated = msg->msg[5] & 3; + *audio_out_delay = msg->msg[6]; +} + +static inline void cec_msg_request_current_latency(struct cec_msg *msg, + bool reply, + __u16 phys_addr) +{ + msg->len = 4; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_REQUEST_CURRENT_LATENCY; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; + msg->reply = reply ? CEC_MSG_REPORT_CURRENT_LATENCY : 0; +} + +static inline void cec_ops_request_current_latency(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + + +/* Capability Discovery and Control Feature */ +static inline void cec_msg_cdc_hec_inquire_state(struct cec_msg *msg, + __u16 phys_addr1, + __u16 phys_addr2) +{ + msg->len = 9; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HEC_INQUIRE_STATE; + msg->msg[5] = phys_addr1 >> 8; + msg->msg[6] = phys_addr1 & 0xff; + msg->msg[7] = phys_addr2 >> 8; + msg->msg[8] = phys_addr2 & 0xff; +} + +static inline void cec_ops_cdc_hec_inquire_state(const struct cec_msg *msg, + __u16 *phys_addr, + __u16 *phys_addr1, + __u16 *phys_addr2) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6]; + *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8]; +} + +static inline void cec_msg_cdc_hec_report_state(struct cec_msg *msg, + __u16 target_phys_addr, + __u8 hec_func_state, + __u8 host_func_state, + __u8 enc_func_state, + __u8 cdc_errcode, + __u8 has_field, + __u16 hec_field) +{ + msg->len = has_field ? 
10 : 8;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HEC_REPORT_STATE;
+	msg->msg[5] = target_phys_addr >> 8;
+	msg->msg[6] = target_phys_addr & 0xff;
+	msg->msg[7] = (hec_func_state << 6) |
+		      (host_func_state << 4) |
+		      (enc_func_state << 2) |
+		      cdc_errcode;
+	if (has_field) {
+		msg->msg[8] = hec_field >> 8;
+		msg->msg[9] = hec_field & 0xff;
+	}
+}
+
+static inline void cec_ops_cdc_hec_report_state(const struct cec_msg *msg,
+						__u16 *phys_addr,
+						__u16 *target_phys_addr,
+						__u8 *hec_func_state,
+						__u8 *host_func_state,
+						__u8 *enc_func_state,
+						__u8 *cdc_errcode,
+						__u8 *has_field,
+						__u16 *hec_field)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*target_phys_addr = (msg->msg[5] << 8) | msg->msg[6];
+	*hec_func_state = msg->msg[7] >> 6;
+	*host_func_state = (msg->msg[7] >> 4) & 3;
+	/* the encoder function state lives in bits 3-2, not 5-4 */
+	*enc_func_state = (msg->msg[7] >> 2) & 3;
+	*cdc_errcode = msg->msg[7] & 3;
+	*has_field = msg->len >= 10;
+	*hec_field = *has_field ? ((msg->msg[8] << 8) | msg->msg[9]) : 0;
+}
+
+static inline void cec_msg_cdc_hec_set_state(struct cec_msg *msg,
+					     __u16 phys_addr1,
+					     __u16 phys_addr2,
+					     __u8 hec_set_state,
+					     __u16 phys_addr3,
+					     __u16 phys_addr4,
+					     __u16 phys_addr5)
+{
+	msg->len = 10;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE;
+	msg->msg[5] = phys_addr1 >> 8;
+	msg->msg[6] = phys_addr1 & 0xff;
+	msg->msg[7] = phys_addr2 >> 8;
+	msg->msg[8] = phys_addr2 & 0xff;
+	msg->msg[9] = hec_set_state;
+	if (phys_addr3 != CEC_PHYS_ADDR_INVALID) {
+		msg->msg[msg->len++] = phys_addr3 >> 8;
+		msg->msg[msg->len++] = phys_addr3 & 0xff;
+		if (phys_addr4 != CEC_PHYS_ADDR_INVALID) {
+			msg->msg[msg->len++] = phys_addr4 >> 8;
+			msg->msg[msg->len++] = phys_addr4 & 0xff;
+			if (phys_addr5 != CEC_PHYS_ADDR_INVALID) {
+				msg->msg[msg->len++] = phys_addr5 >> 8;
+				msg->msg[msg->len++] = phys_addr5 & 0xff;
+			}
+		}
+	}
+}
+
+static inline void cec_ops_cdc_hec_set_state(const struct cec_msg *msg,
+					     __u16 *phys_addr,
+					     __u16 *phys_addr1,
+					     __u16 *phys_addr2,
+					     __u8 *hec_set_state,
+					     __u16 *phys_addr3,
+					     __u16 *phys_addr4,
+					     __u16 *phys_addr5)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
+	*phys_addr2 = (msg->msg[7] << 8) | msg->msg[8];
+	*hec_set_state = msg->msg[9];
+	*phys_addr3 = *phys_addr4 = *phys_addr5 = CEC_PHYS_ADDR_INVALID;
+	if (msg->len >= 12)
+		*phys_addr3 = (msg->msg[10] << 8) | msg->msg[11];
+	if (msg->len >= 14)
+		*phys_addr4 = (msg->msg[12] << 8) | msg->msg[13];
+	if (msg->len >= 16)
+		*phys_addr5 = (msg->msg[14] << 8) | msg->msg[15];
+}
+
+static inline void cec_msg_cdc_hec_set_state_adjacent(struct cec_msg *msg,
+						      __u16 phys_addr1,
+						      __u8 hec_set_state)
+{
+	msg->len = 8;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE_ADJACENT;
+	msg->msg[5] = phys_addr1 >> 8;
+	msg->msg[6] = phys_addr1 & 0xff;
+	msg->msg[7] = hec_set_state;
+}
+
+static inline void cec_ops_cdc_hec_set_state_adjacent(const struct cec_msg *msg,
+						      __u16 *phys_addr,
+						      __u16 *phys_addr1,
+						      __u8 *hec_set_state)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
+	*hec_set_state = msg->msg[7];
+}
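+
+/*
+ * Worked example (illustrative) of the state byte packed by
+ * cec_msg_cdc_hec_report_state() above: hec_func_state occupies bits 7-6,
+ * host_func_state bits 5-4, enc_func_state bits 3-2 and cdc_errcode
+ * bits 1-0. A state byte of 0x64 (0b01100100) therefore unpacks as
+ * hec_func_state 1, host_func_state 2, enc_func_state 1 and cdc_errcode 0.
+ */
+static inline void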
cec_msg_cdc_hec_request_deactivation(struct cec_msg *msg, + __u16 phys_addr1, + __u16 phys_addr2, + __u16 phys_addr3) +{ + msg->len = 11; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION; + msg->msg[5] = phys_addr1 >> 8; + msg->msg[6] = phys_addr1 & 0xff; + msg->msg[7] = phys_addr2 >> 8; + msg->msg[8] = phys_addr2 & 0xff; + msg->msg[9] = phys_addr3 >> 8; + msg->msg[10] = phys_addr3 & 0xff; +} + +static inline void cec_ops_cdc_hec_request_deactivation(const struct cec_msg *msg, + __u16 *phys_addr, + __u16 *phys_addr1, + __u16 *phys_addr2, + __u16 *phys_addr3) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6]; + *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8]; + *phys_addr3 = (msg->msg[9] << 8) | msg->msg[10]; +} + +static inline void cec_msg_cdc_hec_notify_alive(struct cec_msg *msg) +{ + msg->len = 5; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HEC_NOTIFY_ALIVE; +} + +static inline void cec_ops_cdc_hec_notify_alive(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + +static inline void cec_msg_cdc_hec_discover(struct cec_msg *msg) +{ + msg->len = 5; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HEC_DISCOVER; +} + +static inline void cec_ops_cdc_hec_discover(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + +static inline void cec_msg_cdc_hpd_set_state(struct cec_msg *msg, + __u8 input_port, + __u8 hpd_state) +{ + msg->len = 6; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HPD_SET_STATE; + msg->msg[5] = (input_port << 4) | hpd_state; +} + +static inline void cec_ops_cdc_hpd_set_state(const struct cec_msg *msg, + __u16 *phys_addr, + __u8 *input_port, + __u8 *hpd_state) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *input_port = msg->msg[5] >> 4; + *hpd_state = msg->msg[5] & 0xf; +} + +static inline void cec_msg_cdc_hpd_report_state(struct cec_msg *msg, + __u8 hpd_state, + __u8 hpd_error) +{ + msg->len = 6; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HPD_REPORT_STATE; + msg->msg[5] = (hpd_state << 4) | hpd_error; +} + +static inline void cec_ops_cdc_hpd_report_state(const struct cec_msg *msg, + __u16 *phys_addr, + __u8 *hpd_state, + __u8 *hpd_error) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *hpd_state = msg->msg[5] >> 4; + *hpd_error = msg->msg[5] & 0xf; +} + +#endif diff --git a/include/linux/cec.h b/include/linux/cec.h new file mode 100644 index 000000000000..851968e803fa --- /dev/null +++ b/include/linux/cec.h @@ -0,0 +1,1014 @@ +/* + * cec - HDMI Consumer Electronics Control public header + * + * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved. 
+ * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * Alternatively you can redistribute this file under the terms of the + * BSD license as stated below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. The names of its contributors may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * Note: this framework is still in staging and it is likely the API + * will change before it goes out of staging. + * + * Once it is moved out of staging this header will move to uapi. + */ +#ifndef _CEC_UAPI_H +#define _CEC_UAPI_H + +#include <linux/types.h> + +#define CEC_MAX_MSG_SIZE 16 + +/** + * struct cec_msg - CEC message structure. + * @tx_ts: Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the + * driver when the message transmission has finished. + * @rx_ts: Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the + * driver when the message was received. + * @len: Length in bytes of the message. + * @timeout: The timeout (in ms) that is used to timeout CEC_RECEIVE. + * Set to 0 if you want to wait forever. This timeout can also be + * used with CEC_TRANSMIT as the timeout for waiting for a reply. + * If 0, then it will use a 1 second timeout instead of waiting + * forever as is done with CEC_RECEIVE. + * @sequence: The framework assigns a sequence number to messages that are + * sent. This can be used to track replies to previously sent + * messages. + * @flags: Set to 0. + * @msg: The message payload. + * @reply: This field is ignored with CEC_RECEIVE and is only used by + * CEC_TRANSMIT. If non-zero, then wait for a reply with this + * opcode. Set to CEC_MSG_FEATURE_ABORT if you want to wait for + * a possible ABORT reply. If there was an error when sending the + * msg or FeatureAbort was returned, then reply is set to 0. + * If reply is non-zero upon return, then len/msg are set to + * the received message. + * If reply is zero upon return and status has the + * CEC_TX_STATUS_FEATURE_ABORT bit set, then len/msg are set to + * the received feature abort message. + * If reply is zero upon return and status has the + * CEC_TX_STATUS_MAX_RETRIES bit set, then no reply was seen at + * all. If reply is non-zero for CEC_TRANSMIT and the message is a + * broadcast, then -EINVAL is returned. + * if reply is non-zero, then timeout is set to 1000 (the required + * maximum response time). 
+ * @rx_status: The message receive status bits. Set by the driver.
+ * @tx_status: The message transmit status bits. Set by the driver.
+ * @tx_arb_lost_cnt: The number of 'Arbitration Lost' events. Set by the driver.
+ * @tx_nack_cnt: The number of 'Not Acknowledged' events. Set by the driver.
+ * @tx_low_drive_cnt: The number of 'Low Drive Detected' events. Set by the
+ *	driver.
+ * @tx_error_cnt: The number of 'Error' events. Set by the driver.
+ */
+struct cec_msg {
+	__u64 tx_ts;
+	__u64 rx_ts;
+	__u32 len;
+	__u32 timeout;
+	__u32 sequence;
+	__u32 flags;
+	__u8 msg[CEC_MAX_MSG_SIZE];
+	__u8 reply;
+	__u8 rx_status;
+	__u8 tx_status;
+	__u8 tx_arb_lost_cnt;
+	__u8 tx_nack_cnt;
+	__u8 tx_low_drive_cnt;
+	__u8 tx_error_cnt;
+};
+
+/**
+ * cec_msg_initiator - return the initiator's logical address.
+ * @msg: the message structure
+ */
+static inline __u8 cec_msg_initiator(const struct cec_msg *msg)
+{
+	return msg->msg[0] >> 4;
+}
+
+/**
+ * cec_msg_destination - return the destination's logical address.
+ * @msg: the message structure
+ */
+static inline __u8 cec_msg_destination(const struct cec_msg *msg)
+{
+	return msg->msg[0] & 0xf;
+}
+
+/**
+ * cec_msg_opcode - return the opcode of the message, -1 for poll
+ * @msg: the message structure
+ */
+static inline int cec_msg_opcode(const struct cec_msg *msg)
+{
+	return msg->len > 1 ? msg->msg[1] : -1;
+}
+
+/**
+ * cec_msg_is_broadcast - return true if this is a broadcast message.
+ * @msg: the message structure
+ */
+static inline bool cec_msg_is_broadcast(const struct cec_msg *msg)
+{
+	return (msg->msg[0] & 0xf) == 0xf;
+}
+
+/**
+ * cec_msg_init - initialize the message structure.
+ * @msg: the message structure
+ * @initiator: the logical address of the initiator
+ * @destination: the logical address of the destination (0xf for broadcast)
+ *
+ * The whole structure is zeroed, the len field is set to 1 (i.e. a poll
+ * message) and the initiator and destination are filled in.
+ */
+static inline void cec_msg_init(struct cec_msg *msg,
+				__u8 initiator, __u8 destination)
+{
+	memset(msg, 0, sizeof(*msg));
+	msg->msg[0] = (initiator << 4) | destination;
+	msg->len = 1;
+}
+
+/**
+ * cec_msg_set_reply_to - fill in destination/initiator in a reply message.
+ * @msg: the message structure for the reply
+ * @orig: the original message structure
+ *
+ * Set the msg destination to the orig initiator and the msg initiator to the
+ * orig destination. Note that msg and orig may be the same pointer, in which
+ * case the change is done in place.
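+ *
+ * A minimal sketch (illustrative; cec_msg_report_power_status() lives in
+ * cec-funcs.h and CEC_OP_POWER_STATUS_ON is one of the power status
+ * operands defined later in this header): answering a received
+ * <Give Device Power Status> in place:
+ *
+ *	cec_msg_set_reply_to(&msg, &msg);
+ *	cec_msg_report_power_status(&msg, CEC_OP_POWER_STATUS_ON);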
+ */
+static inline void cec_msg_set_reply_to(struct cec_msg *msg,
+					struct cec_msg *orig)
+{
+	/* The destination becomes the initiator and vice versa */
+	msg->msg[0] = (cec_msg_destination(orig) << 4) |
+		      cec_msg_initiator(orig);
+	msg->reply = msg->timeout = 0;
+}
+
+/* cec status field */
+#define CEC_TX_STATUS_OK		(1 << 0)
+#define CEC_TX_STATUS_ARB_LOST		(1 << 1)
+#define CEC_TX_STATUS_NACK		(1 << 2)
+#define CEC_TX_STATUS_LOW_DRIVE		(1 << 3)
+#define CEC_TX_STATUS_ERROR		(1 << 4)
+#define CEC_TX_STATUS_MAX_RETRIES	(1 << 5)
+
+#define CEC_RX_STATUS_OK		(1 << 0)
+#define CEC_RX_STATUS_TIMEOUT		(1 << 1)
+#define CEC_RX_STATUS_FEATURE_ABORT	(1 << 2)
+
+static inline bool cec_msg_status_is_ok(const struct cec_msg *msg)
+{
+	if (msg->tx_status && !(msg->tx_status & CEC_TX_STATUS_OK))
+		return false;
+	if (msg->rx_status && !(msg->rx_status & CEC_RX_STATUS_OK))
+		return false;
+	if (!msg->tx_status && !msg->rx_status)
+		return false;
+	return !(msg->rx_status & CEC_RX_STATUS_FEATURE_ABORT);
+}
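+
+/*
+ * A minimal sketch (illustrative; assumes fd is an opened /dev/cecX node
+ * and uses the CEC_TRANSMIT ioctl defined below):
+ *
+ *	struct cec_msg msg;
+ *
+ *	cec_msg_init(&msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_TV);
+ *	msg.len = 2;
+ *	msg.msg[1] = CEC_MSG_STANDBY;
+ *	if (ioctl(fd, CEC_TRANSMIT, &msg) == 0 && cec_msg_status_is_ok(&msg))
+ *		; /* the TV acked the <Standby> message */
+ */
+
+#define CEC_LOG_ADDR_INVALID		0xff
+#define CEC_PHYS_ADDR_INVALID		0xffff
+
+/*
+ * The maximum number of logical addresses one device can be assigned to.
+ * The CEC 2.0 spec allows for only 2 logical addresses at the moment. The
+ * Analog Devices CEC hardware supports 3. So let's go wild and go for 4.
+ */
+#define CEC_MAX_LOG_ADDRS 4
+
+/* The logical addresses defined by CEC 2.0 */
+#define CEC_LOG_ADDR_TV			0
+#define CEC_LOG_ADDR_RECORD_1		1
+#define CEC_LOG_ADDR_RECORD_2		2
+#define CEC_LOG_ADDR_TUNER_1		3
+#define CEC_LOG_ADDR_PLAYBACK_1		4
+#define CEC_LOG_ADDR_AUDIOSYSTEM	5
+#define CEC_LOG_ADDR_TUNER_2		6
+#define CEC_LOG_ADDR_TUNER_3		7
+#define CEC_LOG_ADDR_PLAYBACK_2		8
+#define CEC_LOG_ADDR_RECORD_3		9
+#define CEC_LOG_ADDR_TUNER_4		10
+#define CEC_LOG_ADDR_PLAYBACK_3		11
+#define CEC_LOG_ADDR_BACKUP_1		12
+#define CEC_LOG_ADDR_BACKUP_2		13
+#define CEC_LOG_ADDR_SPECIFIC		14
+#define CEC_LOG_ADDR_UNREGISTERED	15 /* as initiator address */
+#define CEC_LOG_ADDR_BROADCAST		15 /* as destination address */
+
+/* The logical address types that the CEC device wants to claim */
+#define CEC_LOG_ADDR_TYPE_TV		0
+#define CEC_LOG_ADDR_TYPE_RECORD	1
+#define CEC_LOG_ADDR_TYPE_TUNER		2
+#define CEC_LOG_ADDR_TYPE_PLAYBACK	3
+#define CEC_LOG_ADDR_TYPE_AUDIOSYSTEM	4
+#define CEC_LOG_ADDR_TYPE_SPECIFIC	5
+#define CEC_LOG_ADDR_TYPE_UNREGISTERED	6
+/*
+ * Switches should use UNREGISTERED.
+ * Processors should use SPECIFIC.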
+ */ + +#define CEC_LOG_ADDR_MASK_TV (1 << CEC_LOG_ADDR_TV) +#define CEC_LOG_ADDR_MASK_RECORD ((1 << CEC_LOG_ADDR_RECORD_1) | \ + (1 << CEC_LOG_ADDR_RECORD_2) | \ + (1 << CEC_LOG_ADDR_RECORD_3)) +#define CEC_LOG_ADDR_MASK_TUNER ((1 << CEC_LOG_ADDR_TUNER_1) | \ + (1 << CEC_LOG_ADDR_TUNER_2) | \ + (1 << CEC_LOG_ADDR_TUNER_3) | \ + (1 << CEC_LOG_ADDR_TUNER_4)) +#define CEC_LOG_ADDR_MASK_PLAYBACK ((1 << CEC_LOG_ADDR_PLAYBACK_1) | \ + (1 << CEC_LOG_ADDR_PLAYBACK_2) | \ + (1 << CEC_LOG_ADDR_PLAYBACK_3)) +#define CEC_LOG_ADDR_MASK_AUDIOSYSTEM (1 << CEC_LOG_ADDR_AUDIOSYSTEM) +#define CEC_LOG_ADDR_MASK_BACKUP ((1 << CEC_LOG_ADDR_BACKUP_1) | \ + (1 << CEC_LOG_ADDR_BACKUP_2)) +#define CEC_LOG_ADDR_MASK_SPECIFIC (1 << CEC_LOG_ADDR_SPECIFIC) +#define CEC_LOG_ADDR_MASK_UNREGISTERED (1 << CEC_LOG_ADDR_UNREGISTERED) + +static inline bool cec_has_tv(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_TV; +} + +static inline bool cec_has_record(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_RECORD; +} + +static inline bool cec_has_tuner(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_TUNER; +} + +static inline bool cec_has_playback(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_PLAYBACK; +} + +static inline bool cec_has_audiosystem(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_AUDIOSYSTEM; +} + +static inline bool cec_has_backup(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_BACKUP; +} + +static inline bool cec_has_specific(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_SPECIFIC; +} + +static inline bool cec_is_unregistered(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_UNREGISTERED; +} + +static inline bool cec_is_unconfigured(__u16 log_addr_mask) +{ + return log_addr_mask == 0; +} + +/* + * Use this if there is no vendor ID (CEC_G_VENDOR_ID) or if the vendor ID + * should be disabled (CEC_S_VENDOR_ID) + */ +#define CEC_VENDOR_ID_NONE 0xffffffff + +/* The message handling modes */ +/* Modes for initiator */ +#define CEC_MODE_NO_INITIATOR (0x0 << 0) +#define CEC_MODE_INITIATOR (0x1 << 0) +#define CEC_MODE_EXCL_INITIATOR (0x2 << 0) +#define CEC_MODE_INITIATOR_MSK 0x0f + +/* Modes for follower */ +#define CEC_MODE_NO_FOLLOWER (0x0 << 4) +#define CEC_MODE_FOLLOWER (0x1 << 4) +#define CEC_MODE_EXCL_FOLLOWER (0x2 << 4) +#define CEC_MODE_EXCL_FOLLOWER_PASSTHRU (0x3 << 4) +#define CEC_MODE_MONITOR (0xe << 4) +#define CEC_MODE_MONITOR_ALL (0xf << 4) +#define CEC_MODE_FOLLOWER_MSK 0xf0 + +/* Userspace has to configure the physical address */ +#define CEC_CAP_PHYS_ADDR (1 << 0) +/* Userspace has to configure the logical addresses */ +#define CEC_CAP_LOG_ADDRS (1 << 1) +/* Userspace can transmit messages (and thus become follower as well) */ +#define CEC_CAP_TRANSMIT (1 << 2) +/* + * Passthrough all messages instead of processing them. + */ +#define CEC_CAP_PASSTHROUGH (1 << 3) +/* Supports remote control */ +#define CEC_CAP_RC (1 << 4) +/* Hardware can monitor all messages, not just directed and broadcast. */ +#define CEC_CAP_MONITOR_ALL (1 << 5) + +/** + * struct cec_caps - CEC capabilities structure. + * @driver: name of the CEC device driver. + * @name: name of the CEC device. @driver + @name must be unique. + * @available_log_addrs: number of available logical addresses. + * @capabilities: capabilities of the CEC adapter. + * @version: version of the CEC adapter framework. 
+ */
+struct cec_caps {
+	char driver[32];
+	char name[32];
+	__u32 available_log_addrs;
+	__u32 capabilities;
+	__u32 version;
+};
+
+/**
+ * struct cec_log_addrs - CEC logical addresses structure.
+ * @log_addr: the claimed logical addresses. Set by the driver.
+ * @log_addr_mask: current logical address mask. Set by the driver.
+ * @cec_version: the CEC version that the adapter should implement. Set by the
+ *	caller.
+ * @num_log_addrs: how many logical addresses should be claimed. Set by the
+ *	caller.
+ * @vendor_id: the vendor ID of the device. Set by the caller.
+ * @flags: flags.
+ * @osd_name: the OSD name of the device. Set by the caller.
+ * @primary_device_type: the primary device type for each logical address.
+ *	Set by the caller.
+ * @log_addr_type: the logical address types. Set by the caller.
+ * @all_device_types: CEC 2.0: all device types represented by the logical
+ *	address. Set by the caller.
+ * @features: CEC 2.0: The logical address features. Set by the caller.
+ */
+struct cec_log_addrs {
+	__u8 log_addr[CEC_MAX_LOG_ADDRS];
+	__u16 log_addr_mask;
+	__u8 cec_version;
+	__u8 num_log_addrs;
+	__u32 vendor_id;
+	__u32 flags;
+	char osd_name[15];
+	__u8 primary_device_type[CEC_MAX_LOG_ADDRS];
+	__u8 log_addr_type[CEC_MAX_LOG_ADDRS];
+
+	/* CEC 2.0 */
+	__u8 all_device_types[CEC_MAX_LOG_ADDRS];
+	__u8 features[CEC_MAX_LOG_ADDRS][12];
+};
+
+/* Allow a fallback to unregistered */
+#define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK	(1 << 0)
+
+/* Events */
+
+/* Event that occurs when the adapter state changes */
+#define CEC_EVENT_STATE_CHANGE		1
+/*
+ * This event is sent when messages are lost because the application
+ * didn't empty the message queue in time
+ */
+#define CEC_EVENT_LOST_MSGS		2
+
+#define CEC_EVENT_FL_INITIAL_STATE	(1 << 0)
+
+/**
+ * struct cec_event_state_change - used when the CEC adapter changes state.
+ * @phys_addr: the current physical address
+ * @log_addr_mask: the current logical address mask
+ */
+struct cec_event_state_change {
+	__u16 phys_addr;
+	__u16 log_addr_mask;
+};
+
+/**
+ * struct cec_event_lost_msgs - tells you how many messages were lost.
+ * @lost_msgs: how many messages were lost.
+ */
+struct cec_event_lost_msgs {
+	__u32 lost_msgs;
+};
+
+/**
+ * struct cec_event - CEC event structure
+ * @ts: the timestamp of when the event was sent.
+ * @event: the event type.
+ * @flags: event flags.
+ * @state_change: the event payload for CEC_EVENT_STATE_CHANGE.
+ * @lost_msgs: the event payload for CEC_EVENT_LOST_MSGS.
+ * @raw: array to pad the union.
+ */
+struct cec_event {
+	__u64 ts;
+	__u32 event;
+	__u32 flags;
+	union {
+		struct cec_event_state_change state_change;
+		struct cec_event_lost_msgs lost_msgs;
+		__u32 raw[16];
+	};
+};
+
+/* ioctls */
+
+/* Adapter capabilities */
+#define CEC_ADAP_G_CAPS		_IOWR('a', 0, struct cec_caps)
+
+/*
+ * phys_addr is either 0 (if this is the CEC root device)
+ * or a valid physical address obtained from the sink's EDID
+ * as read by this CEC device (if this is a source device)
+ * or a physical address obtained and modified from a sink
+ * EDID and used for a sink CEC device.
+ * If nothing is connected, then phys_addr is 0xffff.
+ * See HDMI 1.4b, section 8.7 (Physical Address).
+ *
+ * The CEC_ADAP_S_PHYS_ADDR ioctl may not be available if that is handled
+ * internally.
+ */
+#define CEC_ADAP_G_PHYS_ADDR	_IOR('a', 1, __u16)
+#define CEC_ADAP_S_PHYS_ADDR	_IOW('a', 2, __u16)
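+
+/*
+ * A minimal sketch (illustrative; assumes fd is an opened /dev/cecX node):
+ *
+ *	struct cec_caps caps;
+ *	__u16 pa = 0x1000; /* physical address 1.0.0.0 */
+ *
+ *	ioctl(fd, CEC_ADAP_G_CAPS, &caps);
+ *	if (caps.capabilities & CEC_CAP_PHYS_ADDR)
+ *		ioctl(fd, CEC_ADAP_S_PHYS_ADDR, &pa);
+ */
+
+/*
+ * Configure the CEC adapter. It sets the device type and which
+ * logical types it will try to claim.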
+ * It will return which logical addresses it could actually claim.
+ * An error is returned if the adapter is disabled or if there
+ * is no physical address assigned.
+ */
+
+#define CEC_ADAP_G_LOG_ADDRS	_IOR('a', 3, struct cec_log_addrs)
+#define CEC_ADAP_S_LOG_ADDRS	_IOWR('a', 4, struct cec_log_addrs)
+
+/* Transmit/receive a CEC command */
+#define CEC_TRANSMIT		_IOWR('a', 5, struct cec_msg)
+#define CEC_RECEIVE		_IOWR('a', 6, struct cec_msg)
+
+/* Dequeue CEC events */
+#define CEC_DQEVENT		_IOWR('a', 7, struct cec_event)
+
+/*
+ * Get and set the message handling mode for this filehandle.
+ */
+#define CEC_G_MODE		_IOR('a', 8, __u32)
+#define CEC_S_MODE		_IOW('a', 9, __u32)
+
+/*
+ * The remainder of this header defines all CEC messages and operands.
+ * The format matters since the cec-ctl utility parses it to generate
+ * code for implementing all these messages.
+ *
+ * Comments ending with 'Feature' group messages for each feature.
+ * If messages are part of multiple features, then the "Has also"
+ * comment is used to list the previously defined messages that are
+ * supported by the feature.
+ *
+ * Before operands are defined a comment is added that gives the
+ * name of the operand and in brackets the variable name of the
+ * corresponding argument in the cec-funcs.h function.
+ */
+
+/* Messages */
+
+/* One Touch Play Feature */
+#define CEC_MSG_ACTIVE_SOURCE			0x82
+#define CEC_MSG_IMAGE_VIEW_ON			0x04
+#define CEC_MSG_TEXT_VIEW_ON			0x0d
+
+
+/* Routing Control Feature */
+
+/*
+ * Has also:
+ * CEC_MSG_ACTIVE_SOURCE
+ */
+
+#define CEC_MSG_INACTIVE_SOURCE			0x9d
+#define CEC_MSG_REQUEST_ACTIVE_SOURCE		0x85
+#define CEC_MSG_ROUTING_CHANGE			0x80
+#define CEC_MSG_ROUTING_INFORMATION		0x81
+#define CEC_MSG_SET_STREAM_PATH			0x86
+
+
+/* Standby Feature */
+#define CEC_MSG_STANDBY				0x36
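+
+/*
+ * A minimal One Touch Play sketch (illustrative; assumes fd is an opened
+ * /dev/cecX node and phys_addr holds the adapter's physical address):
+ * wake the TV, then broadcast <Active Source>:
+ *
+ *	cec_msg_init(&msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_TV);
+ *	msg.len = 2;
+ *	msg.msg[1] = CEC_MSG_IMAGE_VIEW_ON;
+ *	ioctl(fd, CEC_TRANSMIT, &msg);
+ *
+ *	cec_msg_init(&msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_BROADCAST);
+ *	msg.len = 4;
+ *	msg.msg[1] = CEC_MSG_ACTIVE_SOURCE;
+ *	msg.msg[2] = phys_addr >> 8;
+ *	msg.msg[3] = phys_addr & 0xff;
+ *	ioctl(fd, CEC_TRANSMIT, &msg);
+ */
+
+
+/* One Touch Record Feature */
+#define CEC_MSG_RECORD_OFF			0x0b
+#define CEC_MSG_RECORD_ON			0x09
+/* Record Source Type Operand (rec_src_type) */
+#define CEC_OP_RECORD_SRC_OWN			1
+#define CEC_OP_RECORD_SRC_DIGITAL		2
+#define CEC_OP_RECORD_SRC_ANALOG		3
+#define CEC_OP_RECORD_SRC_EXT_PLUG		4
+#define CEC_OP_RECORD_SRC_EXT_PHYS_ADDR		5
+/* Service Identification Method Operand (service_id_method) */
+#define CEC_OP_SERVICE_ID_METHOD_BY_DIG_ID	0
+#define CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL	1
+/* Digital Service Broadcast System Operand (dig_bcast_system) */
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_GEN	0x00
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN	0x01
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_GEN		0x02
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_BS		0x08
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_CS		0x09
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_T		0x0a
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE	0x10
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT	0x11
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T		0x12
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_C		0x18
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S		0x19
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S2		0x1a
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_T		0x1b
+/* Analogue Broadcast Type Operand (ana_bcast_type) */
+#define CEC_OP_ANA_BCAST_TYPE_CABLE		0
+#define CEC_OP_ANA_BCAST_TYPE_SATELLITE		1
+#define CEC_OP_ANA_BCAST_TYPE_TERRESTRIAL	2
+/* Broadcast System Operand (bcast_system) */
+#define CEC_OP_BCAST_SYSTEM_PAL_BG		0x00
+#define CEC_OP_BCAST_SYSTEM_SECAM_LQ		0x01 /* SECAM L' */
+#define CEC_OP_BCAST_SYSTEM_PAL_M		0x02
+#define CEC_OP_BCAST_SYSTEM_NTSC_M		0x03
+#define CEC_OP_BCAST_SYSTEM_PAL_I		0x04
+#define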
CEC_OP_BCAST_SYSTEM_SECAM_DK 0x05 +#define CEC_OP_BCAST_SYSTEM_SECAM_BG 0x06 +#define CEC_OP_BCAST_SYSTEM_SECAM_L 0x07 +#define CEC_OP_BCAST_SYSTEM_PAL_DK 0x08 +#define CEC_OP_BCAST_SYSTEM_OTHER 0x1f +/* Channel Number Format Operand (channel_number_fmt) */ +#define CEC_OP_CHANNEL_NUMBER_FMT_1_PART 0x01 +#define CEC_OP_CHANNEL_NUMBER_FMT_2_PART 0x02 + +#define CEC_MSG_RECORD_STATUS 0x0a +/* Record Status Operand (rec_status) */ +#define CEC_OP_RECORD_STATUS_CUR_SRC 0x01 +#define CEC_OP_RECORD_STATUS_DIG_SERVICE 0x02 +#define CEC_OP_RECORD_STATUS_ANA_SERVICE 0x03 +#define CEC_OP_RECORD_STATUS_EXT_INPUT 0x04 +#define CEC_OP_RECORD_STATUS_NO_DIG_SERVICE 0x05 +#define CEC_OP_RECORD_STATUS_NO_ANA_SERVICE 0x06 +#define CEC_OP_RECORD_STATUS_NO_SERVICE 0x07 +#define CEC_OP_RECORD_STATUS_INVALID_EXT_PLUG 0x09 +#define CEC_OP_RECORD_STATUS_INVALID_EXT_PHYS_ADDR 0x0a +#define CEC_OP_RECORD_STATUS_UNSUP_CA 0x0b +#define CEC_OP_RECORD_STATUS_NO_CA_ENTITLEMENTS 0x0c +#define CEC_OP_RECORD_STATUS_CANT_COPY_SRC 0x0d +#define CEC_OP_RECORD_STATUS_NO_MORE_COPIES 0x0e +#define CEC_OP_RECORD_STATUS_NO_MEDIA 0x10 +#define CEC_OP_RECORD_STATUS_PLAYING 0x11 +#define CEC_OP_RECORD_STATUS_ALREADY_RECORDING 0x12 +#define CEC_OP_RECORD_STATUS_MEDIA_PROT 0x13 +#define CEC_OP_RECORD_STATUS_NO_SIGNAL 0x14 +#define CEC_OP_RECORD_STATUS_MEDIA_PROBLEM 0x15 +#define CEC_OP_RECORD_STATUS_NO_SPACE 0x16 +#define CEC_OP_RECORD_STATUS_PARENTAL_LOCK 0x17 +#define CEC_OP_RECORD_STATUS_TERMINATED_OK 0x1a +#define CEC_OP_RECORD_STATUS_ALREADY_TERM 0x1b +#define CEC_OP_RECORD_STATUS_OTHER 0x1f + +#define CEC_MSG_RECORD_TV_SCREEN 0x0f + + +/* Timer Programming Feature */ +#define CEC_MSG_CLEAR_ANALOGUE_TIMER 0x33 +/* Recording Sequence Operand (recording_seq) */ +#define CEC_OP_REC_SEQ_SUNDAY 0x01 +#define CEC_OP_REC_SEQ_MONDAY 0x02 +#define CEC_OP_REC_SEQ_TUESDAY 0x04 +#define CEC_OP_REC_SEQ_WEDNESDAY 0x08 +#define CEC_OP_REC_SEQ_THURSDAY 0x10 +#define CEC_OP_REC_SEQ_FRIDAY 0x20 +#define CEC_OP_REC_SEQ_SATERDAY 0x40 +#define CEC_OP_REC_SEQ_ONCE_ONLY 0x00 + +#define CEC_MSG_CLEAR_DIGITAL_TIMER 0x99 + +#define CEC_MSG_CLEAR_EXT_TIMER 0xa1 +/* External Source Specifier Operand (ext_src_spec) */ +#define CEC_OP_EXT_SRC_PLUG 0x04 +#define CEC_OP_EXT_SRC_PHYS_ADDR 0x05 + +#define CEC_MSG_SET_ANALOGUE_TIMER 0x34 +#define CEC_MSG_SET_DIGITAL_TIMER 0x97 +#define CEC_MSG_SET_EXT_TIMER 0xa2 + +#define CEC_MSG_SET_TIMER_PROGRAM_TITLE 0x67 +#define CEC_MSG_TIMER_CLEARED_STATUS 0x43 +/* Timer Cleared Status Data Operand (timer_cleared_status) */ +#define CEC_OP_TIMER_CLR_STAT_RECORDING 0x00 +#define CEC_OP_TIMER_CLR_STAT_NO_MATCHING 0x01 +#define CEC_OP_TIMER_CLR_STAT_NO_INFO 0x02 +#define CEC_OP_TIMER_CLR_STAT_CLEARED 0x80 + +#define CEC_MSG_TIMER_STATUS 0x35 +/* Timer Overlap Warning Operand (timer_overlap_warning) */ +#define CEC_OP_TIMER_OVERLAP_WARNING_NO_OVERLAP 0 +#define CEC_OP_TIMER_OVERLAP_WARNING_OVERLAP 1 +/* Media Info Operand (media_info) */ +#define CEC_OP_MEDIA_INFO_UNPROT_MEDIA 0 +#define CEC_OP_MEDIA_INFO_PROT_MEDIA 1 +#define CEC_OP_MEDIA_INFO_NO_MEDIA 2 +/* Programmed Indicator Operand (prog_indicator) */ +#define CEC_OP_PROG_IND_NOT_PROGRAMMED 0 +#define CEC_OP_PROG_IND_PROGRAMMED 1 +/* Programmed Info Operand (prog_info) */ +#define CEC_OP_PROG_INFO_ENOUGH_SPACE 0x08 +#define CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE 0x09 +#define CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE 0x0b +#define CEC_OP_PROG_INFO_NONE_AVAILABLE 0x0a +/* Not Programmed Error Info Operand (prog_error) */ +#define CEC_OP_PROG_ERROR_NO_FREE_TIMER 0x01 +#define 
CEC_OP_PROG_ERROR_DATE_OUT_OF_RANGE 0x02
+#define CEC_OP_PROG_ERROR_REC_SEQ_ERROR 0x03
+#define CEC_OP_PROG_ERROR_INV_EXT_PLUG 0x04
+#define CEC_OP_PROG_ERROR_INV_EXT_PHYS_ADDR 0x05
+#define CEC_OP_PROG_ERROR_CA_UNSUPP 0x06
+#define CEC_OP_PROG_ERROR_INSUF_CA_ENTITLEMENTS 0x07
+#define CEC_OP_PROG_ERROR_RESOLUTION_UNSUPP 0x08
+#define CEC_OP_PROG_ERROR_PARENTAL_LOCK 0x09
+#define CEC_OP_PROG_ERROR_CLOCK_FAILURE 0x0a
+#define CEC_OP_PROG_ERROR_DUPLICATE 0x0e
+
+
+/* System Information Feature */
+#define CEC_MSG_CEC_VERSION 0x9e
+/* CEC Version Operand (cec_version) */
+#define CEC_OP_CEC_VERSION_1_3A 4
+#define CEC_OP_CEC_VERSION_1_4 5
+#define CEC_OP_CEC_VERSION_2_0 6
+
+#define CEC_MSG_GET_CEC_VERSION 0x9f
+#define CEC_MSG_GIVE_PHYSICAL_ADDR 0x83
+#define CEC_MSG_GET_MENU_LANGUAGE 0x91
+#define CEC_MSG_REPORT_PHYSICAL_ADDR 0x84
+/* Primary Device Type Operand (prim_devtype) */
+#define CEC_OP_PRIM_DEVTYPE_TV 0
+#define CEC_OP_PRIM_DEVTYPE_RECORD 1
+#define CEC_OP_PRIM_DEVTYPE_TUNER 3
+#define CEC_OP_PRIM_DEVTYPE_PLAYBACK 4
+#define CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM 5
+#define CEC_OP_PRIM_DEVTYPE_SWITCH 6
+#define CEC_OP_PRIM_DEVTYPE_PROCESSOR 7
+
+#define CEC_MSG_SET_MENU_LANGUAGE 0x32
+#define CEC_MSG_REPORT_FEATURES 0xa6 /* HDMI 2.0 */
+/* All Device Types Operand (all_device_types) */
+#define CEC_OP_ALL_DEVTYPE_TV 0x80
+#define CEC_OP_ALL_DEVTYPE_RECORD 0x40
+#define CEC_OP_ALL_DEVTYPE_TUNER 0x20
+#define CEC_OP_ALL_DEVTYPE_PLAYBACK 0x10
+#define CEC_OP_ALL_DEVTYPE_AUDIOSYSTEM 0x08
+#define CEC_OP_ALL_DEVTYPE_SWITCH 0x04
+/*
+ * And if you are wondering what happened to PROCESSOR devices: those
+ * should be mapped to a SWITCH.
+ */
+
+/* Valid for RC Profile and Device Feature operands */
+#define CEC_OP_FEAT_EXT 0x80 /* Extension bit */
+/* RC Profile Operand (rc_profile) */
+#define CEC_OP_FEAT_RC_TV_PROFILE_NONE 0x00
+#define CEC_OP_FEAT_RC_TV_PROFILE_1 0x02
+#define CEC_OP_FEAT_RC_TV_PROFILE_2 0x06
+#define CEC_OP_FEAT_RC_TV_PROFILE_3 0x0a
+#define CEC_OP_FEAT_RC_TV_PROFILE_4 0x0e
+#define CEC_OP_FEAT_RC_SRC_HAS_DEV_ROOT_MENU 0x50
+#define CEC_OP_FEAT_RC_SRC_HAS_DEV_SETUP_MENU 0x48
+#define CEC_OP_FEAT_RC_SRC_HAS_CONTENTS_MENU 0x44
+#define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_TOP_MENU 0x42
+#define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_CONTEXT_MENU 0x41
+/* Device Feature Operand (dev_features) */
+#define CEC_OP_FEAT_DEV_HAS_RECORD_TV_SCREEN 0x40
+#define CEC_OP_FEAT_DEV_HAS_SET_OSD_STRING 0x20
+#define CEC_OP_FEAT_DEV_HAS_DECK_CONTROL 0x10
+#define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_RATE 0x08
+#define CEC_OP_FEAT_DEV_SINK_HAS_ARC_TX 0x04
+#define CEC_OP_FEAT_DEV_SOURCE_HAS_ARC_RX 0x02
+
+#define CEC_MSG_GIVE_FEATURES 0xa5 /* HDMI 2.0 */
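Since the REPORT_FEATURES operands above are single-byte values packed back to back into the message payload, here is a short sketch of what a CEC 2.0 playback device might report. The helper and its buffer are hypothetical; the byte order follows the operand comments above:

    /* Fill the four feature bytes that follow the CEC_MSG_REPORT_FEATURES
     * opcode for a hypothetical CEC 2.0 playback device. */
    static void fill_features(__u8 *buf)
    {
            buf[0] = CEC_OP_CEC_VERSION_2_0;        /* cec_version */
            buf[1] = CEC_OP_ALL_DEVTYPE_PLAYBACK;   /* all_device_types */
            /* rc_profile: CEC_OP_FEAT_EXT (bit 7) would flag a follow-on byte */
            buf[2] = CEC_OP_FEAT_RC_SRC_HAS_DEV_ROOT_MENU;
            /* dev_features */
            buf[3] = CEC_OP_FEAT_DEV_HAS_DECK_CONTROL;
    }

+
+
+/* Deck Control Feature */
+#define CEC_MSG_DECK_CONTROL 0x42
+/* Deck Control Mode Operand (deck_control_mode) */
+#define CEC_OP_DECK_CTL_MODE_SKIP_FWD 1
+#define CEC_OP_DECK_CTL_MODE_SKIP_REV 2
+#define CEC_OP_DECK_CTL_MODE_STOP 3
+#define CEC_OP_DECK_CTL_MODE_EJECT 4
+
+#define CEC_MSG_DECK_STATUS 0x1b
+/* Deck Info Operand (deck_info) */
+#define CEC_OP_DECK_INFO_PLAY 0x11
+#define CEC_OP_DECK_INFO_RECORD 0x12
+#define CEC_OP_DECK_INFO_PLAY_REV 0x13
+#define CEC_OP_DECK_INFO_STILL 0x14
+#define CEC_OP_DECK_INFO_SLOW 0x15
+#define CEC_OP_DECK_INFO_SLOW_REV 0x16
+#define CEC_OP_DECK_INFO_FAST_FWD 0x17
+#define CEC_OP_DECK_INFO_FAST_REV 0x18
+#define CEC_OP_DECK_INFO_NO_MEDIA 0x19
+#define CEC_OP_DECK_INFO_STOP 0x1a
+#define CEC_OP_DECK_INFO_SKIP_FWD 0x1b
+#define CEC_OP_DECK_INFO_SKIP_REV 0x1c
+#define CEC_OP_DECK_INFO_INDEX_SEARCH_FWD 0x1d
+#define 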
CEC_OP_DECK_INFO_INDEX_SEARCH_REV 0x1e +#define CEC_OP_DECK_INFO_OTHER 0x1f + +#define CEC_MSG_GIVE_DECK_STATUS 0x1a +/* Status Request Operand (status_req) */ +#define CEC_OP_STATUS_REQ_ON 1 +#define CEC_OP_STATUS_REQ_OFF 2 +#define CEC_OP_STATUS_REQ_ONCE 3 + +#define CEC_MSG_PLAY 0x41 +/* Play Mode Operand (play_mode) */ +#define CEC_OP_PLAY_MODE_PLAY_FWD 0x24 +#define CEC_OP_PLAY_MODE_PLAY_REV 0x20 +#define CEC_OP_PLAY_MODE_PLAY_STILL 0x25 +#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MIN 0x05 +#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MED 0x06 +#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MAX 0x07 +#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MIN 0x09 +#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MED 0x0a +#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MAX 0x0b +#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MIN 0x15 +#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MED 0x16 +#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MAX 0x17 +#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MIN 0x19 +#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MED 0x1a +#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MAX 0x1b + + +/* Tuner Control Feature */ +#define CEC_MSG_GIVE_TUNER_DEVICE_STATUS 0x08 +#define CEC_MSG_SELECT_ANALOGUE_SERVICE 0x92 +#define CEC_MSG_SELECT_DIGITAL_SERVICE 0x93 +#define CEC_MSG_TUNER_DEVICE_STATUS 0x07 +/* Recording Flag Operand (rec_flag) */ +#define CEC_OP_REC_FLAG_USED 0 +#define CEC_OP_REC_FLAG_NOT_USED 1 +/* Tuner Display Info Operand (tuner_display_info) */ +#define CEC_OP_TUNER_DISPLAY_INFO_DIGITAL 0 +#define CEC_OP_TUNER_DISPLAY_INFO_NONE 1 +#define CEC_OP_TUNER_DISPLAY_INFO_ANALOGUE 2 + +#define CEC_MSG_TUNER_STEP_DECREMENT 0x06 +#define CEC_MSG_TUNER_STEP_INCREMENT 0x05 + + +/* Vendor Specific Commands Feature */ + +/* + * Has also: + * CEC_MSG_CEC_VERSION + * CEC_MSG_GET_CEC_VERSION + */ +#define CEC_MSG_DEVICE_VENDOR_ID 0x87 +#define CEC_MSG_GIVE_DEVICE_VENDOR_ID 0x8c +#define CEC_MSG_VENDOR_COMMAND 0x89 +#define CEC_MSG_VENDOR_COMMAND_WITH_ID 0xa0 +#define CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN 0x8a +#define CEC_MSG_VENDOR_REMOTE_BUTTON_UP 0x8b + + +/* OSD Display Feature */ +#define CEC_MSG_SET_OSD_STRING 0x64 +/* Display Control Operand (disp_ctl) */ +#define CEC_OP_DISP_CTL_DEFAULT 0x00 +#define CEC_OP_DISP_CTL_UNTIL_CLEARED 0x40 +#define CEC_OP_DISP_CTL_CLEAR 0x80 + + +/* Device OSD Transfer Feature */ +#define CEC_MSG_GIVE_OSD_NAME 0x46 +#define CEC_MSG_SET_OSD_NAME 0x47 + + +/* Device Menu Control Feature */ +#define CEC_MSG_MENU_REQUEST 0x8d +/* Menu Request Type Operand (menu_req) */ +#define CEC_OP_MENU_REQUEST_ACTIVATE 0x00 +#define CEC_OP_MENU_REQUEST_DEACTIVATE 0x01 +#define CEC_OP_MENU_REQUEST_QUERY 0x02 + +#define CEC_MSG_MENU_STATUS 0x8e +/* Menu State Operand (menu_state) */ +#define CEC_OP_MENU_STATE_ACTIVATED 0x00 +#define CEC_OP_MENU_STATE_DEACTIVATED 0x01 + +#define CEC_MSG_USER_CONTROL_PRESSED 0x44 +/* UI Broadcast Type Operand (ui_bcast_type) */ +#define CEC_OP_UI_BCAST_TYPE_TOGGLE_ALL 0x00 +#define CEC_OP_UI_BCAST_TYPE_TOGGLE_DIG_ANA 0x01 +#define CEC_OP_UI_BCAST_TYPE_ANALOGUE 0x10 +#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_T 0x20 +#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_CABLE 0x30 +#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_SAT 0x40 +#define CEC_OP_UI_BCAST_TYPE_DIGITAL 0x50 +#define CEC_OP_UI_BCAST_TYPE_DIGITAL_T 0x60 +#define CEC_OP_UI_BCAST_TYPE_DIGITAL_CABLE 0x70 +#define CEC_OP_UI_BCAST_TYPE_DIGITAL_SAT 0x80 +#define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT 0x90 +#define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT2 0x91 +#define CEC_OP_UI_BCAST_TYPE_IP 0xa0 +/* UI Sound Presentation Control Operand (ui_snd_pres_ctl) */ +#define 
CEC_OP_UI_SND_PRES_CTL_DUAL_MONO 0x10 +#define CEC_OP_UI_SND_PRES_CTL_KARAOKE 0x20 +#define CEC_OP_UI_SND_PRES_CTL_DOWNMIX 0x80 +#define CEC_OP_UI_SND_PRES_CTL_REVERB 0x90 +#define CEC_OP_UI_SND_PRES_CTL_EQUALIZER 0xa0 +#define CEC_OP_UI_SND_PRES_CTL_BASS_UP 0xb1 +#define CEC_OP_UI_SND_PRES_CTL_BASS_NEUTRAL 0xb2 +#define CEC_OP_UI_SND_PRES_CTL_BASS_DOWN 0xb3 +#define CEC_OP_UI_SND_PRES_CTL_TREBLE_UP 0xc1 +#define CEC_OP_UI_SND_PRES_CTL_TREBLE_NEUTRAL 0xc2 +#define CEC_OP_UI_SND_PRES_CTL_TREBLE_DOWN 0xc3 + +#define CEC_MSG_USER_CONTROL_RELEASED 0x45 + + +/* Remote Control Passthrough Feature */ + +/* + * Has also: + * CEC_MSG_USER_CONTROL_PRESSED + * CEC_MSG_USER_CONTROL_RELEASED + */ + + +/* Power Status Feature */ +#define CEC_MSG_GIVE_DEVICE_POWER_STATUS 0x8f +#define CEC_MSG_REPORT_POWER_STATUS 0x90 +/* Power Status Operand (pwr_state) */ +#define CEC_OP_POWER_STATUS_ON 0 +#define CEC_OP_POWER_STATUS_STANDBY 1 +#define CEC_OP_POWER_STATUS_TO_ON 2 +#define CEC_OP_POWER_STATUS_TO_STANDBY 3 + + +/* General Protocol Messages */ +#define CEC_MSG_FEATURE_ABORT 0x00 +/* Abort Reason Operand (reason) */ +#define CEC_OP_ABORT_UNRECOGNIZED_OP 0 +#define CEC_OP_ABORT_INCORRECT_MODE 1 +#define CEC_OP_ABORT_NO_SOURCE 2 +#define CEC_OP_ABORT_INVALID_OP 3 +#define CEC_OP_ABORT_REFUSED 4 +#define CEC_OP_ABORT_UNDETERMINED 5 + +#define CEC_MSG_ABORT 0xff + + +/* System Audio Control Feature */ + +/* + * Has also: + * CEC_MSG_USER_CONTROL_PRESSED + * CEC_MSG_USER_CONTROL_RELEASED + */ +#define CEC_MSG_GIVE_AUDIO_STATUS 0x71 +#define CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS 0x7d +#define CEC_MSG_REPORT_AUDIO_STATUS 0x7a +/* Audio Mute Status Operand (aud_mute_status) */ +#define CEC_OP_AUD_MUTE_STATUS_OFF 0 +#define CEC_OP_AUD_MUTE_STATUS_ON 1 + +#define CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR 0xa3 +#define CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR 0xa4 +#define CEC_MSG_SET_SYSTEM_AUDIO_MODE 0x72 +/* System Audio Status Operand (sys_aud_status) */ +#define CEC_OP_SYS_AUD_STATUS_OFF 0 +#define CEC_OP_SYS_AUD_STATUS_ON 1 + +#define CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST 0x70 +#define CEC_MSG_SYSTEM_AUDIO_MODE_STATUS 0x7e +/* Audio Format ID Operand (audio_format_id) */ +#define CEC_OP_AUD_FMT_ID_CEA861 0 +#define CEC_OP_AUD_FMT_ID_CEA861_CXT 1 + + +/* Audio Rate Control Feature */ +#define CEC_MSG_SET_AUDIO_RATE 0x9a +/* Audio Rate Operand (audio_rate) */ +#define CEC_OP_AUD_RATE_OFF 0 +#define CEC_OP_AUD_RATE_WIDE_STD 1 +#define CEC_OP_AUD_RATE_WIDE_FAST 2 +#define CEC_OP_AUD_RATE_WIDE_SLOW 3 +#define CEC_OP_AUD_RATE_NARROW_STD 4 +#define CEC_OP_AUD_RATE_NARROW_FAST 5 +#define CEC_OP_AUD_RATE_NARROW_SLOW 6 + + +/* Audio Return Channel Control Feature */ +#define CEC_MSG_INITIATE_ARC 0xc0 +#define CEC_MSG_REPORT_ARC_INITIATED 0xc1 +#define CEC_MSG_REPORT_ARC_TERMINATED 0xc2 +#define CEC_MSG_REQUEST_ARC_INITIATION 0xc3 +#define CEC_MSG_REQUEST_ARC_TERMINATION 0xc4 +#define CEC_MSG_TERMINATE_ARC 0xc5 + + +/* Dynamic Audio Lipsync Feature */ +/* Only for CEC 2.0 and up */ +#define CEC_MSG_REQUEST_CURRENT_LATENCY 0xa7 +#define CEC_MSG_REPORT_CURRENT_LATENCY 0xa8 +/* Low Latency Mode Operand (low_latency_mode) */ +#define CEC_OP_LOW_LATENCY_MODE_OFF 0 +#define CEC_OP_LOW_LATENCY_MODE_ON 1 +/* Audio Output Compensated Operand (audio_out_compensated) */ +#define CEC_OP_AUD_OUT_COMPENSATED_NA 0 +#define CEC_OP_AUD_OUT_COMPENSATED_DELAY 1 +#define CEC_OP_AUD_OUT_COMPENSATED_NO_DELAY 2 +#define CEC_OP_AUD_OUT_COMPENSATED_PARTIAL_DELAY 3 + + +/* Capability Discovery and Control Feature */ +#define CEC_MSG_CDC_MESSAGE 0xf8 +/* 
Ethernet-over-HDMI: nobody ever does this... */
+#define CEC_MSG_CDC_HEC_INQUIRE_STATE 0x00
+#define CEC_MSG_CDC_HEC_REPORT_STATE 0x01
+/* HEC Functionality State Operand (hec_func_state) */
+#define CEC_OP_HEC_FUNC_STATE_NOT_SUPPORTED 0
+#define CEC_OP_HEC_FUNC_STATE_INACTIVE 1
+#define CEC_OP_HEC_FUNC_STATE_ACTIVE 2
+#define CEC_OP_HEC_FUNC_STATE_ACTIVATION_FIELD 3
+/* Host Functionality State Operand (host_func_state) */
+#define CEC_OP_HOST_FUNC_STATE_NOT_SUPPORTED 0
+#define CEC_OP_HOST_FUNC_STATE_INACTIVE 1
+#define CEC_OP_HOST_FUNC_STATE_ACTIVE 2
+/* ENC Functionality State Operand (enc_func_state) */
+#define CEC_OP_ENC_FUNC_STATE_EXT_CON_NOT_SUPPORTED 0
+#define CEC_OP_ENC_FUNC_STATE_EXT_CON_INACTIVE 1
+#define CEC_OP_ENC_FUNC_STATE_EXT_CON_ACTIVE 2
+/* CDC Error Code Operand (cdc_errcode) */
+#define CEC_OP_CDC_ERROR_CODE_NONE 0
+#define CEC_OP_CDC_ERROR_CODE_CAP_UNSUPPORTED 1
+#define CEC_OP_CDC_ERROR_CODE_WRONG_STATE 2
+#define CEC_OP_CDC_ERROR_CODE_OTHER 3
+/* HEC Support Operand (hec_support) */
+#define CEC_OP_HEC_SUPPORT_NO 0
+#define CEC_OP_HEC_SUPPORT_YES 1
+/* HEC Activation Operand (hec_activation) */
+#define CEC_OP_HEC_ACTIVATION_ON 0
+#define CEC_OP_HEC_ACTIVATION_OFF 1
+
+#define CEC_MSG_CDC_HEC_SET_STATE_ADJACENT 0x02
+#define CEC_MSG_CDC_HEC_SET_STATE 0x03
+/* HEC Set State Operand (hec_set_state) */
+#define CEC_OP_HEC_SET_STATE_DEACTIVATE 0
+#define CEC_OP_HEC_SET_STATE_ACTIVATE 1
+
+#define CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION 0x04
+#define CEC_MSG_CDC_HEC_NOTIFY_ALIVE 0x05
+#define CEC_MSG_CDC_HEC_DISCOVER 0x06
+/* Hotplug Detect messages */
+#define CEC_MSG_CDC_HPD_SET_STATE 0x10
+/* HPD State Operand (hpd_state) */
+#define CEC_OP_HPD_STATE_CP_EDID_DISABLE 0
+#define CEC_OP_HPD_STATE_CP_EDID_ENABLE 1
+#define CEC_OP_HPD_STATE_CP_EDID_DISABLE_ENABLE 2
+#define CEC_OP_HPD_STATE_EDID_DISABLE 3
+#define CEC_OP_HPD_STATE_EDID_ENABLE 4
+#define CEC_OP_HPD_STATE_EDID_DISABLE_ENABLE 5
+#define CEC_MSG_CDC_HPD_REPORT_STATE 0x11
+/* HPD Error Code Operand (hpd_error) */
+#define CEC_OP_HPD_ERROR_NONE 0
+#define CEC_OP_HPD_ERROR_INITIATOR_NOT_CAPABLE 1
+#define CEC_OP_HPD_ERROR_INITIATOR_WRONG_STATE 2
+#define CEC_OP_HPD_ERROR_OTHER 3
+#define CEC_OP_HPD_ERROR_NONE_NO_VIDEO 4
+
+#endif
diff --git a/include/linux/ceph/ceph_frag.h b/include/linux/ceph/ceph_frag.h
index b827e066e55a..146507df8650 100644
--- a/include/linux/ceph/ceph_frag.h
+++ b/include/linux/ceph/ceph_frag.h
@@ -51,11 +51,11 @@ static inline __u32 ceph_frag_make_child(__u32 f, int by, int i)
 	return ceph_frag_make(newbits,
 			 ceph_frag_value(f) | (i << (24 - newbits)));
 }
-static inline int ceph_frag_is_leftmost(__u32 f)
+static inline bool ceph_frag_is_leftmost(__u32 f)
 {
 	return ceph_frag_value(f) == 0;
 }
-static inline int ceph_frag_is_rightmost(__u32 f)
+static inline bool ceph_frag_is_rightmost(__u32 f)
 {
 	return ceph_frag_value(f) == ceph_frag_mask(f);
 }
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 37f28bf55ce4..7868d602c0a0 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -34,9 +34,9 @@
 #define CEPH_MAX_MON 31
 /*
- * ceph_file_layout - describe data layout for a file/inode
+ * legacy ceph_file_layout
  */
-struct ceph_file_layout {
+struct ceph_file_layout_legacy {
 	/* file -> object mapping */
 	__le32 fl_stripe_unit; /* stripe unit, in bytes. must be multiple of page size. 
*/ @@ -53,32 +53,26 @@ struct ceph_file_layout { __le32 fl_pg_pool; /* namespace, crush ruleset, rep level */ } __attribute__ ((packed)); -#define ceph_file_layout_su(l) ((__s32)le32_to_cpu((l).fl_stripe_unit)) -#define ceph_file_layout_stripe_count(l) \ - ((__s32)le32_to_cpu((l).fl_stripe_count)) -#define ceph_file_layout_object_size(l) ((__s32)le32_to_cpu((l).fl_object_size)) -#define ceph_file_layout_cas_hash(l) ((__s32)le32_to_cpu((l).fl_cas_hash)) -#define ceph_file_layout_object_su(l) \ - ((__s32)le32_to_cpu((l).fl_object_stripe_unit)) -#define ceph_file_layout_pg_pool(l) \ - ((__s32)le32_to_cpu((l).fl_pg_pool)) - -static inline unsigned ceph_file_layout_stripe_width(struct ceph_file_layout *l) -{ - return le32_to_cpu(l->fl_stripe_unit) * - le32_to_cpu(l->fl_stripe_count); -} - -/* "period" == bytes before i start on a new set of objects */ -static inline unsigned ceph_file_layout_period(struct ceph_file_layout *l) -{ - return le32_to_cpu(l->fl_object_size) * - le32_to_cpu(l->fl_stripe_count); -} +struct ceph_string; +/* + * ceph_file_layout - describe data layout for a file/inode + */ +struct ceph_file_layout { + /* file -> object mapping */ + u32 stripe_unit; /* stripe unit, in bytes */ + u32 stripe_count; /* over this many objects */ + u32 object_size; /* until objects are this big */ + s64 pool_id; /* rados pool id */ + struct ceph_string __rcu *pool_ns; /* rados pool namespace */ +}; -#define CEPH_MIN_STRIPE_UNIT 65536 +extern int ceph_file_layout_is_valid(const struct ceph_file_layout *layout); +extern void ceph_file_layout_from_legacy(struct ceph_file_layout *fl, + struct ceph_file_layout_legacy *legacy); +extern void ceph_file_layout_to_legacy(struct ceph_file_layout *fl, + struct ceph_file_layout_legacy *legacy); -int ceph_file_layout_is_valid(const struct ceph_file_layout *layout); +#define CEPH_MIN_STRIPE_UNIT 65536 struct ceph_dir_layout { __u8 dl_dir_hash; /* see ceph_hash.h for ids */ @@ -127,6 +121,7 @@ struct ceph_dir_layout { /* client <-> mds */ #define CEPH_MSG_MDS_MAP 21 +#define CEPH_MSG_FS_MAP_USER 103 #define CEPH_MSG_CLIENT_SESSION 22 #define CEPH_MSG_CLIENT_RECONNECT 23 @@ -153,8 +148,9 @@ struct ceph_dir_layout { /* watch-notify operations */ enum { - WATCH_NOTIFY = 1, /* notifying watcher */ - WATCH_NOTIFY_COMPLETE = 2, /* notifier notified when done */ + CEPH_WATCH_EVENT_NOTIFY = 1, /* notifying watcher */ + CEPH_WATCH_EVENT_NOTIFY_COMPLETE = 2, /* notifier notified when done */ + CEPH_WATCH_EVENT_DISCONNECT = 3, /* we were disconnected */ }; @@ -207,6 +203,8 @@ struct ceph_mon_subscribe_ack { struct ceph_fsid fsid; } __attribute__ ((packed)); +#define CEPH_FS_CLUSTER_ID_NONE -1 + /* * mdsmap flags */ @@ -344,6 +342,18 @@ extern const char *ceph_mds_op_name(int op); #define CEPH_XATTR_REPLACE (1 << 1) #define CEPH_XATTR_REMOVE (1 << 31) +/* + * readdir request flags; + */ +#define CEPH_READDIR_REPLY_BITFLAGS (1<<0) + +/* + * readdir reply flags. 
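+ *
+ * For example, a reply that finishes listing the final fragment would
+ * presumably carry both CEPH_READDIR_FRAG_END and
+ * CEPH_READDIR_FRAG_COMPLETE, plus CEPH_READDIR_HASH_ORDER when the
+ * MDS returns entries in hash order.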
+ */ +#define CEPH_READDIR_FRAG_END (1<<0) +#define CEPH_READDIR_FRAG_COMPLETE (1<<8) +#define CEPH_READDIR_HASH_ORDER (1<<9) + union ceph_mds_request_args { struct { __le32 mask; /* CEPH_CAP_* */ @@ -361,6 +371,7 @@ union ceph_mds_request_args { __le32 frag; /* which dir fragment */ __le32 max_entries; /* how many dentries to grab */ __le32 max_bytes; + __le16 flags; } __attribute__ ((packed)) readdir; struct { __le32 mode; @@ -383,7 +394,7 @@ union ceph_mds_request_args { __le32 flags; } __attribute__ ((packed)) setxattr; struct { - struct ceph_file_layout layout; + struct ceph_file_layout_legacy layout; } __attribute__ ((packed)) setlayout; struct { __u8 rule; /* currently fcntl or flock */ @@ -462,7 +473,7 @@ struct ceph_mds_reply_inode { __le64 version; /* inode version */ __le64 xattr_version; /* version for xattr blob */ struct ceph_mds_reply_cap cap; /* caps issued for this inode */ - struct ceph_file_layout layout; + struct ceph_file_layout_legacy layout; struct ceph_timespec ctime, mtime, atime; __le32 time_warp_seq; __le64 size, max_size, truncate_size; @@ -515,7 +526,7 @@ struct ceph_filelock { #define CEPH_FILE_MODE_WR 2 #define CEPH_FILE_MODE_RDWR 3 /* RD | WR */ #define CEPH_FILE_MODE_LAZY 4 /* lazy io */ -#define CEPH_FILE_MODE_NUM 8 /* bc these are bit fields.. mostly */ +#define CEPH_FILE_MODE_BITS 4 int ceph_flags_to_mode(int flags); @@ -657,7 +668,7 @@ struct ceph_mds_caps { __le64 size, max_size, truncate_size; __le32 truncate_seq; struct ceph_timespec mtime, atime, ctime; - struct ceph_file_layout layout; + struct ceph_file_layout_legacy layout; __le32 time_warp_seq; } __attribute__ ((packed)); diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h index a6ef9cc267ec..f990f2cc907a 100644 --- a/include/linux/ceph/decode.h +++ b/include/linux/ceph/decode.h @@ -3,6 +3,7 @@ #include <linux/err.h> #include <linux/bug.h> +#include <linux/slab.h> #include <linux/time.h> #include <asm/unaligned.h> @@ -47,7 +48,7 @@ static inline void ceph_decode_copy(void **p, void *pv, size_t n) /* * bounds check input. */ -static inline int ceph_has_room(void **p, void *end, size_t n) +static inline bool ceph_has_room(void **p, void *end, size_t n) { return end >= *p && n <= end - *p; } @@ -217,6 +218,60 @@ static inline void ceph_encode_string(void **p, void *end, *p += len; } +/* + * version and length starting block encoders/decoders + */ + +/* current code version (u8) + compat code version (u8) + len of struct (u32) */ +#define CEPH_ENCODING_START_BLK_LEN 6 + +/** + * ceph_start_encoding - start encoding block + * @struct_v: current (code) version of the encoding + * @struct_compat: oldest code version that can decode it + * @struct_len: length of struct encoding + */ +static inline void ceph_start_encoding(void **p, u8 struct_v, u8 struct_compat, + u32 struct_len) +{ + ceph_encode_8(p, struct_v); + ceph_encode_8(p, struct_compat); + ceph_encode_32(p, struct_len); +} + +/** + * ceph_start_decoding - start decoding block + * @v: current version of the encoding that the code supports + * @name: name of the struct (free-form) + * @struct_v: out param for the encoding version + * @struct_len: out param for the length of struct encoding + * + * Validates the length of struct encoding, so unsafe ceph_decode_* + * variants can be used for decoding. 
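+ *
+ * A minimal call sketch (hypothetical caller and struct name, error
+ * handling trimmed):
+ *
+ *	u8 struct_v;
+ *	u32 struct_len;
+ *	int ret;
+ *
+ *	ret = ceph_start_decoding(&p, end, 1, "foo_info",
+ *				  &struct_v, &struct_len);
+ *	if (ret)
+ *		return ret;
+ *	(unsafe ceph_decode_* calls are now bounded by struct_len)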
+ */ +static inline int ceph_start_decoding(void **p, void *end, u8 v, + const char *name, u8 *struct_v, + u32 *struct_len) +{ + u8 struct_compat; + + ceph_decode_need(p, end, CEPH_ENCODING_START_BLK_LEN, bad); + *struct_v = ceph_decode_8(p); + struct_compat = ceph_decode_8(p); + if (v < struct_compat) { + pr_warn("got struct_v %d struct_compat %d > %d of %s\n", + *struct_v, struct_compat, v, name); + return -EINVAL; + } + + *struct_len = ceph_decode_32(p); + ceph_decode_need(p, end, *struct_len, bad); + return 0; + +bad: + return -ERANGE; +} + #define ceph_encode_need(p, end, n, bad) \ do { \ if (!likely(ceph_has_room(p, end, n))) \ diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index db92a8d4926e..83fc1fff7061 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -21,6 +21,7 @@ #include <linux/ceph/mon_client.h> #include <linux/ceph/osd_client.h> #include <linux/ceph/ceph_fs.h> +#include <linux/ceph/string_table.h> /* * mount options @@ -180,6 +181,64 @@ static inline int calc_pages_for(u64 off, u64 len) (off >> PAGE_SHIFT); } +/* + * These are not meant to be generic - an integer key is assumed. + */ +#define DEFINE_RB_INSDEL_FUNCS(name, type, keyfld, nodefld) \ +static void insert_##name(struct rb_root *root, type *t) \ +{ \ + struct rb_node **n = &root->rb_node; \ + struct rb_node *parent = NULL; \ + \ + BUG_ON(!RB_EMPTY_NODE(&t->nodefld)); \ + \ + while (*n) { \ + type *cur = rb_entry(*n, type, nodefld); \ + \ + parent = *n; \ + if (t->keyfld < cur->keyfld) \ + n = &(*n)->rb_left; \ + else if (t->keyfld > cur->keyfld) \ + n = &(*n)->rb_right; \ + else \ + BUG(); \ + } \ + \ + rb_link_node(&t->nodefld, parent, n); \ + rb_insert_color(&t->nodefld, root); \ +} \ +static void erase_##name(struct rb_root *root, type *t) \ +{ \ + BUG_ON(RB_EMPTY_NODE(&t->nodefld)); \ + rb_erase(&t->nodefld, root); \ + RB_CLEAR_NODE(&t->nodefld); \ +} + +#define DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld) \ +extern type __lookup_##name##_key; \ +static type *lookup_##name(struct rb_root *root, \ + typeof(__lookup_##name##_key.keyfld) key) \ +{ \ + struct rb_node *n = root->rb_node; \ + \ + while (n) { \ + type *cur = rb_entry(n, type, nodefld); \ + \ + if (key < cur->keyfld) \ + n = n->rb_left; \ + else if (key > cur->keyfld) \ + n = n->rb_right; \ + else \ + return cur; \ + } \ + \ + return NULL; \ +} + +#define DEFINE_RB_FUNCS(name, type, keyfld, nodefld) \ +DEFINE_RB_INSDEL_FUNCS(name, type, keyfld, nodefld) \ +DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld) + extern struct kmem_cache *ceph_inode_cachep; extern struct kmem_cache *ceph_cap_cachep; extern struct kmem_cache *ceph_cap_flush_cachep; diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h index e230e7ed60d3..24d704d1ea5c 100644 --- a/include/linux/ceph/mon_client.h +++ b/include/linux/ceph/mon_client.h @@ -39,20 +39,31 @@ struct ceph_mon_request { ceph_monc_request_func_t do_request; }; +typedef void (*ceph_monc_callback_t)(struct ceph_mon_generic_request *); + /* * ceph_mon_generic_request is being used for the statfs and * mon_get_version requests which are being done a bit differently * because we need to get data back to the caller */ struct ceph_mon_generic_request { + struct ceph_mon_client *monc; struct kref kref; u64 tid; struct rb_node node; int result; - void *buf; + struct completion completion; + ceph_monc_callback_t complete_cb; + u64 private_data; /* r_tid/linger_id */ + struct ceph_msg *request; /* original request */ struct ceph_msg *reply; /* 
and reply */ + + union { + struct ceph_statfs *st; + u64 newest; + } u; }; struct ceph_mon_client { @@ -77,7 +88,6 @@ struct ceph_mon_client { /* pending generic requests */ struct rb_root generic_request_tree; - int num_generic_requests; u64 last_tid; /* subs, indexed with CEPH_SUB_* */ @@ -85,7 +95,8 @@ struct ceph_mon_client { struct ceph_mon_subscribe_item item; bool want; u32 have; /* epoch */ - } subs[3]; + } subs[4]; + int fs_cluster_id; /* "mdsmap.<id>" sub */ #ifdef CONFIG_DEBUG_FS struct dentry *debugfs_file; @@ -100,9 +111,10 @@ extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl); extern void ceph_monc_stop(struct ceph_mon_client *monc); enum { - CEPH_SUB_MDSMAP = 0, - CEPH_SUB_MONMAP, + CEPH_SUB_MONMAP = 0, CEPH_SUB_OSDMAP, + CEPH_SUB_FSMAP, + CEPH_SUB_MDSMAP, }; extern const char *ceph_sub_str[]; @@ -116,16 +128,18 @@ extern const char *ceph_sub_str[]; bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch, bool continuous); void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch); +void ceph_monc_renew_subs(struct ceph_mon_client *monc); -extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc); extern int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch, unsigned long timeout); extern int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf); -extern int ceph_monc_do_get_version(struct ceph_mon_client *monc, - const char *what, u64 *newest); +int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what, + u64 *newest); +int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what, + ceph_monc_callback_t cb, u64 private_data); extern int ceph_monc_open_session(struct ceph_mon_client *monc); diff --git a/include/linux/ceph/msgpool.h b/include/linux/ceph/msgpool.h index 4b0d38960726..ddd0d48d0384 100644 --- a/include/linux/ceph/msgpool.h +++ b/include/linux/ceph/msgpool.h @@ -2,7 +2,6 @@ #define _FS_CEPH_MSGPOOL #include <linux/mempool.h> -#include <linux/ceph/messenger.h> /* * we use memory pools for preallocating messages we may receive, to diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index cbf460927c42..858932304260 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -9,6 +9,7 @@ #include <linux/ceph/types.h> #include <linux/ceph/osdmap.h> #include <linux/ceph/messenger.h> +#include <linux/ceph/msgpool.h> #include <linux/ceph/auth.h> #include <linux/ceph/pagelist.h> @@ -20,10 +21,11 @@ struct ceph_osd_client; /* * completion callback for async writepages */ -typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *, - struct ceph_msg *); +typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *); typedef void (*ceph_osdc_unsafe_callback_t)(struct ceph_osd_request *, bool); +#define CEPH_HOMELESS_OSD -1 + /* a given osd we're communicating with */ struct ceph_osd { atomic_t o_ref; @@ -32,16 +34,15 @@ struct ceph_osd { int o_incarnation; struct rb_node o_node; struct ceph_connection o_con; - struct list_head o_requests; - struct list_head o_linger_requests; + struct rb_root o_requests; + struct rb_root o_linger_requests; struct list_head o_osd_lru; struct ceph_auth_handshake o_auth; unsigned long lru_ttl; - int o_marked_for_keepalive; struct list_head o_keepalive_item; + struct mutex lock; }; - #define CEPH_OSD_SLAB_OPS 2 #define CEPH_OSD_MAX_OPS 16 @@ -104,76 +105,95 @@ struct ceph_osd_req_op { struct ceph_osd_data response_data; __u8 class_len; __u8 
method_len; - __u8 argc; + u32 indata_len; } cls; struct { u64 cookie; - u64 ver; - u32 prot_ver; - u32 timeout; - __u8 flag; + __u8 op; /* CEPH_OSD_WATCH_OP_ */ + u32 gen; } watch; struct { + struct ceph_osd_data request_data; + } notify_ack; + struct { + u64 cookie; + struct ceph_osd_data request_data; + struct ceph_osd_data response_data; + } notify; + struct { u64 expected_object_size; u64 expected_write_size; } alloc_hint; }; }; +struct ceph_osd_request_target { + struct ceph_object_id base_oid; + struct ceph_object_locator base_oloc; + struct ceph_object_id target_oid; + struct ceph_object_locator target_oloc; + + struct ceph_pg pgid; + u32 pg_num; + u32 pg_num_mask; + struct ceph_osds acting; + struct ceph_osds up; + int size; + int min_size; + bool sort_bitwise; + + unsigned int flags; /* CEPH_OSD_FLAG_* */ + bool paused; + + int osd; +}; + /* an in-flight request */ struct ceph_osd_request { u64 r_tid; /* unique for this client */ struct rb_node r_node; - struct list_head r_req_lru_item; - struct list_head r_osd_item; - struct list_head r_linger_item; - struct list_head r_linger_osd_item; + struct rb_node r_mc_node; /* map check */ struct ceph_osd *r_osd; - struct ceph_pg r_pgid; - int r_pg_osds[CEPH_PG_MAX_SIZE]; - int r_num_pg_osds; + + struct ceph_osd_request_target r_t; +#define r_base_oid r_t.base_oid +#define r_base_oloc r_t.base_oloc +#define r_flags r_t.flags struct ceph_msg *r_request, *r_reply; - int r_flags; /* any additional flags for the osd */ u32 r_sent; /* >0 if r_request is sending/sent */ /* request osd ops array */ unsigned int r_num_ops; - /* these are updated on each send */ - __le32 *r_request_osdmap_epoch; - __le32 *r_request_flags; - __le64 *r_request_pool; - void *r_request_pgid; - __le32 *r_request_attempts; - bool r_paused; - struct ceph_eversion *r_request_reassert_version; - int r_result; - int r_got_reply; - int r_linger; + bool r_got_reply; struct ceph_osd_client *r_osdc; struct kref r_kref; bool r_mempool; - struct completion r_completion, r_safe_completion; + struct completion r_completion; + struct completion r_safe_completion; /* fsync waiter */ ceph_osdc_callback_t r_callback; ceph_osdc_unsafe_callback_t r_unsafe_callback; - struct ceph_eversion r_reassert_version; struct list_head r_unsafe_item; struct inode *r_inode; /* for use by callbacks */ void *r_priv; /* ditto */ - struct ceph_object_locator r_base_oloc; - struct ceph_object_id r_base_oid; - struct ceph_object_locator r_target_oloc; - struct ceph_object_id r_target_oid; - - u64 r_snapid; - unsigned long r_stamp; /* send OR check time */ + /* set by submitter */ + u64 r_snapid; /* for reads, CEPH_NOSNAP o/w */ + struct ceph_snap_context *r_snapc; /* for writes */ + struct timespec r_mtime; /* ditto */ + u64 r_data_offset; /* ditto */ + bool r_linger; /* don't resend on failure */ - struct ceph_snap_context *r_snapc; /* snap context for writes */ + /* internal */ + unsigned long r_stamp; /* jiffies, send or check time */ + int r_attempts; + struct ceph_eversion r_replay_version; /* aka reassert_version */ + u32 r_last_force_resend; + u32 r_map_dne_bound; struct ceph_osd_req_op r_ops[]; }; @@ -182,44 +202,70 @@ struct ceph_request_redirect { struct ceph_object_locator oloc; }; -struct ceph_osd_event { - u64 cookie; - int one_shot; +typedef void (*rados_watchcb2_t)(void *arg, u64 notify_id, u64 cookie, + u64 notifier_id, void *data, size_t data_len); +typedef void (*rados_watcherrcb_t)(void *arg, u64 cookie, int err); + +struct ceph_osd_linger_request { struct ceph_osd_client *osdc; - void 
(*cb)(u64, u64, u8, void *); - void *data; - struct rb_node node; - struct list_head osd_node; + u64 linger_id; + bool committed; + bool is_watch; /* watch or notify */ + + struct ceph_osd *osd; + struct ceph_osd_request *reg_req; + struct ceph_osd_request *ping_req; + unsigned long ping_sent; + unsigned long watch_valid_thru; + struct list_head pending_lworks; + + struct ceph_osd_request_target t; + u32 last_force_resend; + u32 map_dne_bound; + + struct timespec mtime; + struct kref kref; -}; + struct mutex lock; + struct rb_node node; /* osd */ + struct rb_node osdc_node; /* osdc */ + struct rb_node mc_node; /* map check */ + struct list_head scan_item; + + struct completion reg_commit_wait; + struct completion notify_finish_wait; + int reg_commit_error; + int notify_finish_error; + int last_error; + + u32 register_gen; + u64 notify_id; + + rados_watchcb2_t wcb; + rados_watcherrcb_t errcb; + void *data; -struct ceph_osd_event_work { - struct work_struct work; - struct ceph_osd_event *event; - u64 ver; - u64 notify_id; - u8 opcode; + struct page ***preply_pages; + size_t *preply_len; }; struct ceph_osd_client { struct ceph_client *client; struct ceph_osdmap *osdmap; /* current map */ - struct rw_semaphore map_sem; - struct completion map_waiters; - u64 last_requested_map; + struct rw_semaphore lock; - struct mutex request_mutex; struct rb_root osds; /* osds */ struct list_head osd_lru; /* idle osds */ - u64 timeout_tid; /* tid of timeout triggering rq */ - u64 last_tid; /* tid of last request */ - struct rb_root requests; /* pending requests */ - struct list_head req_lru; /* in-flight lru */ - struct list_head req_unsent; /* unsent/need-resend queue */ - struct list_head req_notarget; /* map to no osd */ - struct list_head req_linger; /* lingering requests */ - int num_requests; + spinlock_t osd_lru_lock; + struct ceph_osd homeless_osd; + atomic64_t last_tid; /* tid of last request */ + u64 last_linger_id; + struct rb_root linger_requests; /* lingering requests */ + struct rb_root map_checks; + struct rb_root linger_map_checks; + atomic_t num_requests; + atomic_t num_homeless; struct delayed_work timeout_work; struct delayed_work osds_timeout_work; #ifdef CONFIG_DEBUG_FS @@ -231,13 +277,14 @@ struct ceph_osd_client { struct ceph_msgpool msgpool_op; struct ceph_msgpool msgpool_op_reply; - spinlock_t event_lock; - struct rb_root event_tree; - u64 event_count; - struct workqueue_struct *notify_wq; }; +static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag) +{ + return osdc->osdmap->flags & flag; +} + extern int ceph_osdc_setup(void); extern void ceph_osdc_cleanup(void); @@ -271,9 +318,6 @@ extern void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req, extern struct ceph_osd_data *osd_req_op_extent_osd_data( struct ceph_osd_request *osd_req, unsigned int which); -extern struct ceph_osd_data *osd_req_op_cls_response_data( - struct ceph_osd_request *osd_req, - unsigned int which); extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *, unsigned int which, @@ -309,9 +353,6 @@ extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req, extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, const char *name, const void *value, size_t size, u8 cmp_op, u8 cmp_mode); -extern void osd_req_op_watch_init(struct ceph_osd_request *osd_req, - unsigned int which, u16 opcode, - u64 cookie, u64 version, int flag); extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req, unsigned int which, u64 
expected_object_size, @@ -322,11 +363,7 @@ extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client * unsigned int num_ops, bool use_mempool, gfp_t gfp_flags); - -extern void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off, - struct ceph_snap_context *snapc, - u64 snap_id, - struct timespec *mtime); +int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp); extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, struct ceph_file_layout *layout, @@ -338,9 +375,6 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, u32 truncate_seq, u64 truncate_size, bool use_mempool); -extern void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc, - struct ceph_osd_request *req); - extern void ceph_osdc_get_request(struct ceph_osd_request *req); extern void ceph_osdc_put_request(struct ceph_osd_request *req); @@ -353,6 +387,7 @@ extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc, extern void ceph_osdc_sync(struct ceph_osd_client *osdc); extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc); +void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc); extern int ceph_osdc_readpages(struct ceph_osd_client *osdc, struct ceph_vino vino, @@ -371,11 +406,33 @@ extern int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct timespec *mtime, struct page **pages, int nr_pages); -/* watch/notify events */ -extern int ceph_osdc_create_event(struct ceph_osd_client *osdc, - void (*event_cb)(u64, u64, u8, void *), - void *data, struct ceph_osd_event **pevent); -extern void ceph_osdc_cancel_event(struct ceph_osd_event *event); -extern void ceph_osdc_put_event(struct ceph_osd_event *event); +/* watch/notify */ +struct ceph_osd_linger_request * +ceph_osdc_watch(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + rados_watchcb2_t wcb, + rados_watcherrcb_t errcb, + void *data); +int ceph_osdc_unwatch(struct ceph_osd_client *osdc, + struct ceph_osd_linger_request *lreq); + +int ceph_osdc_notify_ack(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + u64 notify_id, + u64 cookie, + void *payload, + size_t payload_len); +int ceph_osdc_notify(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + void *payload, + size_t payload_len, + u32 timeout, + struct page ***preply_pages, + size_t *preply_len); +int ceph_osdc_watch_check(struct ceph_osd_client *osdc, + struct ceph_osd_linger_request *lreq); #endif diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index e55c08bc3a96..9a9041784dcf 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h @@ -24,21 +24,29 @@ struct ceph_pg { uint32_t seed; }; -#define CEPH_POOL_FLAG_HASHPSPOOL 1 +int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs); + +#define CEPH_POOL_FLAG_HASHPSPOOL (1ULL << 0) /* hash pg seed and pool id + together */ +#define CEPH_POOL_FLAG_FULL (1ULL << 1) /* pool is full */ struct ceph_pg_pool_info { struct rb_node node; s64 id; - u8 type; + u8 type; /* CEPH_POOL_TYPE_* */ u8 size; + u8 min_size; u8 crush_ruleset; u8 object_hash; + u32 last_force_request_resend; u32 pg_num, pgp_num; int pg_num_mask, pgp_num_mask; s64 read_tier; s64 write_tier; /* wins for read+write ops */ - u64 flags; + u64 flags; /* CEPH_POOL_FLAG_* */ char *name; + + bool was_full; /* for handle_one_map() */ }; static inline bool ceph_can_shift_osds(struct 
ceph_pg_pool_info *pool) @@ -55,8 +63,24 @@ static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool) struct ceph_object_locator { s64 pool; + struct ceph_string *pool_ns; }; +static inline void ceph_oloc_init(struct ceph_object_locator *oloc) +{ + oloc->pool = -1; + oloc->pool_ns = NULL; +} + +static inline bool ceph_oloc_empty(const struct ceph_object_locator *oloc) +{ + return oloc->pool == -1; +} + +void ceph_oloc_copy(struct ceph_object_locator *dest, + const struct ceph_object_locator *src); +void ceph_oloc_destroy(struct ceph_object_locator *oloc); + /* * Maximum supported by kernel client object name length * @@ -64,11 +88,52 @@ struct ceph_object_locator { */ #define CEPH_MAX_OID_NAME_LEN 100 +/* + * 51-char inline_name is long enough for all cephfs and all but one + * rbd requests: <imgname> in "<imgname>.rbd"/"rbd_id.<imgname>" can be + * arbitrarily long (~PAGE_SIZE). It's done once during rbd map; all + * other rbd requests fit into inline_name. + * + * Makes ceph_object_id 64 bytes on 64-bit. + */ +#define CEPH_OID_INLINE_LEN 52 + +/* + * Both inline and external buffers have space for a NUL-terminator, + * which is carried around. It's not required though - RADOS object + * names don't have to be NUL-terminated and may contain NULs. + */ struct ceph_object_id { - char name[CEPH_MAX_OID_NAME_LEN]; + char *name; + char inline_name[CEPH_OID_INLINE_LEN]; int name_len; }; +static inline void ceph_oid_init(struct ceph_object_id *oid) +{ + oid->name = oid->inline_name; + oid->name_len = 0; +} + +#define CEPH_OID_INIT_ONSTACK(oid) \ + ({ ceph_oid_init(&oid); oid; }) +#define CEPH_DEFINE_OID_ONSTACK(oid) \ + struct ceph_object_id oid = CEPH_OID_INIT_ONSTACK(oid) + +static inline bool ceph_oid_empty(const struct ceph_object_id *oid) +{ + return oid->name == oid->inline_name && !oid->name_len; +} + +void ceph_oid_copy(struct ceph_object_id *dest, + const struct ceph_object_id *src); +__printf(2, 3) +void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...); +__printf(3, 4) +int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp, + const char *fmt, ...); +void ceph_oid_destroy(struct ceph_object_id *oid); + struct ceph_pg_mapping { struct rb_node node; struct ceph_pg pgid; @@ -87,7 +152,6 @@ struct ceph_pg_mapping { struct ceph_osdmap { struct ceph_fsid fsid; u32 epoch; - u32 mkfs_epoch; struct ceph_timespec created, modified; u32 flags; /* CEPH_OSDMAP_* */ @@ -113,52 +177,23 @@ struct ceph_osdmap { int crush_scratch_ary[CEPH_PG_MAX_SIZE * 3]; }; -static inline void ceph_oid_set_name(struct ceph_object_id *oid, - const char *name) -{ - int len; - - len = strlen(name); - if (len > sizeof(oid->name)) { - WARN(1, "ceph_oid_set_name '%s' len %d vs %zu, truncating\n", - name, len, sizeof(oid->name)); - len = sizeof(oid->name); - } - - memcpy(oid->name, name, len); - oid->name_len = len; -} - -static inline void ceph_oid_copy(struct ceph_object_id *dest, - struct ceph_object_id *src) -{ - BUG_ON(src->name_len > sizeof(dest->name)); - memcpy(dest->name, src->name, src->name_len); - dest->name_len = src->name_len; -} - -static inline int ceph_osd_exists(struct ceph_osdmap *map, int osd) +static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd) { return osd >= 0 && osd < map->max_osd && (map->osd_state[osd] & CEPH_OSD_EXISTS); } -static inline int ceph_osd_is_up(struct ceph_osdmap *map, int osd) +static inline bool ceph_osd_is_up(struct ceph_osdmap *map, int osd) { return ceph_osd_exists(map, osd) && (map->osd_state[osd] & CEPH_OSD_UP); } -static inline 
int ceph_osd_is_down(struct ceph_osdmap *map, int osd) +static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd) { return !ceph_osd_is_up(map, osd); } -static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag) -{ - return map && (map->flags & flag); -} - extern char *ceph_osdmap_state_str(char *str, int len, int state); extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd); @@ -192,28 +227,59 @@ static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid) return 0; } +struct ceph_osdmap *ceph_osdmap_alloc(void); extern struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end); -extern struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, - struct ceph_osdmap *map, - struct ceph_messenger *msgr); +struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, + struct ceph_osdmap *map); extern void ceph_osdmap_destroy(struct ceph_osdmap *map); +struct ceph_osds { + int osds[CEPH_PG_MAX_SIZE]; + int size; + int primary; /* id, NOT index */ +}; + +static inline void ceph_osds_init(struct ceph_osds *set) +{ + set->size = 0; + set->primary = -1; +} + +void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src); + +bool ceph_is_new_interval(const struct ceph_osds *old_acting, + const struct ceph_osds *new_acting, + const struct ceph_osds *old_up, + const struct ceph_osds *new_up, + int old_size, + int new_size, + int old_min_size, + int new_min_size, + u32 old_pg_num, + u32 new_pg_num, + bool old_sort_bitwise, + bool new_sort_bitwise, + const struct ceph_pg *pgid); +bool ceph_osds_changed(const struct ceph_osds *old_acting, + const struct ceph_osds *new_acting, + bool any_change); + /* calculate mapping of a file extent to an object */ extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout, u64 off, u64 len, u64 *bno, u64 *oxoff, u64 *oxlen); -/* calculate mapping of object to a placement group */ -extern int ceph_oloc_oid_to_pg(struct ceph_osdmap *osdmap, - struct ceph_object_locator *oloc, - struct ceph_object_id *oid, - struct ceph_pg *pg_out); - -extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, - struct ceph_pg pgid, - int *osds, int *primary); -extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, - struct ceph_pg pgid); +int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + struct ceph_pg *raw_pgid); + +void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap, + const struct ceph_pg *raw_pgid, + struct ceph_osds *up, + struct ceph_osds *acting); +int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap, + const struct ceph_pg *raw_pgid); extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id); diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h index 2f822dca1046..5c0da61cb763 100644 --- a/include/linux/ceph/rados.h +++ b/include/linux/ceph/rados.h @@ -114,8 +114,8 @@ struct ceph_object_layout { * compound epoch+version, used by storage layer to serialize mutations */ struct ceph_eversion { - __le32 epoch; __le64 version; + __le32 epoch; } __attribute__ ((packed)); /* @@ -153,6 +153,11 @@ extern const char *ceph_osd_state_name(int s); #define CEPH_OSDMAP_NOIN (1<<8) /* block osd auto mark-in */ #define CEPH_OSDMAP_NOBACKFILL (1<<9) /* block osd backfill */ #define CEPH_OSDMAP_NORECOVER (1<<10) /* block osd recovery and backfill */ +#define CEPH_OSDMAP_NOSCRUB (1<<11) /* block periodic scrub */ +#define CEPH_OSDMAP_NODEEP_SCRUB (1<<12) /* block 
periodic deep-scrub */ +#define CEPH_OSDMAP_NOTIERAGENT (1<<13) /* disable tiering agent */ +#define CEPH_OSDMAP_NOREBALANCE (1<<14) /* block osd backfill unless pg is degraded */ +#define CEPH_OSDMAP_SORTBITWISE (1<<15) /* use bitwise hobject_t sort */ /* * The error code to return when an OSD can't handle a write @@ -389,6 +394,13 @@ enum { CEPH_OSD_FLAG_SKIPRWLOCKS = 0x10000, /* skip rw locks */ CEPH_OSD_FLAG_IGNORE_OVERLAY = 0x20000, /* ignore pool overlay */ CEPH_OSD_FLAG_FLUSH = 0x40000, /* this is part of flush */ + CEPH_OSD_FLAG_MAP_SNAP_CLONE = 0x80000, /* map snap direct to clone id */ + CEPH_OSD_FLAG_ENFORCE_SNAPC = 0x100000, /* use snapc provided even if + pool uses pool snaps */ + CEPH_OSD_FLAG_REDIRECTED = 0x200000, /* op has been redirected */ + CEPH_OSD_FLAG_KNOWN_REDIR = 0x400000, /* redirect bit is authoritative */ + CEPH_OSD_FLAG_FULL_TRY = 0x800000, /* try op despite full flag */ + CEPH_OSD_FLAG_FULL_FORCE = 0x1000000, /* force op despite full flag */ }; enum { @@ -415,7 +427,17 @@ enum { CEPH_OSD_CMPXATTR_MODE_U64 = 2 }; -#define RADOS_NOTIFY_VER 1 +enum { + CEPH_OSD_WATCH_OP_UNWATCH = 0, + CEPH_OSD_WATCH_OP_LEGACY_WATCH = 1, + /* note: use only ODD ids to prevent pre-giant code from + interpreting the op as UNWATCH */ + CEPH_OSD_WATCH_OP_WATCH = 3, + CEPH_OSD_WATCH_OP_RECONNECT = 5, + CEPH_OSD_WATCH_OP_PING = 7, +}; + +const char *ceph_osd_watch_op_name(int o); /* * an individual object operation. each may be accompanied by some data @@ -450,10 +472,14 @@ struct ceph_osd_op { } __attribute__ ((packed)) snap; struct { __le64 cookie; - __le64 ver; - __u8 flag; /* 0 = unwatch, 1 = watch */ + __le64 ver; /* no longer used */ + __u8 op; /* CEPH_OSD_WATCH_OP_* */ + __le32 gen; /* registration generation */ } __attribute__ ((packed)) watch; struct { + __le64 cookie; + } __attribute__ ((packed)) notify; + struct { __le64 offset, length; __le64 src_offset; } __attribute__ ((packed)) clonerange; diff --git a/include/linux/ceph/string_table.h b/include/linux/ceph/string_table.h new file mode 100644 index 000000000000..1b02c96daf75 --- /dev/null +++ b/include/linux/ceph/string_table.h @@ -0,0 +1,62 @@ +#ifndef _FS_CEPH_STRING_TABLE_H +#define _FS_CEPH_STRING_TABLE_H + +#include <linux/types.h> +#include <linux/kref.h> +#include <linux/rbtree.h> +#include <linux/rcupdate.h> + +struct ceph_string { + struct kref kref; + union { + struct rb_node node; + struct rcu_head rcu; + }; + size_t len; + char str[]; +}; + +extern void ceph_release_string(struct kref *ref); +extern struct ceph_string *ceph_find_or_create_string(const char *str, + size_t len); +extern bool ceph_strings_empty(void); + +static inline struct ceph_string *ceph_get_string(struct ceph_string *str) +{ + kref_get(&str->kref); + return str; +} + +static inline void ceph_put_string(struct ceph_string *str) +{ + if (!str) + return; + kref_put(&str->kref, ceph_release_string); +} + +static inline int ceph_compare_string(struct ceph_string *cs, + const char* str, size_t len) +{ + size_t cs_len = cs ? 
cs->len : 0; + if (cs_len != len) + return cs_len - len; + if (len == 0) + return 0; + return strncmp(cs->str, str, len); +} + +#define ceph_try_get_string(x) \ +({ \ + struct ceph_string *___str; \ + rcu_read_lock(); \ + for (;;) { \ + ___str = rcu_dereference(x); \ + if (!___str || \ + kref_get_unless_zero(&___str->kref)) \ + break; \ + } \ + rcu_read_unlock(); \ + (___str); \ +}) + +#endif diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index a20320c666fd..984f73b719a9 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -87,6 +87,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, struct cgroup_subsys *ss); struct cgroup *cgroup_get_from_path(const char *path); +struct cgroup *cgroup_get_from_fd(int fd); int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index da95258127aa..a39c0c530778 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -25,13 +25,16 @@ #define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */ #define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */ #define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */ -#define CLK_IS_ROOT BIT(4) /* Deprecated: Don't use */ + /* unused */ #define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */ #define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */ #define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */ #define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */ #define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */ #define CLK_SET_RATE_UNGATE BIT(10) /* clock needs to run to set rate */ +#define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */ +/* parents need enable during gate/ungate, set rate and re-parent */ +#define CLK_OPS_PARENT_ENABLE BIT(12) struct clk; struct clk_hw; @@ -282,10 +285,18 @@ extern const struct clk_ops clk_fixed_rate_ops; struct clk *clk_register_fixed_rate(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned long fixed_rate); +struct clk_hw *clk_hw_register_fixed_rate(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + unsigned long fixed_rate); struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned long fixed_rate, unsigned long fixed_accuracy); void clk_unregister_fixed_rate(struct clk *clk); +struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + unsigned long fixed_rate, unsigned long fixed_accuracy); +void clk_hw_unregister_fixed_rate(struct clk_hw *hw); + void of_fixed_clk_setup(struct device_node *np); /** @@ -326,7 +337,12 @@ struct clk *clk_register_gate(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 bit_idx, u8 clk_gate_flags, spinlock_t *lock); +struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 bit_idx, + u8 clk_gate_flags, spinlock_t *lock); void clk_unregister_gate(struct clk *clk); +void clk_hw_unregister_gate(struct clk_hw *hw); struct clk_div_table { unsigned int val; @@ -407,12 +423,22 @@ struct clk 
*clk_register_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags, spinlock_t *lock); +struct clk_hw *clk_hw_register_divider(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags, spinlock_t *lock); struct clk *clk_register_divider_table(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags, const struct clk_div_table *table, spinlock_t *lock); +struct clk_hw *clk_hw_register_divider_table(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags, const struct clk_div_table *table, + spinlock_t *lock); void clk_unregister_divider(struct clk *clk); +void clk_hw_unregister_divider(struct clk_hw *hw); /** * struct clk_mux - multiplexer clock @@ -463,14 +489,25 @@ struct clk *clk_register_mux(struct device *dev, const char *name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, u8 clk_mux_flags, spinlock_t *lock); +struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_mux_flags, spinlock_t *lock); struct clk *clk_register_mux_table(struct device *dev, const char *name, const char * const *parent_names, u8 num_parents, unsigned long flags, void __iomem *reg, u8 shift, u32 mask, u8 clk_mux_flags, u32 *table, spinlock_t *lock); +struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, + void __iomem *reg, u8 shift, u32 mask, + u8 clk_mux_flags, u32 *table, spinlock_t *lock); void clk_unregister_mux(struct clk *clk); +void clk_hw_unregister_mux(struct clk_hw *hw); void of_fixed_factor_clk_setup(struct device_node *node); @@ -499,6 +536,10 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned int mult, unsigned int div); void clk_unregister_fixed_factor(struct clk *clk); +struct clk_hw *clk_hw_register_fixed_factor(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + unsigned int mult, unsigned int div); +void clk_hw_unregister_fixed_factor(struct clk_hw *hw); /** * struct clk_fractional_divider - adjustable fractional divider clock @@ -533,6 +574,11 @@ struct clk *clk_register_fractional_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, u8 clk_divider_flags, spinlock_t *lock); +struct clk_hw *clk_hw_register_fractional_divider(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, + u8 clk_divider_flags, spinlock_t *lock); +void clk_hw_unregister_fractional_divider(struct clk_hw *hw); /** * struct clk_multiplier - adjustable multiplier clock @@ -603,6 +649,14 @@ struct clk *clk_register_composite(struct device *dev, const char *name, struct clk_hw *rate_hw, const struct clk_ops *rate_ops, struct clk_hw *gate_hw, const struct clk_ops *gate_ops, unsigned long flags); +void clk_unregister_composite(struct clk *clk); +struct clk_hw *clk_hw_register_composite(struct device 
*dev, const char *name, + const char * const *parent_names, int num_parents, + struct clk_hw *mux_hw, const struct clk_ops *mux_ops, + struct clk_hw *rate_hw, const struct clk_ops *rate_ops, + struct clk_hw *gate_hw, const struct clk_ops *gate_ops, + unsigned long flags); +void clk_hw_unregister_composite(struct clk_hw *hw); /*** * struct clk_gpio_gate - gpio gated clock @@ -625,6 +679,10 @@ extern const struct clk_ops clk_gpio_gate_ops; struct clk *clk_register_gpio_gate(struct device *dev, const char *name, const char *parent_name, unsigned gpio, bool active_low, unsigned long flags); +struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name, + const char *parent_name, unsigned gpio, bool active_low, + unsigned long flags); +void clk_hw_unregister_gpio_gate(struct clk_hw *hw); /** * struct clk_gpio_mux - gpio controlled clock multiplexer @@ -640,6 +698,10 @@ extern const struct clk_ops clk_gpio_mux_ops; struct clk *clk_register_gpio_mux(struct device *dev, const char *name, const char * const *parent_names, u8 num_parents, unsigned gpio, bool active_low, unsigned long flags); +struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, unsigned gpio, + bool active_low, unsigned long flags); +void clk_hw_unregister_gpio_mux(struct clk_hw *hw); /** * clk_register - allocate a new clock, register it and return an opaque cookie @@ -655,9 +717,15 @@ struct clk *clk_register_gpio_mux(struct device *dev, const char *name, struct clk *clk_register(struct device *dev, struct clk_hw *hw); struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw); +int __must_check clk_hw_register(struct device *dev, struct clk_hw *hw); +int __must_check devm_clk_hw_register(struct device *dev, struct clk_hw *hw); + void clk_unregister(struct clk *clk); void devm_clk_unregister(struct device *dev, struct clk *clk); +void clk_hw_unregister(struct clk_hw *hw); +void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw); + /* helper functions */ const char *__clk_get_name(const struct clk *clk); const char *clk_hw_get_name(const struct clk_hw *hw); @@ -703,6 +771,11 @@ struct clk_onecell_data { unsigned int clk_num; }; +struct clk_hw_onecell_data { + size_t num; + struct clk_hw *hws[]; +}; + extern struct of_device_id __clk_of_table; #define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn) @@ -712,15 +785,24 @@ int of_clk_add_provider(struct device_node *np, struct clk *(*clk_src_get)(struct of_phandle_args *args, void *data), void *data); +int of_clk_add_hw_provider(struct device_node *np, + struct clk_hw *(*get)(struct of_phandle_args *clkspec, + void *data), + void *data); void of_clk_del_provider(struct device_node *np); struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, void *data); +struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, + void *data); struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data); +struct clk_hw *of_clk_hw_onecell_get(struct of_phandle_args *clkspec, + void *data); unsigned int of_clk_get_parent_count(struct device_node *np); int of_clk_parent_fill(struct device_node *np, const char **parents, unsigned int size); const char *of_clk_get_parent_name(struct device_node *np, int index); - +int of_clk_detect_critical(struct device_node *np, int index, + unsigned long *flags); void of_clk_init(const struct of_device_id *matches); #else /* !CONFIG_OF */ @@ -732,17 +814,34 @@ static inline int 
of_clk_add_provider(struct device_node *np, { return 0; } +static inline int of_clk_add_hw_provider(struct device_node *np, + struct clk_hw *(*get)(struct of_phandle_args *clkspec, + void *data), + void *data) +{ + return 0; +} static inline void of_clk_del_provider(struct device_node *np) {} static inline struct clk *of_clk_src_simple_get( struct of_phandle_args *clkspec, void *data) { return ERR_PTR(-ENOENT); } +static inline struct clk_hw * +of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data) +{ + return ERR_PTR(-ENOENT); +} static inline struct clk *of_clk_src_onecell_get( struct of_phandle_args *clkspec, void *data) { return ERR_PTR(-ENOENT); } +static inline struct clk_hw * +of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data) +{ + return ERR_PTR(-ENOENT); +} static inline int of_clk_get_parent_count(struct device_node *np) { return 0; @@ -757,6 +856,11 @@ static inline const char *of_clk_get_parent_name(struct device_node *np, { return NULL; } +static inline int of_clk_detect_critical(struct device_node *np, int index, + unsigned long *flags) +{ + return 0; +} static inline void of_clk_init(const struct of_device_id *matches) {} #endif /* CONFIG_OF */ diff --git a/include/linux/clk.h b/include/linux/clk.h index 0df4a51e1a78..123c02788807 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -20,8 +20,6 @@ struct device; struct clk; -#ifdef CONFIG_COMMON_CLK - /** * DOC: clk notifier callback types * @@ -78,6 +76,8 @@ struct clk_notifier_data { unsigned long new_rate; }; +#ifdef CONFIG_COMMON_CLK + /** * clk_notifier_register: register a clock rate-change notifier callback * @clk: clock whose rate we are interested in @@ -140,6 +140,18 @@ bool clk_is_match(const struct clk *p, const struct clk *q); #else +static inline int clk_notifier_register(struct clk *clk, + struct notifier_block *nb) +{ + return -ENOTSUPP; +} + +static inline int clk_notifier_unregister(struct clk *clk, + struct notifier_block *nb) +{ + return -ENOTSUPP; +} + static inline long clk_get_accuracy(struct clk *clk) { return -ENOTSUPP; @@ -461,6 +473,10 @@ static inline struct clk *clk_get_parent(struct clk *clk) return NULL; } +static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id) +{ + return NULL; +} #endif /* clk_prepare_enable helps cases using clk_enable in non-atomic context. 
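The !CONFIG_COMMON_CLK stubs added for clk_notifier_register()/clk_notifier_unregister() mean a consumer like the sketch below now compiles (and simply gets -ENOTSUPP) instead of failing to link; the driver names here are invented:

#include <linux/clk.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int foo_clk_notify(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	struct clk_notifier_data *ndata = data;

	if (event == POST_RATE_CHANGE)
		pr_info("rate changed: %lu -> %lu\n",
			ndata->old_rate, ndata->new_rate);

	return NOTIFY_OK;
}

static struct notifier_block foo_clk_nb = {
	.notifier_call = foo_clk_notify,
};

static int foo_watch(struct clk *clk)
{
	/* returns -ENOTSUPP via the new stub when COMMON_CLK is off */
	return clk_notifier_register(clk, &foo_clk_nb);
}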
*/ diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h index 7adfd80fbf55..ba6fa4148515 100644 --- a/include/linux/clk/renesas.h +++ b/include/linux/clk/renesas.h @@ -24,12 +24,20 @@ void r8a7778_clocks_init(u32 mode); void r8a7779_clocks_init(u32 mode); void rcar_gen2_clocks_init(u32 mode); -#ifdef CONFIG_PM_GENERIC_DOMAINS_OF void cpg_mstp_add_clk_domain(struct device_node *np); -int cpg_mstp_attach_dev(struct generic_pm_domain *domain, struct device *dev); -void cpg_mstp_detach_dev(struct generic_pm_domain *domain, struct device *dev); +#ifdef CONFIG_CLK_RENESAS_CPG_MSTP +int cpg_mstp_attach_dev(struct generic_pm_domain *unused, struct device *dev); +void cpg_mstp_detach_dev(struct generic_pm_domain *unused, struct device *dev); #else -static inline void cpg_mstp_add_clk_domain(struct device_node *np) {} +#define cpg_mstp_attach_dev NULL +#define cpg_mstp_detach_dev NULL #endif +#ifdef CONFIG_CLK_RENESAS_CPG_MSSR +int cpg_mssr_attach_dev(struct generic_pm_domain *unused, struct device *dev); +void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev); +#else +#define cpg_mssr_attach_dev NULL +#define cpg_mssr_detach_dev NULL +#endif #endif diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h index 57bf7aab4516..7007a5f48080 100644 --- a/include/linux/clk/tegra.h +++ b/include/linux/clk/tegra.h @@ -121,4 +121,9 @@ static inline void tegra_cpu_clock_resume(void) } #endif +extern void tegra210_xusb_pll_hw_control_enable(void); +extern void tegra210_xusb_pll_hw_sequence_start(void); +extern void tegra210_sata_pll_hw_control_enable(void); +extern void tegra210_sata_pll_hw_sequence_start(void); + #endif /* __LINUX_CLK_TEGRA_H_ */ diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index dc5164a6df29..6110fe09ed18 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -37,6 +37,7 @@ * @last_rounded_n: cache of the last N result of omap2_dpll_round_rate() * @min_divider: minimum valid non-bypass divider value (actual) * @max_divider: maximum valid non-bypass divider value (actual) + * @max_rate: maximum clock rate for the DPLL * @modes: possible values of @enable_mask * @autoidle_reg: register containing the DPLL autoidle mode bitfield * @idlest_reg: register containing the DPLL idle status bitfield @@ -81,6 +82,7 @@ struct dpll_data { u8 last_rounded_n; u8 min_divider; u16 max_divider; + unsigned long max_rate; u8 modes; void __iomem *autoidle_reg; void __iomem *idlest_reg; diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h index c2c04f7cbe8a..2eabc862abdb 100644 --- a/include/linux/clkdev.h +++ b/include/linux/clkdev.h @@ -15,6 +15,7 @@ #include <asm/clkdev.h> struct clk; +struct clk_hw; struct device; struct clk_lookup { @@ -34,18 +35,22 @@ struct clk_lookup { struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...) __printf(3, 4); +struct clk_lookup *clkdev_hw_alloc(struct clk_hw *hw, const char *con_id, + const char *dev_fmt, ...) __printf(3, 4); void clkdev_add(struct clk_lookup *cl); void clkdev_drop(struct clk_lookup *cl); struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id, const char *dev_fmt, ...) __printf(3, 4); +struct clk_lookup *clkdev_hw_create(struct clk_hw *hw, const char *con_id, + const char *dev_fmt, ...) 
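clkdev grows clk_hw-based lookup helpers in this hunk. A plausible use, assuming a hypothetical board file that wires a freshly registered clk_hw to a device name, and assuming clkdev_hw_create() mirrors the allocate-and-add semantics of clkdev_create():

#include <linux/clkdev.h>

static struct clk_lookup *foo_uart_cl;

static int foo_add_uart_lookup(struct clk_hw *hw)
{
	/* allocates the lookup and registers it in one step */
	foo_uart_cl = clkdev_hw_create(hw, NULL, "serial8250.%d", 0);
	if (!foo_uart_cl)
		return -ENOMEM;

	return 0;	/* undo later with clkdev_drop(foo_uart_cl) */
}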
__printf(3, 4); void clkdev_add_table(struct clk_lookup *, size_t); int clk_add_alias(const char *, const char *, const char *, struct device *); int clk_register_clkdev(struct clk *, const char *, const char *); -int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t); +int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *); #ifdef CONFIG_COMMON_CLK int __clk_get(struct clk *clk); diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index a307bf62974f..08398182f56e 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -15,6 +15,7 @@ #include <linux/cache.h> #include <linux/timer.h> #include <linux/init.h> +#include <linux/of.h> #include <asm/div64.h> #include <asm/io.h> @@ -243,7 +244,7 @@ extern int clocksource_mmio_init(void __iomem *, const char *, extern int clocksource_i8253_init(void); #define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \ - OF_DECLARE_1(clksrc, name, compat, fn) + OF_DECLARE_1_RET(clksrc, name, compat, fn) #ifdef CONFIG_CLKSRC_PROBE extern void clocksource_probe(void); diff --git a/include/linux/compaction.h b/include/linux/compaction.h index d7c8de583a23..d4e106b5dc27 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -1,30 +1,59 @@ #ifndef _LINUX_COMPACTION_H #define _LINUX_COMPACTION_H +/* + * Determines how hard direct compaction should try to succeed. + * Lower value means higher priority, analogously to reclaim priority. + */ +enum compact_priority { + COMPACT_PRIO_SYNC_LIGHT, + MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT, + DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT, + COMPACT_PRIO_ASYNC, + INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC +}; + /* Return values for compact_zone() and try_to_compact_pages() */ -/* compaction didn't start as it was deferred due to past failures */ -#define COMPACT_DEFERRED 0 -/* compaction didn't start as it was not possible or direct reclaim was more suitable */ -#define COMPACT_SKIPPED 1 -/* compaction should continue to another pageblock */ -#define COMPACT_CONTINUE 2 -/* direct compaction partially compacted a zone and there are suitable pages */ -#define COMPACT_PARTIAL 3 -/* The full zone was compacted */ -#define COMPACT_COMPLETE 4 -/* For more detailed tracepoint output */ -#define COMPACT_NO_SUITABLE_PAGE 5 -#define COMPACT_NOT_SUITABLE_ZONE 6 -#define COMPACT_CONTENDED 7 /* When adding new states, please adjust include/trace/events/compaction.h */ - -/* Used to signal whether compaction detected need_sched() or lock contention */ -/* No contention detected */ -#define COMPACT_CONTENDED_NONE 0 -/* Either need_sched() was true or fatal signal pending */ -#define COMPACT_CONTENDED_SCHED 1 -/* Zone lock or lru_lock was contended in async compaction */ -#define COMPACT_CONTENDED_LOCK 2 +enum compact_result { + /* For more detailed tracepoint output - internal to compaction */ + COMPACT_NOT_SUITABLE_ZONE, + /* + * compaction didn't start as it was not possible or direct reclaim + * was more suitable + */ + COMPACT_SKIPPED, + /* compaction didn't start as it was deferred due to past failures */ + COMPACT_DEFERRED, + + /* compaction not active last round */ + COMPACT_INACTIVE = COMPACT_DEFERRED, + + /* For more detailed tracepoint output - internal to compaction */ + COMPACT_NO_SUITABLE_PAGE, + /* compaction should continue to another pageblock */ + COMPACT_CONTINUE, + + /* + * The full zone was scanned but compaction wasn't successful in + * compacting suitable pages. 
+ */ + COMPACT_COMPLETE, + /* + * direct compaction has scanned part of the zone but wasn't successful + * in compacting suitable pages. + */ + COMPACT_PARTIAL_SKIPPED, + + /* compaction terminated prematurely due to lock contention */ + COMPACT_CONTENDED, + + /* + * direct compaction partially compacted a zone and there might be + * suitable pages + */ + COMPACT_PARTIAL, +}; struct alloc_context; /* in mm/internal.h */ @@ -38,13 +67,13 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write, extern int sysctl_compact_unevictable_allowed; extern int fragmentation_index(struct zone *zone, unsigned int order); -extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order, - int alloc_flags, const struct alloc_context *ac, - enum migrate_mode mode, int *contended); +extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, + unsigned int order, unsigned int alloc_flags, + const struct alloc_context *ac, enum compact_priority prio); extern void compact_pgdat(pg_data_t *pgdat, int order); extern void reset_isolation_suitable(pg_data_t *pgdat); -extern unsigned long compaction_suitable(struct zone *zone, int order, - int alloc_flags, int classzone_idx); +extern enum compact_result compaction_suitable(struct zone *zone, int order, + unsigned int alloc_flags, int classzone_idx); extern void defer_compaction(struct zone *zone, int order); extern bool compaction_deferred(struct zone *zone, int order); @@ -52,19 +81,79 @@ extern void compaction_defer_reset(struct zone *zone, int order, bool alloc_success); extern bool compaction_restarting(struct zone *zone, int order); +/* Compaction has made some progress and retrying makes sense */ +static inline bool compaction_made_progress(enum compact_result result) +{ + /* + * Even though this might sound confusing, this in fact tells us + * that the compaction successfully isolated and migrated some + * pageblocks. + */ + if (result == COMPACT_PARTIAL) + return true; + + return false; +} + +/* Compaction has failed and it doesn't make much sense to keep retrying. */ +static inline bool compaction_failed(enum compact_result result) +{ + /* All zones were scanned completely and still no result. */ + if (result == COMPACT_COMPLETE) + return true; + + return false; +} + +/* + * Compaction has backed off for some reason. It might be throttling or + * lock contention. Retrying is still worthwhile. + */ +static inline bool compaction_withdrawn(enum compact_result result) +{ + /* + * Compaction backed off due to watermark checks for order-0 + * so the regular reclaim has to try harder and reclaim something. + */ + if (result == COMPACT_SKIPPED) + return true; + + /* + * If compaction is deferred for high-order allocations, it is + * because sync compaction recently failed. If this is the case + * and the caller requested a THP allocation, we do not want + * to heavily disrupt the system, so we fail the allocation + * instead of entering direct reclaim. + */ + if (result == COMPACT_DEFERRED) + return true; + + /* + * If compaction in async mode encounters contention or blocks a + * higher-priority task, we back off early rather than cause stalls. + */ + if (result == COMPACT_CONTENDED) + return true; + + /* + * Page scanners have met but we haven't scanned full zones, so this + * is in fact a back off. 
+ */ + if (result == COMPACT_PARTIAL_SKIPPED) + return true; + + return false; +} + + +bool compaction_zonelist_suitable(struct alloc_context *ac, int order, + int alloc_flags); + extern int kcompactd_run(int nid); extern void kcompactd_stop(int nid); extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx); #else -static inline unsigned long try_to_compact_pages(gfp_t gfp_mask, - unsigned int order, int alloc_flags, - const struct alloc_context *ac, - enum migrate_mode mode, int *contended) -{ - return COMPACT_CONTINUE; -} - static inline void compact_pgdat(pg_data_t *pgdat, int order) { } @@ -73,7 +162,7 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat) { } -static inline unsigned long compaction_suitable(struct zone *zone, int order, +static inline enum compact_result compaction_suitable(struct zone *zone, int order, int alloc_flags, int classzone_idx) { return COMPACT_SKIPPED; @@ -88,6 +177,21 @@ static inline bool compaction_deferred(struct zone *zone, int order) return true; } +static inline bool compaction_made_progress(enum compact_result result) +{ + return false; +} + +static inline bool compaction_failed(enum compact_result result) +{ + return false; +} + +static inline bool compaction_withdrawn(enum compact_result result) +{ + return true; +} + static inline int kcompactd_run(int nid) { return 0; @@ -103,6 +207,7 @@ static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_i #endif /* CONFIG_COMPACTION */ #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) +struct node; extern int compaction_register_node(struct node *node); extern void compaction_unregister_node(struct node *node); diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 3d5202eda22f..573c5a18908f 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -142,6 +142,7 @@ #if GCC_VERSION >= 30400 #define __must_check __attribute__((warn_unused_result)) +#define __malloc __attribute__((__malloc__)) #endif #if GCC_VERSION >= 40000 @@ -157,7 +158,7 @@ #define __compiler_offsetof(a, b) \ __builtin_offsetof(a, b) -#if GCC_VERSION >= 40100 && GCC_VERSION < 40600 +#if GCC_VERSION >= 40100 # define __compiletime_object_size(obj) __builtin_object_size(obj, 0) #endif @@ -241,7 +242,11 @@ */ #define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) -#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP +/* + * sparse (__CHECKER__) pretends to be gcc, but can't do constant + * folding in __builtin_bswap*() (yet), so don't set these for it. + */ +#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__) #if GCC_VERSION >= 40400 #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ @@ -249,7 +254,7 @@ #if GCC_VERSION >= 40800 #define __HAVE_BUILTIN_BSWAP16__ #endif -#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ +#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */ #if GCC_VERSION >= 50000 #define KASAN_ABI_VERSION 4 diff --git a/include/linux/compiler.h b/include/linux/compiler.h index b5ff9881bef8..668569844d37 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -17,7 +17,6 @@ # define __release(x) __context__(x,-1) # define __cond_lock(x,c) ((c) ? 
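Taken together, compaction_made_progress(), compaction_failed() and compaction_withdrawn() let the allocator classify an enum compact_result without matching on individual values. A simplified retry-decision sketch built on them (the real logic lives in mm/page_alloc.c and is more involved):

#include <linux/compaction.h>

static bool foo_should_retry(enum compact_result result,
			     enum compact_priority *prio)
{
	if (compaction_made_progress(result))
		return true;	/* COMPACT_PARTIAL: pages were freed */

	if (compaction_failed(result))
		return false;	/* COMPACT_COMPLETE: full scan, no luck */

	if (compaction_withdrawn(result)) {
		/* back-off case: raise priority (lower value) and retry */
		if (*prio > MIN_COMPACT_PRIORITY)
			(*prio)--;
		return true;
	}

	return false;
}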
({ __acquire(x); 1; }) : 0) # define __percpu __attribute__((noderef, address_space(3))) -# define __pmem __attribute__((noderef, address_space(5))) #ifdef CONFIG_SPARSE_RCU_POINTER # define __rcu __attribute__((noderef, address_space(4))) #else /* CONFIG_SPARSE_RCU_POINTER */ @@ -45,7 +44,6 @@ extern void __chk_io_ptr(const volatile void __iomem *); # define __cond_lock(x,c) (c) # define __percpu # define __rcu -# define __pmem # define __private # define ACCESS_PRIVATE(p, member) ((p)->member) #endif /* __CHECKER__ */ @@ -304,23 +302,6 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s __u.__val; \ }) -/** - * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering - * @cond: boolean expression to wait for - * - * Equivalent to using smp_load_acquire() on the condition variable but employs - * the control dependency of the wait to reduce the barrier on many platforms. - * - * The control dependency provides a LOAD->STORE order, the additional RMB - * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order, - * aka. ACQUIRE. - */ -#define smp_cond_acquire(cond) do { \ - while (!(cond)) \ - cpu_relax(); \ - smp_rmb(); /* ctrl + rmb := acquire */ \ -} while (0) - #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ @@ -357,6 +338,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s #define __deprecated_for_modules #endif +#ifndef __malloc +#define __malloc +#endif + /* * Allow us to avoid 'defined but not used' warnings on functions and data, * as well as force them to be emitted to the assembly file. @@ -541,10 +526,15 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s * Similar to rcu_dereference(), but for situations where the pointed-to * object's lifetime is managed by something other than RCU. That * "something other" might be reference counting or simple immortality. + * + * The seemingly unused variable ___typecheck_p validates that @p is + * indeed a pointer type by using a pointer to typeof(*p) as the type. + * Taking a pointer to typeof(*p) again is needed in case p is void *. */ #define lockless_dereference(p) \ ({ \ typeof(p) _________p1 = READ_ONCE(p); \ + typeof(*(p)) *___typecheck_p __maybe_unused; \ smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ (_________p1); \ }) diff --git a/include/linux/console.h b/include/linux/console.h index ea731af2451e..d530c4627e54 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -28,6 +28,13 @@ struct tty_struct; #define VT100ID "\033[?1;2c" #define VT102ID "\033[?6c" +/** + * struct consw - callbacks for consoles + * + * @con_set_palette: sets the palette of the console to @table (optional) + * @con_scrolldelta: the contents of the console should be scrolled by @lines. + * Invoked by user. 
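The extra ___typecheck_p declaration added to lockless_dereference() turns a misuse into a build failure. A small consumer illustrating the intended pattern, with an invented config structure:

#include <linux/compiler.h>

struct foo_cfg {
	int threshold;
};

/* Published once at init and never freed, so RCU is not needed. */
static struct foo_cfg *foo_cfg;

static int foo_get_threshold(void)
{
	/*
	 * The new ___typecheck_p line makes this fail to compile if
	 * foo_cfg were ever changed to a non-pointer type.
	 */
	struct foo_cfg *cfg = lockless_dereference(foo_cfg);

	return cfg ? cfg->threshold : 0;
}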
(optional) + */ struct consw { struct module *owner; const char *(*con_startup)(void); @@ -38,7 +45,6 @@ struct consw { void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int); void (*con_cursor)(struct vc_data *, int); int (*con_scroll)(struct vc_data *, int, int, int, int); - void (*con_bmove)(struct vc_data *, int, int, int, int, int, int); int (*con_switch)(struct vc_data *); int (*con_blank)(struct vc_data *, int, int); int (*con_font_set)(struct vc_data *, struct console_font *, unsigned); @@ -47,8 +53,9 @@ struct consw { int (*con_font_copy)(struct vc_data *, int); int (*con_resize)(struct vc_data *, unsigned int, unsigned int, unsigned int); - int (*con_set_palette)(struct vc_data *, unsigned char *); - int (*con_scrolldelta)(struct vc_data *, int); + void (*con_set_palette)(struct vc_data *, + const unsigned char *table); + void (*con_scrolldelta)(struct vc_data *, int lines); int (*con_set_origin)(struct vc_data *); void (*con_save_screen)(struct vc_data *); u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8); @@ -191,6 +198,8 @@ void vcs_remove_sysfs(int index); #ifdef CONFIG_VGA_CONSOLE extern bool vgacon_text_force(void); +#else +static inline bool vgacon_text_force(void) { return false; } #endif #endif /* _LINUX_CONSOLE_H */ diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index e329ee2667e1..6fd3c908a340 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -21,6 +21,38 @@ struct uni_pagedir; #define NPAR 16 +/* + * Example: vc_data of a console that was scrolled 3 lines down. + * + * Console buffer + * vc_screenbuf ---------> +----------------------+-. + * | initializing W | \ + * | initializing X | | + * | initializing Y | > scroll-back area + * | initializing Z | | + * | | / + * vc_visible_origin ---> ^+----------------------+-: + * (changes by scroll) || Welcome to linux | \ + * || | | + * vc_rows --->< | login: root | | visible on console + * || password: | > (vc_screenbuf_size is + * vc_origin -----------> || | | vc_size_row * vc_rows) + * (start when no scroll) || Last login: 12:28 | / + * v+----------------------+-: + * | Have a lot of fun... | \ + * vc_pos -----------------|--------v | > scroll-front area + * | ~ # cat_ | / + * vc_scr_end -----------> +----------------------+-: + * (vc_origin + | | \ EMPTY, to be filled by + * vc_screenbuf_size) | | / vc_video_erase_char + * +----------------------+-' + * <---- 2 * vc_cols -----> + * <---- vc_size_row -----> + * + * Note that every character in the console buffer is accompanied with an + * attribute in the buffer right after the character. This is not depicted + * in the figure. 
+ */ struct vc_data { struct tty_port port; /* Upper level data */ @@ -74,7 +106,6 @@ struct vc_data { unsigned int vc_decawm : 1; /* Autowrap Mode */ unsigned int vc_deccm : 1; /* Cursor Visible */ unsigned int vc_decim : 1; /* Insert Mode */ - unsigned int vc_deccolm : 1; /* 80/132 Column Mode */ /* attribute flags */ unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */ unsigned int vc_italic:1; @@ -136,6 +167,9 @@ extern void vc_SAK(struct work_struct *work); #define CUR_DEFAULT CUR_UNDERLINE -#define CON_IS_VISIBLE(conp) (*conp->vc_display_fg == conp) +static inline bool con_is_visible(const struct vc_data *vc) +{ + return *vc->vc_display_fg == vc; +} #endif /* _LINUX_CONSOLE_STRUCT_H */ diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index d259274238db..c78fc27418f2 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -31,6 +31,19 @@ static inline void user_exit(void) context_tracking_exit(CONTEXT_USER); } +/* Called with interrupts disabled. */ +static inline void user_enter_irqoff(void) +{ + if (context_tracking_is_enabled()) + __context_tracking_enter(CONTEXT_USER); + +} +static inline void user_exit_irqoff(void) +{ + if (context_tracking_is_enabled()) + __context_tracking_exit(CONTEXT_USER); +} + static inline enum ctx_state exception_enter(void) { enum ctx_state prev_ctx; @@ -69,6 +82,8 @@ static inline enum ctx_state ct_state(void) #else static inline void user_enter(void) { } static inline void user_exit(void) { } +static inline void user_enter_irqoff(void) { } +static inline void user_exit_irqoff(void) { } static inline enum ctx_state exception_enter(void) { return 0; } static inline void exception_exit(enum ctx_state prev_ctx) { } static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; } @@ -84,7 +99,8 @@ static inline void context_tracking_init(void) { } #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -static inline void guest_enter(void) +/* must be called with irqs disabled */ +static inline void guest_enter_irqoff(void) { if (vtime_accounting_cpu_enabled()) vtime_guest_enter(current); @@ -93,9 +109,19 @@ static inline void guest_enter(void) if (context_tracking_is_enabled()) __context_tracking_enter(CONTEXT_GUEST); + + /* KVM does not hold any references to rcu protected data when it + * switches CPU into a guest mode. In fact switching to a guest mode + * is very similar to exiting to userspace from rcu point of view. In + * addition CPU may stay in a guest mode for quite a long time (up to + * one time slice). Lets treat guest mode as quiescent state, just like + * we do with user-mode execution. 
+ */ + if (!context_tracking_cpu_is_enabled()) + rcu_virt_note_context_switch(smp_processor_id()); } -static inline void guest_exit(void) +static inline void guest_exit_irqoff(void) { if (context_tracking_is_enabled()) __context_tracking_exit(CONTEXT_GUEST); @@ -107,7 +133,7 @@ static inline void guest_exit(void) } #else -static inline void guest_enter(void) +static inline void guest_enter_irqoff(void) { /* * This is running in ioctl context so its safe @@ -116,9 +142,10 @@ static inline void guest_enter(void) */ vtime_account_system(current); current->flags |= PF_VCPU; + rcu_virt_note_context_switch(smp_processor_id()); } -static inline void guest_exit(void) +static inline void guest_exit_irqoff(void) { /* Flush the guest cputime we spent on the guest */ vtime_account_system(current); @@ -126,4 +153,22 @@ static inline void guest_exit(void) } #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */ +static inline void guest_enter(void) +{ + unsigned long flags; + + local_irq_save(flags); + guest_enter_irqoff(); + local_irq_restore(flags); +} + +static inline void guest_exit(void) +{ + unsigned long flags; + + local_irq_save(flags); + guest_exit_irqoff(); + local_irq_restore(flags); +} + #endif diff --git a/include/linux/coresight-stm.h b/include/linux/coresight-stm.h new file mode 100644 index 000000000000..a978bb85599a --- /dev/null +++ b/include/linux/coresight-stm.h @@ -0,0 +1,6 @@ +#ifndef __LINUX_CORESIGHT_STM_H_ +#define __LINUX_CORESIGHT_STM_H_ + +#include <uapi/linux/coresight-stm.h> + +#endif diff --git a/include/linux/cpu.h b/include/linux/cpu.h index f9b1fab4388a..797d9c8e9a1b 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -55,35 +55,6 @@ extern ssize_t arch_cpu_release(const char *, size_t); #endif struct notifier_block; -/* - * CPU notifier priorities. - */ -enum { - /* - * SCHED_ACTIVE marks a cpu which is coming up active during - * CPU_ONLINE and CPU_DOWN_FAILED and must be the first - * notifier. CPUSET_ACTIVE adjusts cpuset according to - * cpu_active mask right after SCHED_ACTIVE. During - * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are - * ordered in the similar way. - * - * This ordering guarantees consistent cpu_active mask and - * migration behavior to all cpu notifiers. - */ - CPU_PRI_SCHED_ACTIVE = INT_MAX, - CPU_PRI_CPUSET_ACTIVE = INT_MAX - 1, - CPU_PRI_SCHED_INACTIVE = INT_MIN + 1, - CPU_PRI_CPUSET_INACTIVE = INT_MIN, - - /* migration should happen before other stuff but after perf */ - CPU_PRI_PERF = 20, - CPU_PRI_MIGRATION = 10, - - /* bring up workqueues before normal notifiers and down after */ - CPU_PRI_WORKQUEUE_UP = 5, - CPU_PRI_WORKQUEUE_DOWN = -5, -}; - #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ #define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */ #define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */ diff --git a/include/linux/cpufreq-dt.h b/include/linux/cpufreq-dt.h deleted file mode 100644 index 0414009e2c30..000000000000 --- a/include/linux/cpufreq-dt.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (C) 2014 Marvell - * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef __CPUFREQ_DT_H__ -#define __CPUFREQ_DT_H__ - -struct cpufreq_dt_platform_data { - /* - * True when each CPU has its own clock to control its - * frequency, false when all CPUs are controlled by a single - * clock. 
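The new guest_enter_irqoff()/guest_exit_irqoff() entry points exist for callers that already run with interrupts disabled around the world switch, where the irq-save/restore in the guest_enter()/guest_exit() wrappers would be pure overhead. A KVM-style sketch of that pattern (the world-switch call itself is elided):

#include <linux/context_tracking.h>
#include <linux/irqflags.h>

static void foo_vcpu_run_once(void)
{
	local_irq_disable();

	guest_enter_irqoff();	/* vtime accounting + RCU quiescent state */

	/* low-level world switch into the guest would happen here */

	guest_exit_irqoff();	/* flush guest cputime back to the host */

	local_irq_enable();
}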
- */ - bool independent_clocks; -}; - -#endif /* __CPUFREQ_DT_H__ */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 718e8725de8a..631ba33bbe9f 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -36,6 +36,12 @@ struct cpufreq_governor; +enum cpufreq_table_sorting { + CPUFREQ_TABLE_UNSORTED, + CPUFREQ_TABLE_SORTED_ASCENDING, + CPUFREQ_TABLE_SORTED_DESCENDING +}; + struct cpufreq_freqs { unsigned int cpu; /* cpu nr */ unsigned int old; @@ -87,6 +93,7 @@ struct cpufreq_policy { struct cpufreq_user_policy user_policy; struct cpufreq_frequency_table *freq_table; + enum cpufreq_table_sorting freq_table_sorted; struct list_head policy_list; struct kobject kobj; @@ -102,6 +109,21 @@ struct cpufreq_policy { */ struct rw_semaphore rwsem; + /* + * Fast switch flags: + * - fast_switch_possible should be set by the driver if it can + * guarantee that frequency can be changed on any CPU sharing the + * policy and that the change will affect all of the policy CPUs then. + * - fast_switch_enabled is to be set by governors that support fast + * freqnency switching with the help of cpufreq_enable_fast_switch(). + */ + bool fast_switch_possible; + bool fast_switch_enabled; + + /* Cached frequency lookup from cpufreq_driver_resolve_freq. */ + unsigned int cached_target_freq; + int cached_resolved_idx; + /* Synchronization for frequency transitions */ bool transition_ongoing; /* Tracks transition status */ spinlock_t transition_lock; @@ -156,6 +178,8 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); int cpufreq_update_policy(unsigned int cpu); bool have_governor_per_policy(void); struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); +void cpufreq_enable_fast_switch(struct cpufreq_policy *policy); +void cpufreq_disable_fast_switch(struct cpufreq_policy *policy); #else static inline unsigned int cpufreq_get(unsigned int cpu) { @@ -172,6 +196,18 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu) static inline void disable_cpufreq(void) { } #endif +#ifdef CONFIG_CPU_FREQ_STAT +void cpufreq_stats_create_table(struct cpufreq_policy *policy); +void cpufreq_stats_free_table(struct cpufreq_policy *policy); +void cpufreq_stats_record_transition(struct cpufreq_policy *policy, + unsigned int new_freq); +#else +static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { } +static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { } +static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy, + unsigned int new_freq) { } +#endif /* CONFIG_CPU_FREQ_STAT */ + /********************************************************************* * CPUFREQ DRIVER INTERFACE * *********************************************************************/ @@ -236,6 +272,18 @@ struct cpufreq_driver { unsigned int relation); /* Deprecated */ int (*target_index)(struct cpufreq_policy *policy, unsigned int index); + unsigned int (*fast_switch)(struct cpufreq_policy *policy, + unsigned int target_freq); + + /* + * Caches and returns the lowest driver-supported frequency greater than + * or equal to the target frequency, subject to any driver limitations. + * Does not set the frequency. Only to be implemented for drivers with + * target(). + */ + unsigned int (*resolve_freq)(struct cpufreq_policy *policy, + unsigned int target_freq); + /* * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION * unset. 
@@ -426,18 +474,27 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, #define CPUFREQ_POLICY_POWERSAVE (1) #define CPUFREQ_POLICY_PERFORMANCE (2) -/* Governor Events */ -#define CPUFREQ_GOV_START 1 -#define CPUFREQ_GOV_STOP 2 -#define CPUFREQ_GOV_LIMITS 3 -#define CPUFREQ_GOV_POLICY_INIT 4 -#define CPUFREQ_GOV_POLICY_EXIT 5 +/* + * The polling frequency depends on the capability of the processor. Default + * polling frequency is 1000 times the transition latency of the processor. The + * ondemand governor will work on any processor with transition latency <= 10ms, + * using appropriate sampling rate. + * + * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL) + * the ondemand governor will not work. All times here are in us (microseconds). + */ +#define MIN_SAMPLING_RATE_RATIO (2) +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (20) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) struct cpufreq_governor { char name[CPUFREQ_NAME_LEN]; - int initialized; - int (*governor) (struct cpufreq_policy *policy, - unsigned int event); + int (*init)(struct cpufreq_policy *policy); + void (*exit)(struct cpufreq_policy *policy); + int (*start)(struct cpufreq_policy *policy); + void (*stop)(struct cpufreq_policy *policy); + void (*limits)(struct cpufreq_policy *policy); ssize_t (*show_setspeed) (struct cpufreq_policy *policy, char *buf); int (*store_setspeed) (struct cpufreq_policy *policy, @@ -450,18 +507,53 @@ struct cpufreq_governor { }; /* Pass a target to the cpufreq driver */ +unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, + unsigned int target_freq); int cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation); int __cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation); +unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, + unsigned int target_freq); int cpufreq_register_governor(struct cpufreq_governor *governor); void cpufreq_unregister_governor(struct cpufreq_governor *governor); struct cpufreq_governor *cpufreq_default_governor(void); struct cpufreq_governor *cpufreq_fallback_governor(void); +static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy) +{ + if (policy->max < policy->cur) + __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); + else if (policy->min > policy->cur) + __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); +} + +/* Governor attribute set */ +struct gov_attr_set { + struct kobject kobj; + struct list_head policy_list; + struct mutex update_lock; + int usage_count; +}; + +/* sysfs ops for cpufreq governors */ +extern const struct sysfs_ops governor_sysfs_ops; + +void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node); +void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node); +unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node); + +/* Governor sysfs attribute */ +struct governor_attr { + struct attribute attr; + ssize_t (*show)(struct gov_attr_set *attr_set, char *buf); + ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf, + size_t count); +}; + /********************************************************************* * FREQUENCY TABLE HELPERS * *********************************************************************/ @@ -528,11 +620,9 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, struct 
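With the single event-multiplexing ->governor() callback gone, a governor now implements discrete per-event callbacks. A minimal skeleton against the new struct cpufreq_governor (it intentionally does nothing useful):

#include <linux/cpufreq.h>

static int foo_gov_init(struct cpufreq_policy *policy)
{
	return 0;	/* allocate per-policy state here */
}

static void foo_gov_exit(struct cpufreq_policy *policy)
{
}

static int foo_gov_start(struct cpufreq_policy *policy)
{
	return 0;	/* arm timers / start sampling here */
}

static void foo_gov_stop(struct cpufreq_policy *policy)
{
}

static void foo_gov_limits(struct cpufreq_policy *policy)
{
	/* clamp the current frequency into the new min/max window */
	cpufreq_policy_apply_limits(policy);
}

static struct cpufreq_governor foo_governor = {
	.name	= "foo",
	.init	= foo_gov_init,
	.exit	= foo_gov_exit,
	.start	= foo_gov_start,
	.stop	= foo_gov_stop,
	.limits	= foo_gov_limits,
};

Registration is unchanged: cpufreq_register_governor(&foo_governor).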
cpufreq_frequency_table *table); int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy); -int cpufreq_frequency_table_target(struct cpufreq_policy *policy, - struct cpufreq_frequency_table *table, - unsigned int target_freq, - unsigned int relation, - unsigned int *index); +int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy, unsigned int freq); @@ -543,6 +633,227 @@ int cpufreq_boost_trigger_state(int state); int cpufreq_boost_enabled(void); int cpufreq_enable_boost_support(void); bool policy_has_boost_freq(struct cpufreq_policy *policy); + +/* Find lowest freq at or above target in a table in ascending order */ +static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + unsigned int freq; + int i, best = -1; + + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + freq = table[i].frequency; + + if (freq >= target_freq) + return i; + + best = i; + } + + return best; +} + +/* Find lowest freq at or above target in a table in descending order */ +static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + unsigned int freq; + int i, best = -1; + + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + freq = table[i].frequency; + + if (freq == target_freq) + return i; + + if (freq > target_freq) { + best = i; + continue; + } + + /* No freq found above target_freq */ + if (best == -1) + return i; + + return best; + } + + return best; +} + +/* Works only on sorted freq-tables */ +static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + target_freq = clamp_val(target_freq, policy->min, policy->max); + + if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) + return cpufreq_table_find_index_al(policy, target_freq); + else + return cpufreq_table_find_index_dl(policy, target_freq); +} + +/* Find highest freq at or below target in a table in ascending order */ +static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + unsigned int freq; + int i, best = -1; + + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + freq = table[i].frequency; + + if (freq == target_freq) + return i; + + if (freq < target_freq) { + best = i; + continue; + } + + /* No freq found below target_freq */ + if (best == -1) + return i; + + return best; + } + + return best; +} + +/* Find highest freq at or below target in a table in descending order */ +static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + unsigned int freq; + int i, best = -1; + + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + freq = table[i].frequency; + + if (freq <= target_freq) + return i; + + best = i; + } + + return best; +} + +/* Works only on sorted freq-tables */ +static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + target_freq = clamp_val(target_freq, policy->min, policy->max); + + if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) + return cpufreq_table_find_index_ah(policy, 
target_freq); + else + return cpufreq_table_find_index_dh(policy, target_freq); +} + +/* Find closest freq to target in a table in ascending order */ +static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + unsigned int freq; + int i, best = -1; + + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + freq = table[i].frequency; + + if (freq == target_freq) + return i; + + if (freq < target_freq) { + best = i; + continue; + } + + /* No freq found below target_freq */ + if (best == -1) + return i; + + /* Choose the closest freq */ + if (target_freq - table[best].frequency > freq - target_freq) + return i; + + return best; + } + + return best; +} + +/* Find closest freq to target in a table in descending order */ +static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + unsigned int freq; + int i, best = -1; + + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + freq = table[i].frequency; + + if (freq == target_freq) + return i; + + if (freq > target_freq) { + best = i; + continue; + } + + /* No freq found above target_freq */ + if (best == -1) + return i; + + /* Choose the closest freq */ + if (table[best].frequency - target_freq > target_freq - freq) + return i; + + return best; + } + + return best; +} + +/* Works only on sorted freq-tables */ +static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + target_freq = clamp_val(target_freq, policy->min, policy->max); + + if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) + return cpufreq_table_find_index_ac(policy, target_freq); + else + return cpufreq_table_find_index_dc(policy, target_freq); +} + +static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)) + return cpufreq_table_index_unsorted(policy, target_freq, + relation); + + switch (relation) { + case CPUFREQ_RELATION_L: + return cpufreq_table_find_index_l(policy, target_freq); + case CPUFREQ_RELATION_H: + return cpufreq_table_find_index_h(policy, target_freq); + case CPUFREQ_RELATION_C: + return cpufreq_table_find_index_c(policy, target_freq); + default: + pr_err("%s: Invalid relation: %d\n", __func__, relation); + return -EINVAL; + } +} #else static inline int cpufreq_boost_trigger_state(int state) { @@ -563,8 +874,6 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy) return false; } #endif -/* the following funtion is for cpufreq core use only */ -struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); /* the following are really really optional */ extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 5d68e15e46b7..34bd80512a0c 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -1,21 +1,101 @@ #ifndef __CPUHOTPLUG_H #define __CPUHOTPLUG_H +#include <linux/types.h> + enum cpuhp_state { CPUHP_OFFLINE, CPUHP_CREATE_THREADS, + CPUHP_PERF_PREPARE, + CPUHP_PERF_X86_PREPARE, + CPUHP_PERF_X86_UNCORE_PREP, + CPUHP_PERF_X86_AMD_UNCORE_PREP, + CPUHP_PERF_X86_RAPL_PREP, + CPUHP_PERF_BFIN, + CPUHP_PERF_POWER, + CPUHP_PERF_SUPERH, + CPUHP_X86_HPET_DEAD, + CPUHP_X86_APB_DEAD, + 
CPUHP_WORKQUEUE_PREP, + CPUHP_POWER_NUMA_PREPARE, + CPUHP_HRTIMERS_PREPARE, + CPUHP_PROFILE_PREPARE, + CPUHP_X2APIC_PREPARE, + CPUHP_SMPCFD_PREPARE, + CPUHP_RCUTREE_PREP, CPUHP_NOTIFY_PREPARE, + CPUHP_TIMERS_DEAD, CPUHP_BRINGUP_CPU, CPUHP_AP_IDLE_DEAD, CPUHP_AP_OFFLINE, + CPUHP_AP_SCHED_STARTING, + CPUHP_AP_RCUTREE_DYING, + CPUHP_AP_IRQ_GIC_STARTING, + CPUHP_AP_IRQ_GICV3_STARTING, + CPUHP_AP_IRQ_HIP04_STARTING, + CPUHP_AP_IRQ_ARMADA_XP_STARTING, + CPUHP_AP_IRQ_ARMADA_CASC_STARTING, + CPUHP_AP_IRQ_BCM2836_STARTING, + CPUHP_AP_ARM_MVEBU_COHERENCY, + CPUHP_AP_PERF_X86_UNCORE_STARTING, + CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, + CPUHP_AP_PERF_X86_STARTING, + CPUHP_AP_PERF_X86_AMD_IBS_STARTING, + CPUHP_AP_PERF_X86_CQM_STARTING, + CPUHP_AP_PERF_X86_CSTATE_STARTING, + CPUHP_AP_PERF_XTENSA_STARTING, + CPUHP_AP_PERF_METAG_STARTING, + CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, + CPUHP_AP_ARM_VFP_STARTING, + CPUHP_AP_PERF_ARM_STARTING, + CPUHP_AP_ARM_L2X0_STARTING, + CPUHP_AP_ARM_ARCH_TIMER_STARTING, + CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, + CPUHP_AP_DUMMY_TIMER_STARTING, + CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, + CPUHP_AP_ARM_TWD_STARTING, + CPUHP_AP_METAG_TIMER_STARTING, + CPUHP_AP_QCOM_TIMER_STARTING, + CPUHP_AP_ARMADA_TIMER_STARTING, + CPUHP_AP_MARCO_TIMER_STARTING, + CPUHP_AP_MIPS_GIC_TIMER_STARTING, + CPUHP_AP_ARC_TIMER_STARTING, + CPUHP_AP_KVM_STARTING, + CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, + CPUHP_AP_KVM_ARM_VGIC_STARTING, + CPUHP_AP_KVM_ARM_TIMER_STARTING, + CPUHP_AP_ARM_XEN_STARTING, + CPUHP_AP_ARM_CORESIGHT_STARTING, + CPUHP_AP_ARM_CORESIGHT4_STARTING, + CPUHP_AP_ARM64_ISNDEP_STARTING, + CPUHP_AP_SMPCFD_DYING, + CPUHP_AP_X86_TBOOT_DYING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, CPUHP_AP_ONLINE_IDLE, CPUHP_AP_SMPBOOT_THREADS, + CPUHP_AP_X86_VDSO_VMA_ONLINE, + CPUHP_AP_PERF_ONLINE, + CPUHP_AP_PERF_X86_ONLINE, + CPUHP_AP_PERF_X86_UNCORE_ONLINE, + CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, + CPUHP_AP_PERF_X86_AMD_POWER_ONLINE, + CPUHP_AP_PERF_X86_RAPL_ONLINE, + CPUHP_AP_PERF_X86_CQM_ONLINE, + CPUHP_AP_PERF_X86_CSTATE_ONLINE, + CPUHP_AP_PERF_S390_CF_ONLINE, + CPUHP_AP_PERF_S390_SF_ONLINE, + CPUHP_AP_PERF_ARM_CCI_ONLINE, + CPUHP_AP_PERF_ARM_CCN_ONLINE, + CPUHP_AP_WORKQUEUE_ONLINE, + CPUHP_AP_RCUTREE_ONLINE, CPUHP_AP_NOTIFY_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, + CPUHP_AP_X86_HPET_ONLINE, + CPUHP_AP_X86_KVM_CLK_ONLINE, + CPUHP_AP_ACTIVE, CPUHP_ONLINE, }; diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 786ad32631a6..bb31373c3478 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -152,6 +152,8 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev); extern int cpuidle_play_dead(void); extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); +static inline struct cpuidle_device *cpuidle_get_device(void) +{return __this_cpu_read(cpuidle_devices); } #else static inline void disable_cpuidle(void) { } static inline bool cpuidle_not_available(struct cpuidle_driver *drv, @@ -187,6 +189,7 @@ static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } static inline int cpuidle_play_dead(void) {return -ENODEV; } static inline struct cpuidle_driver *cpuidle_get_cpu_driver( struct cpuidle_device *dev) {return NULL; } +static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; } #endif #if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND) @@ -249,4 +252,22 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov) #define 
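These CPUHP_* constants are consumed by the hotplug state machine's registration API (cpuhp_setup_state() and friends, provided by this same header). A sketch of a driver taking one of the dynamic slots rather than a fixed enum entry; the callbacks are placeholders:

#include <linux/cpuhotplug.h>

static int foo_cpu_online(unsigned int cpu)
{
	return 0;	/* per-CPU bring-up work */
}

static int foo_cpu_offline(unsigned int cpu)
{
	return 0;	/* per-CPU teardown work */
}

static int __init foo_init(void)
{
	int ret;

	/* CPUHP_AP_ONLINE_DYN hands out one of the 30 dynamic slots above */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
				foo_cpu_online, foo_cpu_offline);
	return ret < 0 ? ret : 0;	/* >0 is the allocated state number */
}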
CPUIDLE_DRIVER_STATE_START 0 #endif +#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \ +({ \ + int __ret; \ + \ + if (!idx) { \ + cpu_do_idle(); \ + return idx; \ + } \ + \ + __ret = cpu_pm_enter(); \ + if (!__ret) { \ + __ret = low_level_idle_enter(idx); \ + cpu_pm_exit(); \ + } \ + \ + __ret ? -1 : idx; \ +}) + #endif /* _LINUX_CPUIDLE_H */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 40cee6b77a93..da7fbf1cdd56 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -579,7 +579,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len, } /** - * cpumask_parse - extract a cpumask from from a string + * cpumask_parse - extract a cpumask from a string * @buf: the buffer to extract from * @dstp: the cpumask to set. * @@ -743,12 +743,10 @@ set_cpu_present(unsigned int cpu, bool present) static inline void set_cpu_online(unsigned int cpu, bool online) { - if (online) { + if (online) cpumask_set_cpu(cpu, &__cpu_online_mask); - cpumask_set_cpu(cpu, &__cpu_active_mask); - } else { + else cpumask_clear_cpu(cpu, &__cpu_online_mask); - } } static inline void diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 85a868ccb493..bfc204e70338 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -16,26 +16,26 @@ #ifdef CONFIG_CPUSETS -extern struct static_key cpusets_enabled_key; +extern struct static_key_false cpusets_enabled_key; static inline bool cpusets_enabled(void) { - return static_key_false(&cpusets_enabled_key); + return static_branch_unlikely(&cpusets_enabled_key); } static inline int nr_cpusets(void) { /* jump label reference count + the top-level cpuset */ - return static_key_count(&cpusets_enabled_key) + 1; + return static_key_count(&cpusets_enabled_key.key) + 1; } static inline void cpuset_inc(void) { - static_key_slow_inc(&cpusets_enabled_key); + static_branch_inc(&cpusets_enabled_key); } static inline void cpuset_dec(void) { - static_key_slow_dec(&cpusets_enabled_key); + static_branch_dec(&cpusets_enabled_key); } extern int cpuset_init(void); @@ -48,16 +48,25 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p); void cpuset_init_current_mems_allowed(void); int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); -extern int __cpuset_node_allowed(int node, gfp_t gfp_mask); +extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask); -static inline int cpuset_node_allowed(int node, gfp_t gfp_mask) +static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) { - return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask); + if (cpusets_enabled()) + return __cpuset_node_allowed(node, gfp_mask); + return true; } -static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) +static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { - return cpuset_node_allowed(zone_to_nid(z), gfp_mask); + return __cpuset_node_allowed(zone_to_nid(z), gfp_mask); +} + +static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) +{ + if (cpusets_enabled()) + return __cpuset_zone_allowed(z, gfp_mask); + return true; } extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, @@ -172,14 +181,19 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) return 1; } -static inline int cpuset_node_allowed(int node, gfp_t gfp_mask) +static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) { - return 1; + return true; } -static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) +static inline bool 
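CPU_PM_CPU_IDLE_ENTER() packages the common enter pattern for ARM-flavoured cpuidle drivers: state 0 collapses to a plain cpu_do_idle(), while deeper states are bracketed by cpu_pm_enter()/cpu_pm_exit(). A sketch with the firmware call stubbed out (names are hypothetical):

#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>

static int foo_lp_enter(int idx)
{
	return 0;	/* stands in for a PSCI/firmware suspend call */
}

static int foo_enter_idle(struct cpuidle_device *dev,
			  struct cpuidle_driver *drv, int idx)
{
	/* evaluates to idx on success, -1 if cpu_pm_enter() refused */
	return CPU_PM_CPU_IDLE_ENTER(foo_lp_enter, idx);
}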
__cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { - return 1; + return true; +} + +static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) +{ + return true; } static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 3849fce7ecfe..3873697ba21c 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -34,9 +34,13 @@ void vmcore_cleanup(void); /* * Architecture code can redefine this if there are any special checks - * needed for 64-bit ELF vmcores. In case of 32-bit only architecture, - * this can be set to zero. + * needed for 32-bit ELF or 64-bit ELF vmcores. In case of 32-bit + * only architecture, vmcore_elf64_check_arch can be set to zero. */ +#ifndef vmcore_elf32_check_arch +#define vmcore_elf32_check_arch(x) elf_check_arch(x) +#endif + #ifndef vmcore_elf64_check_arch #define vmcore_elf64_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x)) #endif diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 99c94899ad0f..7cee5551625b 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -47,16 +47,18 @@ #define CRYPTO_ALG_TYPE_AEAD 0x00000003 #define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 #define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 +#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 -#define CRYPTO_ALG_TYPE_DIGEST 0x00000008 -#define CRYPTO_ALG_TYPE_HASH 0x00000008 -#define CRYPTO_ALG_TYPE_SHASH 0x00000009 -#define CRYPTO_ALG_TYPE_AHASH 0x0000000a +#define CRYPTO_ALG_TYPE_KPP 0x00000008 #define CRYPTO_ALG_TYPE_RNG 0x0000000c #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d +#define CRYPTO_ALG_TYPE_DIGEST 0x0000000e +#define CRYPTO_ALG_TYPE_HASH 0x0000000e +#define CRYPTO_ALG_TYPE_SHASH 0x0000000e +#define CRYPTO_ALG_TYPE_AHASH 0x0000000f #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e -#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c +#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c #define CRYPTO_ALG_LARVAL 0x00000010 @@ -486,8 +488,6 @@ struct ablkcipher_tfm { unsigned int keylen); int (*encrypt)(struct ablkcipher_request *req); int (*decrypt)(struct ablkcipher_request *req); - int (*givencrypt)(struct skcipher_givcrypt_request *req); - int (*givdecrypt)(struct skcipher_givcrypt_request *req); struct crypto_ablkcipher *base; @@ -712,23 +712,6 @@ static inline u32 crypto_skcipher_mask(u32 mask) * state information is unused by the kernel crypto API. */ -/** - * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle - * @alg_name: is the cra_name / name or cra_driver_name / driver name of the - * ablkcipher cipher - * @type: specifies the type of the cipher - * @mask: specifies the mask for the cipher - * - * Allocate a cipher handle for an ablkcipher. The returned struct - * crypto_ablkcipher is the cipher handle that is required for any subsequent - * API invocation for that ablkcipher. - * - * Return: allocated cipher handle in case of success; IS_ERR() is true in case - * of an error, PTR_ERR() returns the error code. - */ -struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, - u32 type, u32 mask); - static inline struct crypto_tfm *crypto_ablkcipher_tfm( struct crypto_ablkcipher *tfm) { @@ -948,8 +931,7 @@ static inline struct ablkcipher_request *ablkcipher_request_cast( * encrypt and decrypt API calls. During the allocation, the provided ablkcipher * handle is registered in the request data structure. 
* - * Return: allocated request handle in case of success; IS_ERR() is true in case - * of an error, PTR_ERR() returns the error code. + * Return: allocated request handle in case of success, or NULL if out of memory */ static inline struct ablkcipher_request *ablkcipher_request_alloc( struct crypto_ablkcipher *tfm, gfp_t gfp) diff --git a/include/linux/dax.h b/include/linux/dax.h index 636dd59ab505..9c6dc7704043 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -3,45 +3,57 @@ #include <linux/fs.h> #include <linux/mm.h> +#include <linux/radix-tree.h> #include <asm/pgtable.h> -ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t, +/* We use lowest available exceptional entry bit for locking */ +#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT) + +ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, get_block_t, dio_iodone_t, int flags); -int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size); int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); int dax_truncate_page(struct inode *, loff_t from, get_block_t); -int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t, - dax_iodone_t); -int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t, - dax_iodone_t); +int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); +int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); +void dax_wake_mapping_entry_waiter(struct address_space *mapping, + pgoff_t index, bool wake_all); #ifdef CONFIG_FS_DAX struct page *read_dax_sector(struct block_device *bdev, sector_t n); +void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index); +int __dax_zero_page_range(struct block_device *bdev, sector_t sector, + unsigned int offset, unsigned int length); #else static inline struct page *read_dax_sector(struct block_device *bdev, sector_t n) { return ERR_PTR(-ENXIO); } +/* Shouldn't ever be called when dax is disabled. 
*/ +static inline void dax_unlock_mapping_entry(struct address_space *mapping, + pgoff_t index) +{ + BUG(); +} +static inline int __dax_zero_page_range(struct block_device *bdev, + sector_t sector, unsigned int offset, unsigned int length) +{ + return -ENXIO; +} #endif -#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, - unsigned int flags, get_block_t, dax_iodone_t); -int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, - unsigned int flags, get_block_t, dax_iodone_t); + unsigned int flags, get_block_t); #else static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd, unsigned int flags, get_block_t gb, - dax_iodone_t di) + pmd_t *pmd, unsigned int flags, get_block_t gb) { return VM_FAULT_FALLBACK; } -#define __dax_pmd_fault dax_pmd_fault #endif int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); -#define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod) -#define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod) +#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) static inline bool vma_is_dax(struct vm_area_struct *vma) { diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 7e9422cb5989..5ff3e9a4fe5f 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -10,6 +10,7 @@ #include <linux/cache.h> #include <linux/rcupdate.h> #include <linux/lockref.h> +#include <linux/stringhash.h> struct path; struct vfsmount; @@ -52,9 +53,6 @@ struct qstr { }; #define QSTR_INIT(n,l) { { { .len = l } }, .name = n } -#define hashlen_hash(hashlen) ((u32) (hashlen)) -#define hashlen_len(hashlen) ((u32)((hashlen) >> 32)) -#define hashlen_create(hash,len) (((u64)(len)<<32)|(u32)(hash)) struct dentry_stat_t { long nr_dentry; @@ -65,29 +63,6 @@ struct dentry_stat_t { }; extern struct dentry_stat_t dentry_stat; -/* Name hashing routines. Initial hash value */ -/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */ -#define init_name_hash() 0 - -/* partial hash update function. Assume roughly 4 bits per character */ -static inline unsigned long -partial_name_hash(unsigned long c, unsigned long prevhash) -{ - return (prevhash + (c << 4) + (c >> 4)) * 11; -} - -/* - * Finally: cut down the number of bits to a int value (and try to avoid - * losing bits) - */ -static inline unsigned long end_name_hash(unsigned long hash) -{ - return (unsigned int) hash; -} - -/* Compute the hash for a name string. 
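The name-hashing helpers deleted from dcache.h below were moved, not removed: they now live in the new <linux/stringhash.h>, which this hunk pulls in at the top of the header. (Later in the same cycle they grew a salt argument, so treat the exact signatures as a snapshot.) Open-coding full_name_hash() with the moved helpers would look roughly like:

#include <linux/stringhash.h>

/* Equivalent of full_name_hash(name, len) at this point in the tree. */
static unsigned int foo_hash_name(const unsigned char *name, unsigned int len)
{
	unsigned long hash = init_name_hash();

	while (len--)
		hash = partial_name_hash(*name++, hash);

	return end_name_hash(hash);
}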
*/ -extern unsigned int full_name_hash(const unsigned char *, unsigned int); - /* * Try to keep struct dentry aligned on 64 byte cachelines (this will * give reasonable cacheline footprint with larger lines without the @@ -123,7 +98,10 @@ struct dentry { unsigned long d_time; /* used by d_revalidate */ void *d_fsdata; /* fs-specific data */ - struct list_head d_lru; /* LRU list */ + union { + struct list_head d_lru; /* LRU list */ + wait_queue_head_t *d_wait; /* in-lookup ones only */ + }; struct list_head d_child; /* child of parent list */ struct list_head d_subdirs; /* our children */ /* @@ -131,6 +109,7 @@ struct dentry { */ union { struct hlist_node d_alias; /* inode alias list */ + struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */ struct rcu_head d_rcu; } d_u; }; @@ -151,17 +130,18 @@ struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); - int (*d_compare)(const struct dentry *, const struct dentry *, + int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); + int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char *(*d_dname)(struct dentry *, char *, int); struct vfsmount *(*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool); - struct inode *(*d_select_inode)(struct dentry *, unsigned); - struct dentry *(*d_real)(struct dentry *, struct inode *); + struct dentry *(*d_real)(struct dentry *, const struct inode *, + unsigned int); } ____cacheline_aligned; /* @@ -227,10 +207,11 @@ struct dentry_operations { #define DCACHE_MAY_FREE 0x00800000 #define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */ -#define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */ +#define DCACHE_ENCRYPTED_WITH_KEY 0x02000000 /* dir is encrypted with a valid key */ +#define DCACHE_OP_REAL 0x04000000 -#define DCACHE_ENCRYPTED_WITH_KEY 0x04000000 /* dir is encrypted with a valid key */ -#define DCACHE_OP_REAL 0x08000000 +#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */ +#define DCACHE_DENTRY_CURSOR 0x20000000 extern seqlock_t rename_lock; @@ -248,6 +229,8 @@ extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op /* allocate/de-allocate */ extern struct dentry * d_alloc(struct dentry *, const struct qstr *); extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); +extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *, + wait_queue_head_t *); extern struct dentry * d_splice_alias(struct inode *, struct dentry *); extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); extern struct dentry * d_exact_alias(struct dentry *, struct inode *); @@ -280,7 +263,7 @@ extern void d_rehash(struct dentry *); extern void d_add(struct dentry *, struct inode *); -extern void dentry_update_name_case(struct dentry *, struct qstr *); +extern void dentry_update_name_case(struct dentry *, const struct qstr *); /* used for rename() and baskets */ extern void d_move(struct dentry *, struct dentry *); @@ -367,6 +350,22 @@ static inline void dont_mount(struct dentry *dentry) spin_unlock(&dentry->d_lock); } +extern void __d_lookup_done(struct dentry *); + +static inline int d_in_lookup(struct dentry *dentry) +{ + return 
dentry->d_flags & DCACHE_PAR_LOOKUP; +} + +static inline void d_lookup_done(struct dentry *dentry) +{ + if (unlikely(d_in_lookup(dentry))) { + spin_lock(&dentry->d_lock); + __d_lookup_done(dentry); + spin_unlock(&dentry->d_lock); + } +} + extern void dput(struct dentry *); static inline bool d_managed(const struct dentry *dentry) @@ -557,23 +556,37 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper) return upper; } -static inline struct dentry *d_real(struct dentry *dentry) +/** + * d_real - Return the real dentry + * @dentry: the dentry to query + * @inode: inode to select the dentry from multiple layers (can be NULL) + * @flags: open flags to control copy-up behavior + * + * If dentry is on an union/overlay, then return the underlying, real dentry. + * Otherwise return the dentry itself. + * + * See also: Documentation/filesystems/vfs.txt + */ +static inline struct dentry *d_real(struct dentry *dentry, + const struct inode *inode, + unsigned int flags) { if (unlikely(dentry->d_flags & DCACHE_OP_REAL)) - return dentry->d_op->d_real(dentry, NULL); + return dentry->d_op->d_real(dentry, inode, flags); else return dentry; } -static inline struct inode *vfs_select_inode(struct dentry *dentry, - unsigned open_flags) +/** + * d_real_inode - Return the real inode + * @dentry: The dentry to query + * + * If dentry is on an union/overlay, then return the underlying, real inode. + * Otherwise return d_inode(). + */ +static inline struct inode *d_real_inode(struct dentry *dentry) { - struct inode *inode = d_inode(dentry); - - if (inode && unlikely(dentry->d_flags & DCACHE_OP_SELECT_INODE)) - inode = dentry->d_op->d_select_inode(dentry, open_flags); - - return inode; + return d_backing_inode(d_real(dentry, NULL, 0)); } diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 981e53ab84e8..1438e2322d5c 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -19,9 +19,11 @@ #include <linux/seq_file.h> #include <linux/types.h> +#include <linux/compiler.h> struct device; struct file_operations; +struct srcu_struct; struct debugfs_blob_wrapper { void *data; @@ -41,14 +43,16 @@ struct debugfs_regset32 { extern struct dentry *arch_debugfs_dir; -#if defined(CONFIG_DEBUG_FS) +extern struct srcu_struct debugfs_srcu; -/* declared over in file.c */ -extern const struct file_operations debugfs_file_operations; +#if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops); +struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops); struct dentry *debugfs_create_file_size(const char *name, umode_t mode, struct dentry *parent, void *data, @@ -68,6 +72,31 @@ struct dentry *debugfs_create_automount(const char *name, void debugfs_remove(struct dentry *dentry); void debugfs_remove_recursive(struct dentry *dentry); +int debugfs_use_file_start(const struct dentry *dentry, int *srcu_idx) + __acquires(&debugfs_srcu); + +void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu); + +ssize_t debugfs_attr_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos); +ssize_t debugfs_attr_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos); + +#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ +static int __fops ## _open(struct inode *inode, struct file *file) \ +{ \ + __simple_attr_check_format(__fmt, 0ull); \ + return 
simple_attr_open(inode, file, __get, __set, __fmt); \ +} \ +static const struct file_operations __fops = { \ + .owner = THIS_MODULE, \ + .open = __fops ## _open, \ + .release = simple_attr_release, \ + .read = debugfs_attr_read, \ + .write = debugfs_attr_write, \ + .llseek = generic_file_llseek, \ +} + struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, const char *new_name); @@ -176,6 +205,20 @@ static inline void debugfs_remove(struct dentry *dentry) static inline void debugfs_remove_recursive(struct dentry *dentry) { } +static inline int debugfs_use_file_start(const struct dentry *dentry, + int *srcu_idx) + __acquires(&debugfs_srcu) +{ + return 0; +} + +static inline void debugfs_use_file_finish(int srcu_idx) + __releases(&debugfs_srcu) +{ } + +#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ + static const struct file_operations __fops = { 0 } + static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, char *new_name) { diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h index 98ffcbd4888e..d82bf1994485 100644 --- a/include/linux/debugobjects.h +++ b/include/linux/debugobjects.h @@ -38,8 +38,10 @@ struct debug_obj { * @name: name of the object typee * @debug_hint: function returning address, which have associated * kernel symbol, to allow identify the object + * @is_static_object: return true if the obj is static, otherwise return false * @fixup_init: fixup function, which is called when the init check - * fails + * fails. All fixup functions must return true if fixup + * was successful, otherwise return false * @fixup_activate: fixup function, which is called when the activate check * fails * @fixup_destroy: fixup function, which is called when the destroy check @@ -51,12 +53,13 @@ struct debug_obj { */ struct debug_obj_descr { const char *name; - void *(*debug_hint) (void *addr); - int (*fixup_init) (void *addr, enum debug_obj_state state); - int (*fixup_activate) (void *addr, enum debug_obj_state state); - int (*fixup_destroy) (void *addr, enum debug_obj_state state); - int (*fixup_free) (void *addr, enum debug_obj_state state); - int (*fixup_assert_init)(void *addr, enum debug_obj_state state); + void *(*debug_hint)(void *addr); + bool (*is_static_object)(void *addr); + bool (*fixup_init)(void *addr, enum debug_obj_state state); + bool (*fixup_activate)(void *addr, enum debug_obj_state state); + bool (*fixup_destroy)(void *addr, enum debug_obj_state state); + bool (*fixup_free)(void *addr, enum debug_obj_state state); + bool (*fixup_assert_init)(void *addr, enum debug_obj_state state); }; #ifdef CONFIG_DEBUG_OBJECTS diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h index c0a360e99f64..269521f143ac 100644 --- a/include/linux/devcoredump.h +++ b/include/linux/devcoredump.h @@ -1,3 +1,22 @@ +/* + * This file is provided under the GPLv2 license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + */ #ifndef __DEVCOREDUMP_H #define __DEVCOREDUMP_H @@ -5,17 +24,62 @@ #include <linux/module.h> #include <linux/vmalloc.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> + +/* + * _devcd_free_sgtable - free all the memory of the given scatterlist table + * (i.e. both pages and scatterlist instances) + * NOTE: if two tables allocated and chained using the sg_chain function then + * this function should be called only once on the first table + * @table: pointer to sg_table to free + */ +static inline void _devcd_free_sgtable(struct scatterlist *table) +{ + int i; + struct page *page; + struct scatterlist *iter; + struct scatterlist *delete_iter; + + /* free pages */ + iter = table; + for_each_sg(table, iter, sg_nents(table), i) { + page = sg_page(iter); + if (page) + __free_page(page); + } + + /* then free all chained tables */ + iter = table; + delete_iter = table; /* always points on a head of a table */ + while (!sg_is_last(iter)) { + iter++; + if (sg_is_chain(iter)) { + iter = sg_chain_ptr(iter); + kfree(delete_iter); + delete_iter = iter; + } + } + + /* free the last table */ + kfree(delete_iter); +} + + #ifdef CONFIG_DEV_COREDUMP -void dev_coredumpv(struct device *dev, const void *data, size_t datalen, +void dev_coredumpv(struct device *dev, void *data, size_t datalen, gfp_t gfp); void dev_coredumpm(struct device *dev, struct module *owner, - const void *data, size_t datalen, gfp_t gfp, + void *data, size_t datalen, gfp_t gfp, ssize_t (*read)(char *buffer, loff_t offset, size_t count, - const void *data, size_t datalen), - void (*free)(const void *data)); + void *data, size_t datalen), + void (*free)(void *data)); + +void dev_coredumpsg(struct device *dev, struct scatterlist *table, + size_t datalen, gfp_t gfp); #else -static inline void dev_coredumpv(struct device *dev, const void *data, +static inline void dev_coredumpv(struct device *dev, void *data, size_t datalen, gfp_t gfp) { vfree(data); @@ -23,13 +87,19 @@ static inline void dev_coredumpv(struct device *dev, const void *data, static inline void dev_coredumpm(struct device *dev, struct module *owner, - const void *data, size_t datalen, gfp_t gfp, + void *data, size_t datalen, gfp_t gfp, ssize_t (*read)(char *buffer, loff_t offset, size_t count, - const void *data, size_t datalen), - void (*free)(const void *data)) + void *data, size_t datalen), + void (*free)(void *data)) { free(data); } + +static inline void dev_coredumpsg(struct device *dev, struct scatterlist *table, + size_t datalen, gfp_t gfp) +{ + _devcd_free_sgtable(table); +} #endif /* CONFIG_DEV_COREDUMP */ #endif /* __DEVCOREDUMP_H */ diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 6fa02a20eb63..2de4e2eea180 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -19,6 +19,13 @@ #define DEVFREQ_NAME_LEN 16 +/* DEVFREQ notifier interface */ +#define DEVFREQ_TRANSITION_NOTIFIER (0) + +/* Transition notifiers of DEVFREQ_TRANSITION_NOTIFIER */ +#define DEVFREQ_PRECHANGE (0) +#define DEVFREQ_POSTCHANGE (1) + struct devfreq; /** @@ -143,6 +150,7 @@ struct devfreq_governor { * @trans_table: Statistics of devfreq transitions * @time_in_state: Statistics of devfreq states * @last_stat_updated: The last time stat updated + * @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifier * * This structure stores the devfreq information for a give device. 
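 *
 * A minimal sketch of the transition-notifier interface added above
 * (the callback, 'my_nb' and the 'df' handle are hypothetical names,
 * not part of this patch):
 *
 *	static int my_transition_cb(struct notifier_block *nb,
 *				    unsigned long event, void *data)
 *	{
 *		struct devfreq_freqs *freqs = data;
 *
 *		if (event == DEVFREQ_POSTCHANGE)
 *			pr_debug("devfreq: %lu -> %lu\n",
 *				 freqs->old, freqs->new);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_transition_cb,
 *	};
 *
 *	devfreq_register_notifier(df, &my_nb, DEVFREQ_TRANSITION_NOTIFIER);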
 * @@ -177,6 +185,13 @@ struct devfreq { unsigned int *trans_table; unsigned long *time_in_state; unsigned long last_stat_updated; + + struct srcu_notifier_head transition_notifier_list; +}; + +struct devfreq_freqs { + unsigned long old; + unsigned long new; }; #if defined(CONFIG_PM_DEVFREQ) @@ -207,6 +222,22 @@ extern int devm_devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq); extern void devm_devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq); +extern int devfreq_register_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +extern int devfreq_unregister_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +extern int devm_devfreq_register_notifier(struct device *dev, + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +extern void devm_devfreq_unregister_notifier(struct device *dev, + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +extern struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, + int index); /** * devfreq_update_stats() - update the last_status pointer in struct devfreq @@ -241,6 +272,39 @@ struct devfreq_simple_ondemand_data { }; #endif +#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE) +/** + * struct devfreq_passive_data - void *data fed to struct devfreq + * and devfreq_add_device + * @parent: the devfreq instance of the parent device. + * @get_target_freq: optional callback that returns the desired operating + * frequency for the device using the passive governor. It is + * called when the passive governor must decide the next + * frequency from the new frequency of the parent devfreq + * device. If the devfreq device has a specific method for + * deciding the next frequency, use this callback. + * @this: the devfreq instance of own device. + * @nb: the notifier block for the DEVFREQ_TRANSITION_NOTIFIER list + * + * devfreq_passive_data must set @parent to the devfreq instance of the + * parent device, which uses any governor except the passive governor. The + * 'this' and 'nb' fields need not be initialized; the devfreq core handles + * them. + */ +struct devfreq_passive_data { + /* Should set the devfreq instance of parent device */ + struct devfreq *parent; + + /* Optional callback to decide the next frequency of passive device */ + int (*get_target_freq)(struct devfreq *this, unsigned long *freq); + + /* For passive governor's internal use. 
Don't need to set them */ + struct devfreq *this; + struct notifier_block nb; +}; +#endif + #else /* !CONFIG_PM_DEVFREQ */ static inline struct devfreq *devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, @@ -307,6 +371,41 @@ static inline void devm_devfreq_unregister_opp_notifier(struct device *dev, { } +static inline int devfreq_register_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) +{ + return 0; +} + +static inline int devfreq_unregister_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) +{ + return 0; +} + +static inline int devm_devfreq_register_notifier(struct device *dev, + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) +{ + return 0; +} + +static inline void devm_devfreq_unregister_notifier(struct device *dev, + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) +{ +} + +static inline struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, + int index) +{ + return ERR_PTR(-ENODEV); +} + static inline int devfreq_update_stats(struct devfreq *df) { return -EINVAL; diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 0830c9e86f0d..91acfce74a22 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -19,6 +19,15 @@ struct dm_table; struct mapped_device; struct bio_vec; +/* + * Type of table, mapped_device's mempool and request_queue + */ +#define DM_TYPE_NONE 0 +#define DM_TYPE_BIO_BASED 1 +#define DM_TYPE_REQUEST_BASED 2 +#define DM_TYPE_MQ_REQUEST_BASED 3 +#define DM_TYPE_DAX_BIO_BASED 4 + typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; union map_info { @@ -116,6 +125,14 @@ typedef void (*dm_io_hints_fn) (struct dm_target *ti, */ typedef int (*dm_busy_fn) (struct dm_target *ti); +/* + * Returns: + * < 0 : error + * >= 0 : the number of bytes accessible at the address + */ +typedef long (*dm_direct_access_fn) (struct dm_target *ti, sector_t sector, + void **kaddr, pfn_t *pfn, long size); + void dm_error(const char *message); struct dm_dev { @@ -162,6 +179,7 @@ struct target_type { dm_busy_fn busy; dm_iterate_devices_fn iterate_devices; dm_io_hints_fn io_hints; + dm_direct_access_fn direct_access; /* For internal device-mapper use. */ struct list_head list; @@ -444,6 +462,14 @@ int dm_table_add_target(struct dm_table *t, const char *type, void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb); /* + * Target can use this to set the table's type. + * Can only ever be called from a target's ctr. + * Useful for "hybrid" target (supports both bio-based + * and request-based). + */ +void dm_table_set_type(struct dm_table *t, unsigned type); + +/* * Finally call this to make the table ready for use. 
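 *
 * A minimal sketch of how a hybrid target's ctr might use the new hook
 * ('my_hybrid_ctr' and the 'use_request_based' decision are hypothetical,
 * not part of this patch):
 *
 *	static int my_hybrid_ctr(struct dm_target *ti,
 *				 unsigned argc, char **argv)
 *	{
 *		if (use_request_based)
 *			dm_table_set_type(ti->table, DM_TYPE_REQUEST_BASED);
 *		else
 *			dm_table_set_type(ti->table, DM_TYPE_BIO_BASED);
 *		return 0;
 *	}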
*/ int dm_table_complete(struct dm_table *t); diff --git a/include/linux/device.h b/include/linux/device.h index 002c59728dbe..38f02814d53a 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -609,14 +609,14 @@ typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); #ifdef CONFIG_DEBUG_DEVRES extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, - int nid, const char *name); + int nid, const char *name) __malloc; #define devres_alloc(release, size, gfp) \ __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release) #define devres_alloc_node(release, size, gfp, nid) \ __devres_alloc_node(release, size, gfp, nid, #release) #else extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, - int nid); + int nid) __malloc; static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp) { return devres_alloc_node(release, size, gfp, NUMA_NO_NODE); @@ -648,12 +648,12 @@ extern void devres_remove_group(struct device *dev, void *id); extern int devres_release_group(struct device *dev, void *id); /* managed devm_k.alloc/kfree for device drivers */ -extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp); +extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc; extern __printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, - va_list ap); + va_list ap) __malloc; extern __printf(3, 4) -char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...); +char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc; static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) { return devm_kmalloc(dev, size, gfp | __GFP_ZERO); @@ -671,7 +671,7 @@ static inline void *devm_kcalloc(struct device *dev, return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); } extern void devm_kfree(struct device *dev, void *p); -extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); +extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc; extern void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp); @@ -956,11 +956,6 @@ static inline bool device_async_suspend_enabled(struct device *dev) return !!dev->power.async_suspend; } -static inline void pm_suspend_ignore_children(struct device *dev, bool enable) -{ - dev->power.ignore_children = enable; -} - static inline void dev_pm_syscore_device(struct device *dev, bool val) { #ifdef CONFIG_PM_SLEEP @@ -1293,8 +1288,11 @@ do { \ dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ } while (0) #else -#define dev_dbg_ratelimited(dev, fmt, ...) \ - no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#define dev_dbg_ratelimited(dev, fmt, ...) 
\ +do { \ + if (0) \ + dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ +} while (0) #endif #ifdef VERBOSE_DEBUG diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h index 5871f292b596..277ab9af9ac2 100644 --- a/include/linux/devpts_fs.h +++ b/include/linux/devpts_fs.h @@ -15,13 +15,12 @@ #include <linux/errno.h> -struct pts_fs_info; - #ifdef CONFIG_UNIX98_PTYS -/* Look up a pts fs info and get a ref to it */ -struct pts_fs_info *devpts_get_ref(struct inode *, struct file *); -void devpts_put_ref(struct pts_fs_info *); +struct pts_fs_info; + +struct pts_fs_info *devpts_acquire(struct file *); +void devpts_release(struct pts_fs_info *); int devpts_new_index(struct pts_fs_info *); void devpts_kill_index(struct pts_fs_info *, int); diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h index a68cbe59e6ad..b91b023deffb 100644 --- a/include/linux/dm-io.h +++ b/include/linux/dm-io.h @@ -57,7 +57,8 @@ struct dm_io_notify { */ struct dm_io_client; struct dm_io_request { - int bi_rw; /* READ|WRITE - not READA */ + int bi_op; /* REQ_OP */ + int bi_op_flags; /* rq_flag_bits */ struct dm_io_memory mem; /* Memory to use for io */ struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */ struct dm_io_client *client; /* Client memory handler */ diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h deleted file mode 100644 index 5246239a4953..000000000000 --- a/include/linux/dma-attrs.h +++ /dev/null @@ -1,71 +0,0 @@ -#ifndef _DMA_ATTR_H -#define _DMA_ATTR_H - -#include <linux/bitmap.h> -#include <linux/bitops.h> -#include <linux/bug.h> - -/** - * an enum dma_attr represents an attribute associated with a DMA - * mapping. The semantics of each attribute should be defined in - * Documentation/DMA-attributes.txt. - */ -enum dma_attr { - DMA_ATTR_WRITE_BARRIER, - DMA_ATTR_WEAK_ORDERING, - DMA_ATTR_WRITE_COMBINE, - DMA_ATTR_NON_CONSISTENT, - DMA_ATTR_NO_KERNEL_MAPPING, - DMA_ATTR_SKIP_CPU_SYNC, - DMA_ATTR_FORCE_CONTIGUOUS, - DMA_ATTR_ALLOC_SINGLE_PAGES, - DMA_ATTR_MAX, -}; - -#define __DMA_ATTRS_LONGS BITS_TO_LONGS(DMA_ATTR_MAX) - -/** - * struct dma_attrs - an opaque container for DMA attributes - * @flags - bitmask representing a collection of enum dma_attr - */ -struct dma_attrs { - unsigned long flags[__DMA_ATTRS_LONGS]; -}; - -#define DEFINE_DMA_ATTRS(x) \ - struct dma_attrs x = { \ - .flags = { [0 ... __DMA_ATTRS_LONGS-1] = 0 }, \ - } - -static inline void init_dma_attrs(struct dma_attrs *attrs) -{ - bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS); -} - -/** - * dma_set_attr - set a specific attribute - * @attr: attribute to set - * @attrs: struct dma_attrs (may be NULL) - */ -static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs) -{ - if (attrs == NULL) - return; - BUG_ON(attr >= DMA_ATTR_MAX); - __set_bit(attr, attrs->flags); -} - -/** - * dma_get_attr - check for a specific attribute - * @attr: attribute to set - * @attrs: struct dma_attrs (may be NULL) - */ -static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs) -{ - if (attrs == NULL) - return 0; - BUG_ON(attr >= DMA_ATTR_MAX); - return test_bit(attr, attrs->flags); -} - -#endif /* _DMA_ATTR_H */ diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 3fe90d494edb..e0b0741ae671 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -112,19 +112,24 @@ struct dma_buf_ops { * @file: file pointer used for sharing buffers across, and for refcounting. * @attachments: list of dma_buf_attachment that denotes all devices attached. 
* @ops: dma_buf_ops associated with this buffer object. + * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap + * @vmapping_counter: used internally to refcnt the vmaps + * @vmap_ptr: the current vmap ptr if vmapping_counter > 0 * @exp_name: name of the exporter; useful for debugging. * @owner: pointer to exporter module; used for refcounting when exporter is a * kernel module. * @list_node: node for dma_buf accounting and debugging. * @priv: exporter specific private data for this buffer object. * @resv: reservation object linked to this dma-buf + * @poll: for userspace poll support + * @cb_excl: for userspace poll support + * @cb_shared: for userspace poll support */ struct dma_buf { size_t size; struct file *file; struct list_head attachments; const struct dma_buf_ops *ops; - /* mutex to serialize list manipulation, attach/detach and vmap/unmap */ struct mutex lock; unsigned vmapping_counter; void *vmap_ptr; @@ -188,9 +193,11 @@ struct dma_buf_export_info { /** * helper macro for exporters; zeros and fills in most common values + * + * @name: export-info name */ -#define DEFINE_DMA_BUF_EXPORT_INFO(a) \ - struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME, \ +#define DEFINE_DMA_BUF_EXPORT_INFO(name) \ + struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \ .owner = THIS_MODULE } /** @@ -235,6 +242,4 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long); void *dma_buf_vmap(struct dma_buf *); void dma_buf_vunmap(struct dma_buf *, void *vaddr); -int dma_buf_debugfs_create_file(const char *name, - int (*write)(struct seq_file *)); #endif /* __DMA_BUF_H__ */ diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index fc481037478a..81c5c8d167ad 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -38,8 +38,8 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent); * These implement the bulk of the relevant DMA mapping callbacks, but require * the arch code to take care of attributes and cache maintenance */ -struct page **iommu_dma_alloc(struct device *dev, size_t size, - gfp_t gfp, int prot, dma_addr_t *handle, +struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, + unsigned long attrs, int prot, dma_addr_t *handle, void (*flush_page)(struct device *, const void *, phys_addr_t)); void iommu_dma_free(struct device *dev, struct page **pages, size_t size, dma_addr_t *handle); @@ -56,9 +56,9 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, * directly as DMA mapping callbacks for simplicity */ void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs); + enum dma_data_direction dir, unsigned long attrs); void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, struct dma_attrs *attrs); + enum dma_data_direction dir, unsigned long attrs); int iommu_dma_supported(struct device *dev, u64 mask); int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 9ea9aba28049..dc69df04abc1 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -5,13 +5,58 @@ #include <linux/string.h> #include <linux/device.h> #include <linux/err.h> -#include <linux/dma-attrs.h> #include <linux/dma-debug.h> #include <linux/dma-direction.h> #include <linux/scatterlist.h> #include <linux/kmemcheck.h> #include <linux/bug.h> +/** + 
 * List of possible attributes associated with a DMA mapping. The semantics + * of each attribute should be defined in Documentation/DMA-attributes.txt. + * + * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute + * forces all pending DMA writes to complete. + */ +#define DMA_ATTR_WRITE_BARRIER (1UL << 0) +/* + * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping + * may be weakly ordered; that is, reads and writes may pass each other. + */ +#define DMA_ATTR_WEAK_ORDERING (1UL << 1) +/* + * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be + * buffered to improve performance. + */ +#define DMA_ATTR_WRITE_COMBINE (1UL << 2) +/* + * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either + * consistent or non-consistent memory as it sees fit. + */ +#define DMA_ATTR_NON_CONSISTENT (1UL << 3) +/* + * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel + * virtual mapping for the allocated buffer. + */ +#define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4) +/* + * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of + * the CPU cache for the given buffer, assuming that it has already been + * transferred to the 'device' domain. + */ +#define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5) +/* + * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer + * in physical memory. + */ +#define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6) +/* + * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem + * that it is probably not worth the time to try to allocate memory in a way + * that gives better TLB efficiency. + */ +#define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7) + /* * A dma_addr_t can hold any valid DMA or bus address for the platform. * It can be given to a device to use as a DMA source or target. A CPU cannot @@ -21,34 +66,35 @@ struct dma_map_ops { void* (*alloc)(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs); + unsigned long attrs); void (*free)(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs); + unsigned long attrs); int (*mmap)(struct device *, struct vm_area_struct *, - void *, dma_addr_t, size_t, struct dma_attrs *attrs); + void *, dma_addr_t, size_t, + unsigned long attrs); int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *, - dma_addr_t, size_t, struct dma_attrs *attrs); + dma_addr_t, size_t, unsigned long attrs); dma_addr_t (*map_page)(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs); + unsigned long attrs); void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs); + unsigned long attrs); /* * map_sg returns 0 on error and a value > 0 on success. * It should never return a value < 0. 
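 *
 * With this conversion, attributes are plain bits that callers OR
 * together; a minimal sketch ('dev', 'size' and 'handle' are
 * hypothetical, not part of this patch):
 *
 *	void *buf = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL,
 *				    DMA_ATTR_WRITE_COMBINE |
 *				    DMA_ATTR_NO_KERNEL_MAPPING);
 *
 *	dma_free_attrs(dev, size, buf, handle,
 *		       DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_KERNEL_MAPPING);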
*/ int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs); + unsigned long attrs); void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs); + unsigned long attrs); void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir); @@ -123,7 +169,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; @@ -142,7 +188,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct dma_map_ops *ops = get_dma_ops(dev); @@ -158,7 +204,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, */ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct dma_map_ops *ops = get_dma_ops(dev); int i, ents; @@ -176,7 +222,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct dma_map_ops *ops = get_dma_ops(dev); @@ -195,7 +241,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, kmemcheck_mark_initialized(page_address(page) + offset, size); BUG_ON(!valid_dma_direction(dir)); - addr = ops->map_page(dev, page, offset, size, dir, NULL); + addr = ops->map_page(dev, page, offset, size, dir, 0); debug_dma_map_page(dev, page, offset, size, dir, addr, false); return addr; @@ -208,7 +254,7 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_page) - ops->unmap_page(dev, addr, size, dir, NULL); + ops->unmap_page(dev, addr, size, dir, 0); debug_dma_unmap_page(dev, addr, size, dir, false); } @@ -289,10 +335,10 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, } -#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) -#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL) -#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) -#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL) +#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0) +#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0) +#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0) +#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0) extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size); @@ -321,7 +367,7 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags); */ static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, - dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) + dma_addr_t dma_addr, size_t size, unsigned long attrs) { struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); @@ 
-330,7 +376,7 @@ dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); } -#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL) +#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0) int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, @@ -338,7 +384,8 @@ dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, static inline int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, - dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) + dma_addr_t dma_addr, size_t size, + unsigned long attrs) { struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); @@ -348,7 +395,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size); } -#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL) +#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0) #ifndef arch_dma_alloc_attrs #define arch_dma_alloc_attrs(dev, flag) (true) @@ -356,7 +403,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, static inline void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) + unsigned long attrs) { struct dma_map_ops *ops = get_dma_ops(dev); void *cpu_addr; @@ -378,7 +425,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size, static inline void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) + unsigned long attrs) { struct dma_map_ops *ops = get_dma_ops(dev); @@ -398,31 +445,27 @@ static inline void dma_free_attrs(struct device *dev, size_t size, static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { - return dma_alloc_attrs(dev, size, dma_handle, flag, NULL); + return dma_alloc_attrs(dev, size, dma_handle, flag, 0); } static inline void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle) { - return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL); + return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0); } static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); - return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs); + return dma_alloc_attrs(dev, size, dma_handle, gfp, + DMA_ATTR_NON_CONSISTENT); } static inline void dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle) { - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); - dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); + dma_free_attrs(dev, size, cpu_addr, dma_handle, + DMA_ATTR_NON_CONSISTENT); } static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) @@ -514,7 +557,7 @@ extern u64 dma_get_required_mask(struct device *dev); #ifndef arch_setup_dma_ops static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, - u64 size, struct iommu_ops *iommu, + u64 size, const struct iommu_ops *iommu, bool coherent) { } #endif @@ -646,9 +689,8 @@ static inline void dmam_release_declared_memory(struct device *dev) static inline void *dma_alloc_wc(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t gfp) { - DEFINE_DMA_ATTRS(attrs); 
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); - return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs); + return dma_alloc_attrs(dev, size, dma_addr, gfp, + DMA_ATTR_WRITE_COMBINE); } #ifndef dma_alloc_writecombine #define dma_alloc_writecombine dma_alloc_wc @@ -657,9 +699,8 @@ static inline void *dma_alloc_wc(struct device *dev, size_t size, static inline void dma_free_wc(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr) { - DEFINE_DMA_ATTRS(attrs); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); - return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs); + return dma_free_attrs(dev, size, cpu_addr, dma_addr, + DMA_ATTR_WRITE_COMBINE); } #ifndef dma_free_writecombine #define dma_free_writecombine dma_free_wc @@ -670,15 +711,14 @@ static inline int dma_mmap_wc(struct device *dev, void *cpu_addr, dma_addr_t dma_addr, size_t size) { - DEFINE_DMA_ATTRS(attrs); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); - return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); + return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, + DMA_ATTR_WRITE_COMBINE); } #ifndef dma_mmap_writecombine #define dma_mmap_writecombine dma_mmap_wc #endif -#ifdef CONFIG_NEED_DMA_MAP_STATE +#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG) #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME #define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h index 71456442ebe3..f2e538aaddad 100644 --- a/include/linux/dma/dw.h +++ b/include/linux/dma/dw.h @@ -27,6 +27,7 @@ struct dw_dma; * @regs: memory mapped I/O space * @clk: hclk clock * @dw: struct dw_dma that is filed by dw_dma_probe() + * @pdata: pointer to platform data */ struct dw_dma_chip { struct device *dev; @@ -34,10 +35,12 @@ struct dw_dma_chip { void __iomem *regs; struct clk *clk; struct dw_dma *dw; + + const struct dw_dma_platform_data *pdata; }; /* Export to the platform drivers */ -int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata); +int dw_dma_probe(struct dw_dma_chip *chip); int dw_dma_remove(struct dw_dma_chip *chip); /* DMA API extensions */ diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h index 79df69dc629c..aaff68efba5d 100644 --- a/include/linux/dma/hsu.h +++ b/include/linux/dma/hsu.h @@ -39,14 +39,22 @@ struct hsu_dma_chip { #if IS_ENABLED(CONFIG_HSU_DMA) /* Export to the internal users */ -irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr); +int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr, + u32 *status); +irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, + u32 status); /* Export to the platform drivers */ int hsu_dma_probe(struct hsu_dma_chip *chip); int hsu_dma_remove(struct hsu_dma_chip *chip); #else -static inline irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, - unsigned short nr) +static inline int hsu_dma_get_status(struct hsu_dma_chip *chip, + unsigned short nr, u32 *status) +{ + return 0; +} +static inline irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, + unsigned short nr, u32 status) { return IRQ_NONE; } diff --git a/include/linux/dma/xilinx_dma.h b/include/linux/dma/xilinx_dma.h index 34b98f276ed0..3ae300052553 100644 --- a/include/linux/dma/xilinx_dma.h +++ b/include/linux/dma/xilinx_dma.h @@ -41,6 +41,20 @@ struct xilinx_vdma_config { int ext_fsync; }; +/** + * enum xdma_ip_type: DMA IP type. + * + * XDMA_TYPE_AXIDMA: Axi dma ip. 
+ * XDMA_TYPE_CDMA: Axi cdma ip. + * XDMA_TYPE_VDMA: Axi vdma ip. + * + */ +enum xdma_ip_type { + XDMA_TYPE_AXIDMA = 0, + XDMA_TYPE_CDMA, + XDMA_TYPE_VDMA, +}; + int xilinx_vdma_channel_set_config(struct dma_chan *dchan, struct xilinx_vdma_config *cfg); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 017433712833..30de0197263a 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -804,6 +804,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( sg_dma_address(&sg) = buf; sg_dma_len(&sg) = len; + if (!chan || !chan->device || !chan->device->device_prep_slave_sg) + return NULL; + return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags, NULL); } @@ -812,6 +815,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction dir, unsigned long flags) { + if (!chan || !chan->device || !chan->device->device_prep_slave_sg) + return NULL; + return chan->device->device_prep_slave_sg(chan, sgl, sg_len, dir, flags, NULL); } @@ -823,6 +829,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg( enum dma_transfer_direction dir, unsigned long flags, struct rio_dma_ext *rio_ext) { + if (!chan || !chan->device || !chan->device->device_prep_slave_sg) + return NULL; + return chan->device->device_prep_slave_sg(chan, sgl, sg_len, dir, flags, rio_ext); } @@ -833,6 +842,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic( size_t period_len, enum dma_transfer_direction dir, unsigned long flags) { + if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic) + return NULL; + return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len, period_len, dir, flags); } @@ -841,6 +853,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( struct dma_chan *chan, struct dma_interleaved_template *xt, unsigned long flags) { + if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma) + return NULL; + return chan->device->device_prep_interleaved_dma(chan, xt, flags); } @@ -848,7 +863,7 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset( struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags) { - if (!chan || !chan->device) + if (!chan || !chan->device || !chan->device->device_prep_dma_memset) return NULL; return chan->device->device_prep_dma_memset(chan, dest, value, @@ -861,6 +876,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( struct scatterlist *src_sg, unsigned int src_nents, unsigned long flags) { + if (!chan || !chan->device || !chan->device->device_prep_dma_sg) + return NULL; + return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents, src_sg, src_nents, flags); } diff --git a/include/linux/drbd.h b/include/linux/drbd.h index d6b3c9943a2c..002611c85318 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -51,7 +51,7 @@ #endif extern const char *drbd_buildtag(void); -#define REL_VERSION "8.4.6" +#define REL_VERSION "8.4.7" #define API_VERSION 1 #define PRO_VERSION_MIN 86 #define PRO_VERSION_MAX 101 @@ -370,6 +370,14 @@ enum drbd_notification_type { NOTIFY_FLAGS = NOTIFY_CONTINUES, }; +enum drbd_peer_state { + P_INCONSISTENT = 3, + P_OUTDATED = 4, + P_DOWN = 5, + P_PRIMARY = 6, + P_FENCING = 7, +}; + #define UUID_JUST_CREATED ((__u64)4) enum write_ordering_e { diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h index 
2d0e5ad5de9d..c934d3a96b5e 100644 --- a/include/linux/drbd_genl.h +++ b/include/linux/drbd_genl.h @@ -123,15 +123,16 @@ GENL_struct(DRBD_NLA_DISK_CONF, 3, disk_conf, __u32_field_def(13, DRBD_GENLA_F_MANDATORY, c_fill_target, DRBD_C_FILL_TARGET_DEF) __u32_field_def(14, DRBD_GENLA_F_MANDATORY, c_max_rate, DRBD_C_MAX_RATE_DEF) __u32_field_def(15, DRBD_GENLA_F_MANDATORY, c_min_rate, DRBD_C_MIN_RATE_DEF) + __u32_field_def(20, DRBD_GENLA_F_MANDATORY, disk_timeout, DRBD_DISK_TIMEOUT_DEF) + __u32_field_def(21, 0 /* OPTIONAL */, read_balancing, DRBD_READ_BALANCING_DEF) + __u32_field_def(25, 0 /* OPTIONAL */, rs_discard_granularity, DRBD_RS_DISCARD_GRANULARITY_DEF) __flg_field_def(16, DRBD_GENLA_F_MANDATORY, disk_barrier, DRBD_DISK_BARRIER_DEF) __flg_field_def(17, DRBD_GENLA_F_MANDATORY, disk_flushes, DRBD_DISK_FLUSHES_DEF) __flg_field_def(18, DRBD_GENLA_F_MANDATORY, disk_drain, DRBD_DISK_DRAIN_DEF) __flg_field_def(19, DRBD_GENLA_F_MANDATORY, md_flushes, DRBD_MD_FLUSHES_DEF) - __u32_field_def(20, DRBD_GENLA_F_MANDATORY, disk_timeout, DRBD_DISK_TIMEOUT_DEF) - __u32_field_def(21, 0 /* OPTIONAL */, read_balancing, DRBD_READ_BALANCING_DEF) - /* 9: __u32_field_def(22, DRBD_GENLA_F_MANDATORY, unplug_watermark, DRBD_UNPLUG_WATERMARK_DEF) */ __flg_field_def(23, 0 /* OPTIONAL */, al_updates, DRBD_AL_UPDATES_DEF) + __flg_field_def(24, 0 /* OPTIONAL */, discard_zeroes_if_aligned, DRBD_DISCARD_ZEROES_IF_ALIGNED) ) GENL_struct(DRBD_NLA_RESOURCE_OPTS, 4, res_opts, diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h index 8ac8c5d9a3ad..ddac68422a96 100644 --- a/include/linux/drbd_limits.h +++ b/include/linux/drbd_limits.h @@ -126,8 +126,7 @@ #define DRBD_RESYNC_RATE_DEF 250 #define DRBD_RESYNC_RATE_SCALE 'k' /* kilobytes */ - /* less than 7 would hit performance unnecessarily. */ -#define DRBD_AL_EXTENTS_MIN 7 +#define DRBD_AL_EXTENTS_MIN 67 /* we use u16 as "slot number", (u16)~0 is "FREE". * If you use >= 292 kB on-disk ring buffer, * this is the maximum you can use: */ @@ -210,6 +209,12 @@ #define DRBD_MD_FLUSHES_DEF 1 #define DRBD_TCP_CORK_DEF 1 #define DRBD_AL_UPDATES_DEF 1 +/* We used to ignore the discard_zeroes_data setting. + * To not change established (and expected) behaviour, + * by default assume that, for discard_zeroes_data=0, + * we can make that an effective discard_zeroes_data=1, + * if we only explicitly zero-out unaligned partial chunks. */ +#define DRBD_DISCARD_ZEROES_IF_ALIGNED 1 #define DRBD_ALLOW_TWO_PRIMARIES_DEF 0 #define DRBD_ALWAYS_ASBP_DEF 0 @@ -230,4 +235,10 @@ #define DRBD_SOCKET_CHECK_TIMEO_MAX DRBD_PING_TIMEO_MAX #define DRBD_SOCKET_CHECK_TIMEO_DEF 0 #define DRBD_SOCKET_CHECK_TIMEO_SCALE '1' + +#define DRBD_RS_DISCARD_GRANULARITY_MIN 0 +#define DRBD_RS_DISCARD_GRANULARITY_MAX (1<<20) /* 1MiByte */ +#define DRBD_RS_DISCARD_GRANULARITY_DEF 0 /* disabled by default */ +#define DRBD_RS_DISCARD_GRANULARITY_SCALE '1' /* bytes */ + #endif diff --git a/include/linux/ds17287rtc.h b/include/linux/ds17287rtc.h deleted file mode 100644 index d85d3f497b96..000000000000 --- a/include/linux/ds17287rtc.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * ds17287rtc.h - register definitions for the ds1728[57] RTC / CMOS RAM - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * (C) 2003 Guido Guenther <agx@sigxcpu.org> - */ -#ifndef __LINUX_DS17287RTC_H -#define __LINUX_DS17287RTC_H - -#include <linux/rtc.h> /* get the user-level API */ -#include <linux/mc146818rtc.h> - -/* Register A */ -#define DS_REGA_DV2 0x40 /* countdown chain */ -#define DS_REGA_DV1 0x20 /* oscillator enable */ -#define DS_REGA_DV0 0x10 /* bank select */ - -/* bank 1 registers */ -#define DS_B1_MODEL 0x40 /* model number byte */ -#define DS_B1_SN1 0x41 /* serial number byte 1 */ -#define DS_B1_SN2 0x42 /* serial number byte 2 */ -#define DS_B1_SN3 0x43 /* serial number byte 3 */ -#define DS_B1_SN4 0x44 /* serial number byte 4 */ -#define DS_B1_SN5 0x45 /* serial number byte 5 */ -#define DS_B1_SN6 0x46 /* serial number byte 6 */ -#define DS_B1_CRC 0x47 /* CRC byte */ -#define DS_B1_CENTURY 0x48 /* Century byte */ -#define DS_B1_DALARM 0x49 /* date alarm */ -#define DS_B1_XCTRL4A 0x4a /* extendec control register 4a */ -#define DS_B1_XCTRL4B 0x4b /* extendec control register 4b */ -#define DS_B1_RTCADDR2 0x4e /* rtc address 2 */ -#define DS_B1_RTCADDR3 0x4f /* rtc address 3 */ -#define DS_B1_RAMLSB 0x50 /* extended ram LSB */ -#define DS_B1_RAMMSB 0x51 /* extended ram MSB */ -#define DS_B1_RAMDPORT 0x53 /* extended ram data port */ - -/* register details */ -/* extended control register 4a */ -#define DS_XCTRL4A_VRT2 0x80 /* valid ram and time */ -#define DS_XCTRL4A_INCR 0x40 /* increment progress status */ -#define DS_XCTRL4A_BME 0x20 /* burst mode enable */ -#define DS_XCTRL4A_PAB 0x08 /* power active bar ctrl */ -#define DS_XCTRL4A_RF 0x04 /* ram clear flag */ -#define DS_XCTRL4A_WF 0x02 /* wake up alarm flag */ -#define DS_XCTRL4A_KF 0x01 /* kickstart flag */ - -/* interrupt causes */ -#define DS_XCTRL4A_IFS (DS_XCTRL4A_RF|DS_XCTRL4A_WF|DS_XCTRL4A_KF) - -/* extended control register 4b */ -#define DS_XCTRL4B_ABE 0x80 /* auxiliary battery enable */ -#define DS_XCTRL4B_E32K 0x40 /* enable 32.768 kHz Output */ -#define DS_XCTRL4B_CS 0x20 /* crystal select */ -#define DS_XCTRL4B_RCE 0x10 /* ram clear enable */ -#define DS_XCTRL4B_PRS 0x08 /* PAB resec select */ -#define DS_XCTRL4B_RIE 0x04 /* ram clear interrupt enable */ -#define DS_XCTRL4B_WFE 0x02 /* wake up alarm interrupt enable */ -#define DS_XCTRL4B_KFE 0x01 /* kickstart interrupt enable */ - -/* interrupt enable bits */ -#define DS_XCTRL4B_IFES (DS_XCTRL4B_RIE|DS_XCTRL4B_WFE|DS_XCTRL4B_KFE) - -#endif /* __LINUX_DS17287RTC_H */ diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index 4f1bbc68cd1b..546d68057e3b 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -1,6 +1,10 @@ #ifndef _DYNAMIC_DEBUG_H #define _DYNAMIC_DEBUG_H +#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) +#include <linux/jump_label.h> +#endif + /* * An instance of this structure is created in a special * ELF section at every dynamic debug callsite. 
At runtime, @@ -33,6 +37,12 @@ struct _ddebug { #define _DPRINTK_FLAGS_DEFAULT 0 #endif unsigned int flags:8; +#ifdef HAVE_JUMP_LABEL + union { + struct static_key_true dd_key_true; + struct static_key_false dd_key_false; + } key; +#endif } __attribute__((aligned(8))); @@ -60,7 +70,7 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor, const struct net_device *dev, const char *fmt, ...); -#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ +#define DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, key, init) \ static struct _ddebug __aligned(8) \ __attribute__((section("__verbose"))) name = { \ .modname = KBUILD_MODNAME, \ @@ -68,13 +78,51 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor, .filename = __FILE__, \ .format = (fmt), \ .lineno = __LINE__, \ - .flags = _DPRINTK_FLAGS_DEFAULT, \ + .flags = _DPRINTK_FLAGS_DEFAULT, \ + dd_key_init(key, init) \ } +#ifdef HAVE_JUMP_LABEL + +#define dd_key_init(key, init) key = (init) + +#ifdef DEBUG +#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ + DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_true, \ + (STATIC_KEY_TRUE_INIT)) + +#define DYNAMIC_DEBUG_BRANCH(descriptor) \ + static_branch_likely(&descriptor.key.dd_key_true) +#else +#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ + DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_false, \ + (STATIC_KEY_FALSE_INIT)) + +#define DYNAMIC_DEBUG_BRANCH(descriptor) \ + static_branch_unlikely(&descriptor.key.dd_key_false) +#endif + +#else + +#define dd_key_init(key, init) + +#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ + DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, 0, 0) + +#ifdef DEBUG +#define DYNAMIC_DEBUG_BRANCH(descriptor) \ + likely(descriptor.flags & _DPRINTK_FLAGS_PRINT) +#else +#define DYNAMIC_DEBUG_BRANCH(descriptor) \ + unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) +#endif + +#endif + #define dynamic_pr_debug(fmt, ...) \ do { \ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ - if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ + if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \ ##__VA_ARGS__); \ } while (0) @@ -82,7 +130,7 @@ do { \ #define dynamic_dev_dbg(dev, fmt, ...) \ do { \ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ - if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ + if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ __dynamic_dev_dbg(&descriptor, dev, fmt, \ ##__VA_ARGS__); \ } while (0) @@ -90,7 +138,7 @@ do { \ #define dynamic_netdev_dbg(dev, fmt, ...) \ do { \ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ - if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ + if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ __dynamic_netdev_dbg(&descriptor, dev, fmt, \ ##__VA_ARGS__); \ } while (0) @@ -100,7 +148,7 @@ do { \ do { \ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, \ __builtin_constant_p(prefix_str) ? 
prefix_str : "hexdump");\ - if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ + if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ print_hex_dump(KERN_DEBUG, prefix_str, \ prefix_type, rowsize, groupsize, \ buf, len, ascii); \ diff --git a/include/linux/efi.h b/include/linux/efi.h index 1626474567ac..0148a3046b48 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -21,6 +21,8 @@ #include <linux/pfn.h> #include <linux/pstore.h> #include <linux/reboot.h> +#include <linux/uuid.h> +#include <linux/screen_info.h> #include <asm/page.h> @@ -43,17 +45,10 @@ typedef u16 efi_char16_t; /* UNICODE character */ typedef u64 efi_physical_addr_t; typedef void *efi_handle_t; - -typedef struct { - u8 b[16]; -} efi_guid_t; +typedef uuid_le efi_guid_t; #define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \ -((efi_guid_t) \ -{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ - (b) & 0xff, ((b) >> 8) & 0xff, \ - (c) & 0xff, ((c) >> 8) & 0xff, \ - (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) + UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) /* * Generic EFI table header @@ -123,6 +118,22 @@ typedef struct { u32 imagesize; } efi_capsule_header_t; +struct efi_boot_memmap { + efi_memory_desc_t **map; + unsigned long *map_size; + unsigned long *desc_size; + u32 *desc_ver; + unsigned long *key_ptr; + unsigned long *buff_size; +}; + +/* + * EFI capsule flags + */ +#define EFI_CAPSULE_PERSIST_ACROSS_RESET 0x00010000 +#define EFI_CAPSULE_POPULATE_SYSTEM_TABLE 0x00020000 +#define EFI_CAPSULE_INITIATE_RESET 0x00040000 + /* * Allocation types for calls to boottime->allocate_pages. */ @@ -282,9 +293,10 @@ typedef struct { efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **); void *__reserved; void *register_protocol_notify; - void *locate_handle; + efi_status_t (*locate_handle)(int, efi_guid_t *, void *, + unsigned long *, efi_handle_t *); void *locate_device_path; - void *install_configuration_table; + efi_status_t (*install_configuration_table)(efi_guid_t *, void *); void *load_image; void *start_image; void *exit; @@ -533,95 +545,58 @@ typedef efi_status_t efi_query_variable_store_t(u32 attributes, void efi_native_runtime_setup(void); /* - * EFI Configuration Table and GUID definitions + * EFI Configuration Table and GUID definitions + * + * These are all defined in a single line to make them easier to + * grep for and to see them at a glance - while still having a + * similar structure to the definitions in the spec. + * + * Here's how they are structured: + * + * GUID: 12345678-1234-1234-1234-123456789012 + * Spec: + * #define EFI_SOME_PROTOCOL_GUID \ + * {0x12345678,0x1234,0x1234,\ + * {0x12,0x34,0x12,0x34,0x56,0x78,0x90,0x12}} + * Here: + * #define SOME_PROTOCOL_GUID EFI_GUID(0x12345678, 0x1234, 0x1234, 0x12, 0x34, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12) + * ^ tabs ^extra space + * + * Note that the 'extra space' separates the values at the same place + * where the UEFI SPEC breaks the line. 
*/ -#define NULL_GUID \ - EFI_GUID(0x00000000, 0x0000, 0x0000, \ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00) - -#define MPS_TABLE_GUID \ - EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, \ - 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) - -#define ACPI_TABLE_GUID \ - EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, \ - 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) - -#define ACPI_20_TABLE_GUID \ - EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, \ - 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81) - -#define SMBIOS_TABLE_GUID \ - EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, \ - 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) - -#define SMBIOS3_TABLE_GUID \ - EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, \ - 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94) - -#define SAL_SYSTEM_TABLE_GUID \ - EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, \ - 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) - -#define HCDP_TABLE_GUID \ - EFI_GUID(0xf951938d, 0x620b, 0x42ef, \ - 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98) - -#define UGA_IO_PROTOCOL_GUID \ - EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, \ - 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2) +#define NULL_GUID EFI_GUID(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00) +#define MPS_TABLE_GUID EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define ACPI_TABLE_GUID EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define ACPI_20_TABLE_GUID EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81) +#define SMBIOS_TABLE_GUID EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define SMBIOS3_TABLE_GUID EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94) +#define SAL_SYSTEM_TABLE_GUID EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define HCDP_TABLE_GUID EFI_GUID(0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98) +#define UGA_IO_PROTOCOL_GUID EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2) +#define EFI_GLOBAL_VARIABLE_GUID EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c) +#define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93) +#define LINUX_EFI_CRASH_GUID EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0) +#define LOADED_IMAGE_PROTOCOL_GUID EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) +#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a) +#define EFI_UGA_PROTOCOL_GUID EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39) +#define EFI_PCI_IO_PROTOCOL_GUID EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a) +#define EFI_FILE_INFO_ID EFI_GUID(0x09576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) +#define EFI_SYSTEM_RESOURCE_TABLE_GUID EFI_GUID(0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80) +#define EFI_FILE_SYSTEM_GUID EFI_GUID(0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) +#define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0) +#define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5) +#define EFI_RNG_PROTOCOL_GUID 
EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44) +#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) +#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) -#define EFI_GLOBAL_VARIABLE_GUID \ - EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, \ - 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c) - -#define UV_SYSTEM_TABLE_GUID \ - EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, \ - 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93) - -#define LINUX_EFI_CRASH_GUID \ - EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, \ - 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0) - -#define LOADED_IMAGE_PROTOCOL_GUID \ - EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, \ - 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) - -#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID \ - EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, \ - 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a) - -#define EFI_UGA_PROTOCOL_GUID \ - EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, \ - 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39) - -#define EFI_PCI_IO_PROTOCOL_GUID \ - EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, \ - 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a) - -#define EFI_FILE_INFO_ID \ - EFI_GUID(0x9576e92, 0x6d3f, 0x11d2, \ - 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) - -#define EFI_SYSTEM_RESOURCE_TABLE_GUID \ - EFI_GUID(0xb122a263, 0x3661, 0x4f68, \ - 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80) - -#define EFI_FILE_SYSTEM_GUID \ - EFI_GUID(0x964e5b22, 0x6459, 0x11d2, \ - 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) - -#define DEVICE_TREE_GUID \ - EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, \ - 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0) - -#define EFI_PROPERTIES_TABLE_GUID \ - EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, \ - 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5) - -#define EFI_RNG_PROTOCOL_GUID \ - EFI_GUID(0x3152bca5, 0xeade, 0x433d, \ - 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44) +/* + * This GUID is used to pass to the kernel proper the struct screen_info + * structure that was populated by the stub based on the GOP protocol instance + * associated with ConOut + */ +#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) +#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) typedef struct { efi_guid_t guid; @@ -847,6 +822,14 @@ typedef struct { #define EFI_INVALID_TABLE_ADDR (~0UL) +typedef struct { + u32 version; + u32 num_entries; + u32 desc_size; + u32 reserved; + efi_memory_desc_t entry[0]; +} efi_memory_attributes_table_t; + /* * All runtime access to EFI goes through this structure: */ @@ -868,6 +851,7 @@ extern struct efi { unsigned long config_table; /* config tables */ unsigned long esrt; /* ESRT table */ unsigned long properties_table; /* properties table */ + unsigned long mem_attr_table; /* memory attributes table */ efi_get_time_t *get_time; efi_set_time_t *set_time; efi_get_wakeup_time_t *get_wakeup_time; @@ -883,7 +867,7 @@ extern struct efi { efi_get_next_high_mono_count_t *get_next_high_mono_count; efi_reset_system_t *reset_system; efi_set_virtual_address_map_t *set_virtual_address_map; - struct efi_memory_map *memmap; + struct efi_memory_map memmap; unsigned long flags; } efi; @@ -904,7 +888,7 @@ extern void efi_init (void); extern void *efi_get_pal_addr (void); extern void efi_map_pal_code (void); extern void efi_memmap_walk (efi_freemem_callback_t 
callback, void *arg); -extern void efi_gettimeofday (struct timespec *ts); +extern void efi_gettimeofday (struct timespec64 *ts); extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ #ifdef CONFIG_X86 extern void efi_late_init(void); @@ -942,10 +926,8 @@ extern u64 efi_mem_desc_end(efi_memory_desc_t *md); extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md); extern void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource); -extern void efi_get_time(struct timespec *now); extern void efi_reserve_boot_services(void); extern int efi_get_fdt_params(struct efi_fdt_params *params); -extern struct efi_memory_map memmap; extern struct kobject *efi_kobj; extern int efi_reboot_quirk_mode; @@ -957,12 +939,34 @@ extern void __init efi_fake_memmap(void); static inline void efi_fake_memmap(void) { } #endif +/* + * efi_memattr_perm_setter - arch specific callback function passed into + * efi_memattr_apply_permissions() that updates the + * mapping permissions described by the second + * argument in the page tables referred to by the + * first argument. + */ +typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *); + +extern int efi_memattr_init(void); +extern int efi_memattr_apply_permissions(struct mm_struct *mm, + efi_memattr_perm_setter fn); + /* Iterate through an efi_memory_map */ -#define for_each_efi_memory_desc(m, md) \ +#define for_each_efi_memory_desc_in_map(m, md) \ for ((md) = (m)->map; \ - (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \ + (md) && ((void *)(md) + (m)->desc_size) <= (m)->map_end; \ (md) = (void *)(md) + (m)->desc_size) +/** + * for_each_efi_memory_desc - iterate over descriptors in efi.memmap + * @md: the efi_memory_desc_t * iterator + * + * Once the loop finishes @md must not be accessed. + */ +#define for_each_efi_memory_desc(md) \ + for_each_efi_memory_desc_in_map(&efi.memmap, md) + /* * Format an EFI memory descriptor's type and attributes to a user-provided * character buffer, as per snprintf(), and return the buffer. @@ -1000,7 +1004,6 @@ extern int __init efi_setup_pcdp_console(char *); * possible, remove EFI-related code altogether. */ #define EFI_BOOT 0 /* Were we booted from EFI? */ -#define EFI_SYSTEM_TABLES 1 /* Can we use EFI system tables? */ #define EFI_CONFIG_TABLES 2 /* Can we use EFI config tables? */ #define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */ #define EFI_MEMMAP 4 /* Can we use EFI memory map? 
*/ @@ -1026,8 +1029,16 @@ static inline bool efi_enabled(int feature) } static inline void efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {} + +static inline bool +efi_capsule_pending(int *reset_type) +{ + return false; +} #endif +extern int efi_status_to_err(efi_status_t status); + /* * Variable Attributes */ @@ -1050,7 +1061,7 @@ efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {} * Length of a GUID string (strlen("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")) * not including trailing NUL */ -#define EFI_VARIABLE_GUID_LEN 36 +#define EFI_VARIABLE_GUID_LEN UUID_STRING_LEN /* * The type of search to perform when calling boottime->locate_handle @@ -1180,6 +1191,80 @@ struct efi_simple_text_output_protocol { void *test_string; }; +#define PIXEL_RGB_RESERVED_8BIT_PER_COLOR 0 +#define PIXEL_BGR_RESERVED_8BIT_PER_COLOR 1 +#define PIXEL_BIT_MASK 2 +#define PIXEL_BLT_ONLY 3 +#define PIXEL_FORMAT_MAX 4 + +struct efi_pixel_bitmask { + u32 red_mask; + u32 green_mask; + u32 blue_mask; + u32 reserved_mask; +}; + +struct efi_graphics_output_mode_info { + u32 version; + u32 horizontal_resolution; + u32 vertical_resolution; + int pixel_format; + struct efi_pixel_bitmask pixel_information; + u32 pixels_per_scan_line; +} __packed; + +struct efi_graphics_output_protocol_mode_32 { + u32 max_mode; + u32 mode; + u32 info; + u32 size_of_info; + u64 frame_buffer_base; + u32 frame_buffer_size; +} __packed; + +struct efi_graphics_output_protocol_mode_64 { + u32 max_mode; + u32 mode; + u64 info; + u64 size_of_info; + u64 frame_buffer_base; + u64 frame_buffer_size; +} __packed; + +struct efi_graphics_output_protocol_mode { + u32 max_mode; + u32 mode; + unsigned long info; + unsigned long size_of_info; + u64 frame_buffer_base; + unsigned long frame_buffer_size; +} __packed; + +struct efi_graphics_output_protocol_32 { + u32 query_mode; + u32 set_mode; + u32 blt; + u32 mode; +}; + +struct efi_graphics_output_protocol_64 { + u64 query_mode; + u64 set_mode; + u64 blt; + u64 mode; +}; + +struct efi_graphics_output_protocol { + unsigned long query_mode; + unsigned long set_mode; + unsigned long blt; + struct efi_graphics_output_protocol_mode *mode; +}; + +typedef efi_status_t (*efi_graphics_output_protocol_query_mode)( + struct efi_graphics_output_protocol *, u32, unsigned long *, + struct efi_graphics_output_mode_info **); + extern struct list_head efivar_sysfs_list; static inline void @@ -1195,8 +1280,7 @@ int efivars_unregister(struct efivars *efivars); struct kobject *efivars_kobject(void); int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *), - void *data, bool atomic, bool duplicates, - struct list_head *head); + void *data, bool duplicates, struct list_head *head); void efivar_entry_add(struct efivar_entry *entry, struct list_head *head); void efivar_entry_remove(struct efivar_entry *entry); @@ -1242,6 +1326,13 @@ int efivars_sysfs_init(void); #define EFIVARS_DATA_SIZE_MAX 1024 #endif /* CONFIG_EFI_VARS */ +extern bool efi_capsule_pending(int *reset_type); + +extern int efi_capsule_supported(efi_guid_t guid, u32 flags, + size_t size, int *reset); + +extern int efi_capsule_update(efi_capsule_header_t *capsule, + struct page **pages); #ifdef CONFIG_EFI_RUNTIME_MAP int efi_runtime_map_init(struct kobject *); @@ -1289,11 +1380,7 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, efi_loaded_image_t *image, int *cmd_line_len); efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, - efi_memory_desc_t **map, - unsigned long *map_size, - unsigned 
long *desc_size, - u32 *desc_ver, - unsigned long *key_ptr); + struct efi_boot_memmap *map); efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg, unsigned long size, unsigned long align, @@ -1319,5 +1406,70 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, efi_status_t efi_parse_options(char *cmdline); +efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg, + struct screen_info *si, efi_guid_t *proto, + unsigned long size); + bool efi_runtime_disabled(void); +extern void efi_call_virt_check_flags(unsigned long flags, const char *call); + +/* + * Arch code can implement the following three template macros, avoiding + * repetition for the void/non-void return cases of {__,}efi_call_virt(): + * + * * arch_efi_call_virt_setup() + * + * Sets up the environment for the call (e.g. switching page tables, + * allowing kernel-mode use of floating point, if required). + * + * * arch_efi_call_virt() + * + * Performs the call. The last expression in the macro must be the call + * itself, allowing the logic to be shared by the void and non-void + * cases. + * + * * arch_efi_call_virt_teardown() + * + * Restores the usual kernel environment once the call has returned. + */ + +#define efi_call_virt_pointer(p, f, args...) \ +({ \ + efi_status_t __s; \ + unsigned long __flags; \ + \ + arch_efi_call_virt_setup(); \ + \ + local_save_flags(__flags); \ + __s = arch_efi_call_virt(p, f, args); \ + efi_call_virt_check_flags(__flags, __stringify(f)); \ + \ + arch_efi_call_virt_teardown(); \ + \ + __s; \ +}) + +#define __efi_call_virt_pointer(p, f, args...) \ +({ \ + unsigned long __flags; \ + \ + arch_efi_call_virt_setup(); \ + \ + local_save_flags(__flags); \ + arch_efi_call_virt(p, f, args); \ + efi_call_virt_check_flags(__flags, __stringify(f)); \ + \ + arch_efi_call_virt_teardown(); \ +}) + +typedef efi_status_t (*efi_exit_boot_map_processing)( + efi_system_table_t *sys_table_arg, + struct efi_boot_memmap *map, + void *priv); + +efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table, + void *handle, + struct efi_boot_memmap *map, + void *priv, + efi_exit_boot_map_processing priv_func); #endif /* _LINUX_EFI_H */ diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 638b324f0291..e7f358d2e5fc 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -16,7 +16,11 @@ typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int); -typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *); +typedef int (elevator_allow_bio_merge_fn) (struct request_queue *, + struct request *, struct bio *); + +typedef int (elevator_allow_rq_merge_fn) (struct request_queue *, + struct request *, struct request *); typedef void (elevator_bio_merged_fn) (struct request_queue *, struct request *, struct bio *); @@ -26,7 +30,7 @@ typedef int (elevator_dispatch_fn) (struct request_queue *, int); typedef void (elevator_add_req_fn) (struct request_queue *, struct request *); typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *); typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); -typedef int (elevator_may_queue_fn) (struct request_queue *, int); +typedef int (elevator_may_queue_fn) (struct request_queue *, int, int); typedef void (elevator_init_icq_fn) (struct io_cq *); typedef void (elevator_exit_icq_fn) (struct io_cq *); @@ -46,7 +50,8 @@ struct
elevator_ops elevator_merge_fn *elevator_merge_fn; elevator_merged_fn *elevator_merged_fn; elevator_merge_req_fn *elevator_merge_req_fn; - elevator_allow_merge_fn *elevator_allow_merge_fn; + elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn; + elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn; elevator_bio_merged_fn *elevator_bio_merged_fn; elevator_dispatch_fn *elevator_dispatch_fn; @@ -134,7 +139,7 @@ extern struct request *elv_former_request(struct request_queue *, struct request extern struct request *elv_latter_request(struct request_queue *, struct request *); extern int elv_register_queue(struct request_queue *q); extern void elv_unregister_queue(struct request_queue *q); -extern int elv_may_queue(struct request_queue *, int); +extern int elv_may_queue(struct request_queue *, int, int); extern void elv_completed_request(struct request_queue *, struct request *); extern int elv_set_request(struct request_queue *q, struct request *rq, struct bio *bio, gfp_t gfp_mask); @@ -157,7 +162,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); extern int elevator_init(struct request_queue *, char *); extern void elevator_exit(struct elevator_queue *); extern int elevator_change(struct request_queue *, const char *); -extern bool elv_rq_merge_ok(struct request *, struct bio *); +extern bool elv_bio_merge_ok(struct request *, struct bio *); extern struct elevator_queue *elevator_alloc(struct request_queue *, struct elevator_type *); diff --git a/include/linux/err.h b/include/linux/err.h index 56762ab41713..1e3558845e4c 100644 --- a/include/linux/err.h +++ b/include/linux/err.h @@ -18,7 +18,7 @@ #ifndef __ASSEMBLY__ -#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO) +#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO) static inline void * __must_check ERR_PTR(long error) { diff --git a/include/linux/errno.h b/include/linux/errno.h index 89627b9187f9..7ce9fb1b7d28 100644 --- a/include/linux/errno.h +++ b/include/linux/errno.h @@ -28,5 +28,6 @@ #define EBADTYPE 527 /* Type not supported by server */ #define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */ #define EIOCBQUEUED 529 /* iocb queued, will get completion event */ +#define ERECALLCONFLICT 530 /* conflict with recalled state */ #endif diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 37ff4a6faa9a..6fec9e81bd70 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -374,6 +374,29 @@ static inline bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2) } /** + * ether_addr_equal_masked - Compare two Ethernet addresses with a mask + * @addr1: Pointer to a six-byte array containing the 1st Ethernet address + * @addr2: Pointer to a six-byte array containing the 2nd Ethernet address + * @mask: Pointer to a six-byte array containing the Ethernet address bitmask + * + * Compare two Ethernet addresses under a mask: returns true if, for every bit + * set in the bitmask, the corresponding bits in the two Ethernet addresses + * are equal. Using a mask with all bits set is equivalent to + * ether_addr_equal(), only slower. + */ +static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2, + const u8 *mask) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) { + if ((addr1[i] ^ addr2[i]) & mask[i]) + return false; + } + + return true; +} + +/** * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
* @dev: Pointer to a device structure * @addr: Pointer to a six-byte array containing the Ethernet address diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index e2b7bf27c03e..9ded8c6d8176 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -150,6 +150,13 @@ extern int __ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *link_ksettings); +void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst, + u32 legacy_u32); + +/* return false if src had higher bits set. lower bits always updated. */ +bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, + const unsigned long *src); + /** * struct ethtool_ops - optional netdev operations * @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings diff --git a/include/linux/export.h b/include/linux/export.h index 96e45ea463e7..c565f87f005e 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -38,7 +38,7 @@ extern struct module __this_module; #ifdef CONFIG_MODULES -#ifndef __GENKSYMS__ +#if defined(__KERNEL__) && !defined(__GENKSYMS__) #ifdef CONFIG_MODVERSIONS /* Mark the CRC weak since genksyms apparently decides not to * generate a checksums for some symbols */ @@ -53,7 +53,7 @@ extern struct module __this_module; #endif /* For every exported symbol, place a struct in the __ksymtab section */ -#define __EXPORT_SYMBOL(sym, sec) \ +#define ___EXPORT_SYMBOL(sym, sec) \ extern typeof(sym) sym; \ __CRC_SYMBOL(sym, sec) \ static const char __kstrtab_##sym[] \ @@ -65,6 +65,35 @@ extern struct module __this_module; __attribute__((section("___ksymtab" sec "+" #sym), unused)) \ = { (unsigned long)&sym, __kstrtab_##sym } +#if defined(__KSYM_DEPS__) + +/* + * For fine grained build dependencies, we want to tell the build system + * about each possible exported symbol even if they're not actually exported. + * We use a string pattern that is unlikely to be valid code that the build + * system filters out from the preprocessor output (see ksym_dep_filter + * in scripts/Kbuild.include). + */ +#define __EXPORT_SYMBOL(sym, sec) === __KSYM_##sym === + +#elif defined(CONFIG_TRIM_UNUSED_KSYMS) + +#include <linux/kconfig.h> +#include <generated/autoksyms.h> + +#define __EXPORT_SYMBOL(sym, sec) \ + __cond_export_sym(sym, sec, __is_defined(__KSYM_##sym)) +#define __cond_export_sym(sym, sec, conf) \ + ___cond_export_sym(sym, sec, conf) +#define ___cond_export_sym(sym, sec, enabled) \ + __cond_export_sym_##enabled(sym, sec) +#define __cond_export_sym_1(sym, sec) ___EXPORT_SYMBOL(sym, sec) +#define __cond_export_sym_0(sym, sec) /* nothing */ + +#else +#define __EXPORT_SYMBOL ___EXPORT_SYMBOL +#endif + #define EXPORT_SYMBOL(sym) \ __EXPORT_SYMBOL(sym, "") diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index d8414502edb4..b03c0625fa6e 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -6,6 +6,7 @@ struct dentry; struct iattr; struct inode; +struct iomap; struct super_block; struct vfsmount; @@ -187,21 +188,6 @@ struct fid { * get_name is not (which is possibly inconsistent) */ -/* types of block ranges for multipage write mappings. 
*/ -#define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */ -#define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */ -#define IOMAP_MAPPED 0x03 /* blocks allocated @blkno */ -#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */ - -#define IOMAP_NULL_BLOCK -1LL /* blkno is not valid */ - -struct iomap { - sector_t blkno; /* first sector of mapping */ - loff_t offset; /* file offset of mapping, bytes */ - u64 length; /* length of mapping, bytes */ - int type; /* type of mapping */ -}; - struct export_operations { int (*encode_fh)(struct inode *inode, __u32 *fh, int *max_len, struct inode *parent); diff --git a/include/linux/extable.h b/include/linux/extable.h new file mode 100644 index 000000000000..7effea4b257d --- /dev/null +++ b/include/linux/extable.h @@ -0,0 +1,32 @@ +#ifndef _LINUX_EXTABLE_H +#define _LINUX_EXTABLE_H + +#include <linux/stddef.h> /* for NULL */ + +struct module; +struct exception_table_entry; + +const struct exception_table_entry * +search_extable(const struct exception_table_entry *first, + const struct exception_table_entry *last, + unsigned long value); +void sort_extable(struct exception_table_entry *start, + struct exception_table_entry *finish); +void sort_main_extable(void); +void trim_init_extable(struct module *m); + +/* Given an address, look for it in the exception tables */ +const struct exception_table_entry *search_exception_tables(unsigned long add); + +#ifdef CONFIG_MODULES +/* For extable.c to search modules' exception tables. */ +const struct exception_table_entry *search_module_extables(unsigned long addr); +#else +static inline const struct exception_table_entry * +search_module_extables(unsigned long addr) +{ + return NULL; +} +#endif /*CONFIG_MODULES*/ + +#endif /* _LINUX_EXTABLE_H */ diff --git a/include/linux/extcon.h b/include/linux/extcon.h index 7abf674c388c..61004413dc64 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -126,42 +126,6 @@ struct extcon_dev { struct device_attribute *d_attrs_muex; }; -/** - * struct extcon_cable - An internal data for each cable of extcon device. - * @edev: The extcon device - * @cable_index: Index of this cable in the edev - * @attr_g: Attribute group for the cable - * @attr_name: "name" sysfs entry - * @attr_state: "state" sysfs entry - * @attrs: Array pointing to attr_name and attr_state for attr_g - */ -struct extcon_cable { - struct extcon_dev *edev; - int cable_index; - - struct attribute_group attr_g; - struct device_attribute attr_name; - struct device_attribute attr_state; - - struct attribute *attrs[3]; /* to be fed to attr_g.attrs */ -}; - -/** - * struct extcon_specific_cable_nb - An internal data for - * extcon_register_interest(). - * @user_nb: user provided notifier block for events from - * a specific cable. - * @cable_index: the target cable. - * @edev: the target extcon device. - * @previous_value: the saved previous event value. - */ -struct extcon_specific_cable_nb { - struct notifier_block *user_nb; - int cable_index; - struct extcon_dev *edev; - unsigned long previous_value; -}; - #if IS_ENABLED(CONFIG_EXTCON) /* @@ -201,29 +165,12 @@ extern int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state); /* * get/set_cable_state access each bit of the 32b encoded state value. - * They are used to access the status of each cable based on the cable_name. + * They are used to access the status of each cable based on the cable id. 
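 * For example (a usage sketch, not part of this patch), a charger driver that detects VBUS could report it with extcon_set_cable_state_(edev, EXTCON_USB, true), and a consumer could query it with extcon_get_cable_state_(edev, EXTCON_USB), where EXTCON_USB is one of the cable ids defined earlier in this header.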
*/ extern int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id); extern int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id, bool cable_state); -extern int extcon_get_cable_state(struct extcon_dev *edev, - const char *cable_name); -extern int extcon_set_cable_state(struct extcon_dev *edev, - const char *cable_name, bool cable_state); - -/* - * Following APIs are for notifiees (those who want to be notified) - * to register a callback for events from a specific cable of the extcon. - * Notifiees are the connected device drivers wanting to get notified by - * a specific external port of a connection device. - */ -extern int extcon_register_interest(struct extcon_specific_cable_nb *obj, - const char *extcon_name, - const char *cable_name, - struct notifier_block *nb); -extern int extcon_unregister_interest(struct extcon_specific_cable_nb *nb); - /* * The following APIs are used to monitor every action of a notifier. * The registrar gets notified for every external port of a connection device. @@ -235,6 +182,12 @@ extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id, struct notifier_block *nb); extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id, struct notifier_block *nb); +extern int devm_extcon_register_notifier(struct device *dev, + struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb); +extern void devm_extcon_unregister_notifier(struct device *dev, + struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb); /* * The following API gets the extcon device from the devicetree. */ @@ -246,6 +199,7 @@ extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, /* The following API gets information about an extcon device */ extern const char *extcon_get_edev_name(struct extcon_dev *edev); + #else /* CONFIG_EXTCON */ static inline int extcon_dev_register(struct extcon_dev *edev) { @@ -306,18 +260,6 @@ static inline int extcon_set_cable_state_(struct extcon_dev *edev, return 0; } -static inline int extcon_get_cable_state(struct extcon_dev *edev, - const char *cable_name) -{ - return 0; -} - -static inline int extcon_set_cable_state(struct extcon_dev *edev, - const char *cable_name, int state) -{ - return 0; -} - static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) { return NULL; } @@ -337,19 +279,16 @@ static inline int extcon_unregister_notifier(struct extcon_dev *edev, return 0; } -static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj, - const char *extcon_name, - const char *cable_name, - struct notifier_block *nb) +static inline int devm_extcon_register_notifier(struct device *dev, + struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb) { - return 0; + return -ENOSYS; } -static inline int extcon_unregister_interest(struct extcon_specific_cable_nb - *obj) -{ - return 0; -} +static inline void devm_extcon_unregister_notifier(struct device *dev, + struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb) { } static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index) @@ -357,4 +296,28 @@ static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, return ERR_PTR(-ENODEV); } #endif /* CONFIG_EXTCON */ + +/* + * The following structure and APIs are deprecated. EXTCON keeps these + * definitions only to prevent build breakage.
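+ * New users should instead register an EXTCON_* cable id with + * extcon_register_notifier() or its devm_ variant above; the compatibility + * stubs below simply return -EINVAL.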
+ */ +struct extcon_specific_cable_nb { + struct notifier_block *user_nb; + int cable_index; + struct extcon_dev *edev; + unsigned long previous_value; +}; + +static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj, + const char *extcon_name, const char *cable_name, + struct notifier_block *nb) +{ + return -EINVAL; +} + +static inline int extcon_unregister_interest(struct extcon_specific_cable_nb + *obj) +{ + return -EINVAL; +} #endif /* __LINUX_EXTCON_H__ */ diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h index 53c60806bcfb..ac85f2061351 100644 --- a/include/linux/extcon/extcon-adc-jack.h +++ b/include/linux/extcon/extcon-adc-jack.h @@ -53,6 +53,7 @@ struct adc_jack_cond { * milli-seconds after the interrupt occurs. You may * describe such delays with @handling_delay_ms, which * is rounded off to jiffies. + * @wakeup_source: flag to wake up the system for extcon events. */ struct adc_jack_pdata { const char *name; @@ -65,6 +66,7 @@ struct adc_jack_pdata { unsigned long irq_flags; unsigned long handling_delay_ms; /* in ms */ + bool wakeup_source; }; #endif /* _EXTCON_ADC_JACK_H */ diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index b90e9bdbd1dd..4c02c6521fef 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -508,4 +508,6 @@ enum { F2FS_FT_MAX }; +#define S_SHIFT 12 + #endif /* _LINUX_F2FS_FS_H */ diff --git a/include/linux/fb.h b/include/linux/fb.h index dfe88351341f..a964d076b4dc 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -673,6 +673,7 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, } /* drivers/video/fb_defio.c */ +int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma); extern void fb_deferred_io_init(struct fb_info *info); extern void fb_deferred_io_open(struct fb_info *info, struct inode *inode, diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h new file mode 100644 index 000000000000..86baaa45567c --- /dev/null +++ b/include/linux/fence-array.h @@ -0,0 +1,73 @@ +/* + * fence-array: aggregates fences to be waited on together + * + * Copyright (C) 2016 Collabora Ltd + * Copyright (C) 2016 Advanced Micro Devices, Inc. + * Authors: + * Gustavo Padovan <gustavo@padovan.org> + * Christian König <christian.koenig@amd.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+ */ + +#ifndef __LINUX_FENCE_ARRAY_H +#define __LINUX_FENCE_ARRAY_H + +#include <linux/fence.h> + +/** + * struct fence_array_cb - callback helper for fence array + * @cb: fence callback structure for signaling + * @array: reference to the parent fence array object + */ +struct fence_array_cb { + struct fence_cb cb; + struct fence_array *array; +}; + +/** + * struct fence_array - fence to represent an array of fences + * @base: fence base class + * @lock: spinlock for fence handling + * @num_fences: number of fences in the array + * @num_pending: fences in the array still pending + * @fences: array of the fences + */ +struct fence_array { + struct fence base; + + spinlock_t lock; + unsigned num_fences; + atomic_t num_pending; + struct fence **fences; +}; + +extern const struct fence_ops fence_array_ops; + +/** + * to_fence_array - cast a fence to a fence_array + * @fence: fence to cast to a fence_array + * + * Returns NULL if the fence is not a fence_array, + * or the fence_array otherwise. + */ +static inline struct fence_array *to_fence_array(struct fence *fence) +{ + if (fence->ops != &fence_array_ops) + return NULL; + + return container_of(fence, struct fence_array, base); +} + +struct fence_array *fence_array_create(int num_fences, struct fence **fences, + u64 context, unsigned seqno, + bool signal_on_any); + +#endif /* __LINUX_FENCE_ARRAY_H */ diff --git a/include/linux/fence.h b/include/linux/fence.h index 2b17698b60b8..2ac6fa5f4712 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h @@ -75,12 +75,11 @@ struct fence { struct rcu_head rcu; struct list_head cb_list; spinlock_t *lock; - unsigned context, seqno; + u64 context; + unsigned seqno; unsigned long flags; ktime_t timestamp; int status; - struct list_head child_list; - struct list_head active_list; }; enum fence_flag_bits { @@ -178,7 +177,7 @@ struct fence_ops { }; void fence_init(struct fence *fence, const struct fence_ops *ops, - spinlock_t *lock, unsigned context, unsigned seqno); + spinlock_t *lock, u64 context, unsigned seqno); void fence_release(struct kref *kref); void fence_free(struct fence *fence); @@ -352,27 +351,27 @@ static inline signed long fence_wait(struct fence *fence, bool intr) return ret < 0 ? ret : 0; } -unsigned fence_context_alloc(unsigned num); +u64 fence_context_alloc(unsigned num); #define FENCE_TRACE(f, fmt, args...) \ do { \ struct fence *__ff = (f); \ - if (config_enabled(CONFIG_FENCE_TRACE)) \ - pr_info("f %u#%u: " fmt, \ + if (IS_ENABLED(CONFIG_FENCE_TRACE)) \ + pr_info("f %llu#%u: " fmt, \ __ff->context, __ff->seqno, ##args); \ } while (0) #define FENCE_WARN(f, fmt, args...) \ do { \ struct fence *__ff = (f); \ - pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \ + pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ ##args); \ } while (0) #define FENCE_ERR(f, fmt, args...) 
\ do { \ struct fence *__ff = (f); \ - pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \ + pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ ##args); \ } while (0) diff --git a/include/linux/file.h b/include/linux/file.h index f87d30882a24..7444f5feda12 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -44,6 +44,7 @@ extern struct file *fget_raw(unsigned int fd); extern unsigned long __fdget(unsigned int fd); extern unsigned long __fdget_raw(unsigned int fd); extern unsigned long __fdget_pos(unsigned int fd); +extern void __f_unlock_pos(struct file *); static inline struct fd __to_fd(unsigned long v) { @@ -60,6 +61,18 @@ static inline struct fd fdget_raw(unsigned int fd) return __to_fd(__fdget_raw(fd)); } +static inline struct fd fdget_pos(int fd) +{ + return __to_fd(__fdget_pos(fd)); +} + +static inline void fdput_pos(struct fd f) +{ + if (f.flags & FDPUT_POS_UNLOCK) + __f_unlock_pos(f.file); + fdput(f); +} + extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); extern int replace_fd(unsigned fd, struct file *file, unsigned flags); extern void set_close_on_exec(unsigned int fd, int flag); diff --git a/include/linux/filter.h b/include/linux/filter.h index a51a5361695f..a16439b99fd9 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -13,6 +13,8 @@ #include <linux/printk.h> #include <linux/workqueue.h> #include <linux/sched.h> +#include <linux/capability.h> + #include <net/sch_generic.h> #include <asm/cacheflush.h> @@ -42,6 +44,15 @@ struct bpf_prog_aux; #define BPF_REG_X BPF_REG_7 #define BPF_REG_TMP BPF_REG_8 +/* Kernel hidden auxiliary/helper register for hardening step. + * Only used by eBPF JITs. It's nothing more than a temporary + * register that JITs use internally, only that here it's part + * of eBPF instructions that have been rewritten for blinding + * constants. See JIT pre-step in bpf_jit_blind_constants(). + */ +#define BPF_REG_AX MAX_BPF_REG +#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1) + /* BPF program can access up to 512 bytes of stack space. 
*/ #define MAX_BPF_STACK 512 @@ -352,6 +363,27 @@ struct sk_filter { #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN +struct bpf_skb_data_end { + struct qdisc_skb_cb qdisc_cb; + void *data_end; +}; + +struct xdp_buff { + void *data; + void *data_end; +}; + +/* compute the linear packet data range [data, data_end) which + * will be accessed by cls_bpf and act_bpf programs + */ +static inline void bpf_compute_data_end(struct sk_buff *skb) +{ + struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; + + BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb)); + cb->data_end = skb->data + skb_headlen(skb); +} + static inline u8 *bpf_skb_cb(struct sk_buff *skb) { /* eBPF programs may read/write skb->cb[] area to transfer meta @@ -402,6 +434,18 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, return BPF_PROG_RUN(prog, skb); } +static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, + struct xdp_buff *xdp) +{ + u32 ret; + + rcu_read_lock(); + ret = BPF_PROG_RUN(prog, (void *)xdp); + rcu_read_unlock(); + + return ret; +} + static inline unsigned int bpf_prog_size(unsigned int proglen) { return max(sizeof(struct bpf_prog), @@ -440,9 +484,13 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) } #endif /* CONFIG_DEBUG_SET_MODULE_RONX */ -int sk_filter(struct sock *sk, struct sk_buff *skb); +int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); +static inline int sk_filter(struct sock *sk, struct sk_buff *skb) +{ + return sk_filter_trim_cap(sk, skb, 1); +} -int bpf_prog_select_runtime(struct bpf_prog *fp); +struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); void bpf_prog_free(struct bpf_prog *fp); struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); @@ -465,14 +513,10 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, void bpf_prog_destroy(struct bpf_prog *fp); int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); -int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk, - bool locked); int sk_attach_bpf(u32 ufd, struct sock *sk); int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk); int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk); int sk_detach_filter(struct sock *sk); -int __sk_detach_filter(struct sock *sk, bool locked); - int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned int len); @@ -480,10 +524,18 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp); void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); -void bpf_int_jit_compile(struct bpf_prog *fp); + +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); bool bpf_helper_changes_skb_data(void *func); +struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, + const struct bpf_insn *patch, u32 len); +void bpf_warn_invalid_xdp_action(u32 act); + #ifdef CONFIG_BPF_JIT +extern int bpf_jit_enable; +extern int bpf_jit_harden; + typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); struct bpf_binary_header * @@ -495,6 +547,9 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr); void bpf_jit_compile(struct bpf_prog *fp); void bpf_jit_free(struct bpf_prog *fp); +struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); +void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other); + static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, u32 pass, void *image) { @@ 
-505,6 +560,33 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET, 16, 1, image, proglen, false); } + +static inline bool bpf_jit_is_ebpf(void) +{ +# ifdef CONFIG_HAVE_EBPF_JIT + return true; +# else + return false; +# endif +} + +static inline bool bpf_jit_blinding_enabled(void) +{ + /* These are the prerequisites, should someone ever have the + * idea to call blinding outside of them, we make sure to + * bail out. + */ + if (!bpf_jit_is_ebpf()) + return false; + if (!bpf_jit_enable) + return false; + if (!bpf_jit_harden) + return false; + if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN)) + return false; + + return true; +} #else static inline void bpf_jit_compile(struct bpf_prog *fp) { diff --git a/include/linux/firmware.h b/include/linux/firmware.h index 5c41c5e75b5c..b1f9f0ccb8ac 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h @@ -47,6 +47,8 @@ int request_firmware_nowait( void (*cont)(const struct firmware *fw, void *context)); int request_firmware_direct(const struct firmware **fw, const char *name, struct device *device); +int request_firmware_into_buf(const struct firmware **firmware_p, + const char *name, struct device *device, void *buf, size_t size); void release_firmware(const struct firmware *fw); #else @@ -75,5 +77,11 @@ static inline int request_firmware_direct(const struct firmware **fw, return -EINVAL; } +static inline int request_firmware_into_buf(const struct firmware **firmware_p, + const char *name, struct device *device, void *buf, size_t size) +{ + return -EINVAL; +} + #endif #endif diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h index e65ef959546c..c46d2aa16d81 100644 --- a/include/linux/frontswap.h +++ b/include/linux/frontswap.h @@ -4,6 +4,7 @@ #include <linux/swap.h> #include <linux/mm.h> #include <linux/bitops.h> +#include <linux/jump_label.h> struct frontswap_ops { void (*init)(unsigned); /* this swap type was just swapon'ed */ @@ -14,7 +15,6 @@ struct frontswap_ops { struct frontswap_ops *next; /* private pointer to next ops */ }; -extern bool frontswap_enabled; extern void frontswap_register_ops(struct frontswap_ops *ops); extern void frontswap_shrink(unsigned long); extern unsigned long frontswap_curr_pages(void); @@ -30,7 +30,12 @@ extern void __frontswap_invalidate_page(unsigned, pgoff_t); extern void __frontswap_invalidate_area(unsigned); #ifdef CONFIG_FRONTSWAP -#define frontswap_enabled (1) +extern struct static_key_false frontswap_enabled_key; + +static inline bool frontswap_enabled(void) +{ + return static_branch_unlikely(&frontswap_enabled_key); +} static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) { @@ -50,7 +55,10 @@ static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) #else /* all inline routines become no-ops and all externs are ignored */ -#define frontswap_enabled (0) +static inline bool frontswap_enabled(void) +{ + return false; +} static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) { @@ -70,37 +78,35 @@ static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) static inline int frontswap_store(struct page *page) { - int ret = -1; + if (frontswap_enabled()) + return __frontswap_store(page); - if (frontswap_enabled) - ret = __frontswap_store(page); - return ret; + return -1; } static inline int frontswap_load(struct page *page) { - int ret = -1; + if (frontswap_enabled()) + return __frontswap_load(page); - if (frontswap_enabled) 
- ret = __frontswap_load(page); - return ret; + return -1; } static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset) { - if (frontswap_enabled) + if (frontswap_enabled()) __frontswap_invalidate_page(type, offset); } static inline void frontswap_invalidate_area(unsigned type) { - if (frontswap_enabled) + if (frontswap_enabled()) __frontswap_invalidate_area(type); } static inline void frontswap_init(unsigned type, unsigned long *map) { - if (frontswap_enabled) + if (frontswap_enabled()) __frontswap_init(type, map); } diff --git a/include/linux/fs.h b/include/linux/fs.h index 70e61b58baaf..901e25d495cc 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -74,7 +74,6 @@ typedef int (get_block_t)(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, ssize_t bytes, void *private); -typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate); #define MAY_EXEC 0x00000001 #define MAY_WRITE 0x00000002 @@ -153,9 +152,10 @@ typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate); #define CHECK_IOVEC_ONLY -1 /* - * The below are the various read and write types that we support. Some of + * The below are the various read and write flags that we support. Some of * them include behavioral modifiers that send information down to the - * block layer and IO scheduler. Terminology: + * block layer and IO scheduler. They should be used along with a req_op. + * Terminology: * * The block layer uses device plugging to defer IO a little bit, in * the hope that we will see more IO very shortly. This increases @@ -178,9 +178,6 @@ typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate); * READ_SYNC A synchronous read. Device is not plugged, caller can * immediately wait on this read without caring about * unplugging. - * READA Used for read-ahead operations. Lower priority, and the - * block layer could (in theory) choose to ignore this - * request if it runs into resource problems. * WRITE A normal async write. Device will be plugged. * WRITE_SYNC Synchronous write. Identical to WRITE, but passes down * the hint that someone will be waiting on this IO @@ -194,19 +191,17 @@ typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate); * non-volatile media on completion. * */ -#define RW_MASK REQ_WRITE -#define RWA_MASK REQ_RAHEAD +#define RW_MASK REQ_OP_WRITE -#define READ 0 -#define WRITE RW_MASK -#define READA RWA_MASK +#define READ REQ_OP_READ +#define WRITE REQ_OP_WRITE -#define READ_SYNC (READ | REQ_SYNC) -#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE) -#define WRITE_ODIRECT (WRITE | REQ_SYNC) -#define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH) -#define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA) -#define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA) +#define READ_SYNC REQ_SYNC +#define WRITE_SYNC (REQ_SYNC | REQ_NOIDLE) +#define WRITE_ODIRECT REQ_SYNC +#define WRITE_FLUSH (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH) +#define WRITE_FUA (REQ_SYNC | REQ_NOIDLE | REQ_FUA) +#define WRITE_FLUSH_FUA (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH | REQ_FUA) /* * Attribute flags. 
These should be or-ed together to figure out what @@ -323,6 +318,8 @@ struct writeback_control; #define IOCB_APPEND (1 << 1) #define IOCB_DIRECT (1 << 2) #define IOCB_HIPRI (1 << 3) +#define IOCB_DSYNC (1 << 4) +#define IOCB_SYNC (1 << 5) struct kiocb { struct file *ki_filp; @@ -394,13 +391,15 @@ struct address_space_operations { void (*invalidatepage) (struct page *, unsigned int, unsigned int); int (*releasepage) (struct page *, gfp_t); void (*freepage)(struct page *); - ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset); + ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter); /* * migrate the contents of a page to the specified target. If * migrate_mode is MIGRATE_ASYNC, it must not block. */ int (*migratepage) (struct address_space *, struct page *, struct page *, enum migrate_mode); + bool (*isolate_page)(struct page *, isolate_mode_t); + void (*putback_page)(struct page *); int (*launder_page) (struct page *); int (*is_partially_uptodate) (struct page *, unsigned long, unsigned long); @@ -458,7 +457,6 @@ struct block_device { struct inode * bd_inode; /* will die */ struct super_block * bd_super; struct mutex bd_mutex; /* open/close mutex */ - struct list_head bd_inodes; void * bd_claiming; void * bd_holder; int bd_holders; @@ -576,6 +574,19 @@ static inline void mapping_allow_writable(struct address_space *mapping) struct posix_acl; #define ACL_NOT_CACHED ((void *)(-1)) +#define ACL_DONT_CACHE ((void *)(-3)) + +static inline struct posix_acl * +uncached_acl_sentinel(struct task_struct *task) +{ + return (void *)task + 1; +} + +static inline bool +is_uncached_acl(struct posix_acl *acl) +{ + return (long)acl & 1; +} #define IOP_FASTPERM 0x0001 #define IOP_LOOKUP 0x0002 @@ -635,7 +646,7 @@ struct inode { /* Misc */ unsigned long i_state; - struct mutex i_mutex; + struct rw_semaphore i_rwsem; unsigned long dirtied_when; /* jiffies of first dirtying */ unsigned long dirtied_time_when; @@ -652,6 +663,7 @@ struct inode { #endif struct list_head i_lru; /* inode LRU list */ struct list_head i_sb_list; + struct list_head i_wb_list; /* backing dev writeback list */ union { struct hlist_head i_dentry; struct rcu_head i_rcu; @@ -672,6 +684,7 @@ struct inode { struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; + unsigned i_dir_seq; }; __u32 i_generation; @@ -721,27 +734,42 @@ enum inode_i_mutex_lock_class static inline void inode_lock(struct inode *inode) { - mutex_lock(&inode->i_mutex); + down_write(&inode->i_rwsem); } static inline void inode_unlock(struct inode *inode) { - mutex_unlock(&inode->i_mutex); + up_write(&inode->i_rwsem); +} + +static inline void inode_lock_shared(struct inode *inode) +{ + down_read(&inode->i_rwsem); +} + +static inline void inode_unlock_shared(struct inode *inode) +{ + up_read(&inode->i_rwsem); } static inline int inode_trylock(struct inode *inode) { - return mutex_trylock(&inode->i_mutex); + return down_write_trylock(&inode->i_rwsem); +} + +static inline int inode_trylock_shared(struct inode *inode) +{ + return down_read_trylock(&inode->i_rwsem); } static inline int inode_is_locked(struct inode *inode) { - return mutex_is_locked(&inode->i_mutex); + return rwsem_is_locked(&inode->i_rwsem); } static inline void inode_lock_nested(struct inode *inode, unsigned subclass) { - mutex_lock_nested(&inode->i_mutex, subclass); + down_write_nested(&inode->i_rwsem, subclass); } void lock_two_nondirectories(struct inode *, struct inode*); @@ -802,31 +830,6 @@ static inline void i_size_write(struct inode *inode, loff_t i_size) #endif } 
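The i_mutex to i_rwsem conversion above splits inode locking into shared and exclusive modes. A minimal sketch of how a caller might use the new helpers (the example_* functions are hypothetical; only the <linux/fs.h> API shown in this hunk is assumed):

#include <linux/fs.h>

/* Readers that can safely run concurrently take the shared lock. */
static ssize_t example_read_locked(struct file *file, char __user *buf,
				   size_t len, loff_t *ppos)
{
	struct inode *inode = file_inode(file);
	ssize_t ret;

	inode_lock_shared(inode);
	ret = 0;	/* ... do the read while holding the shared lock ... */
	inode_unlock_shared(inode);

	return ret;
}

/* Modifications still take the exclusive lock, exactly as i_mutex did. */
static void example_update_size(struct inode *inode, loff_t new_size)
{
	inode_lock(inode);
	i_size_write(inode, new_size);	/* writer must hold the exclusive lock */
	inode_unlock(inode);
}

This split is what enables the iterate_shared() directory method added further down: lookups that only read a directory can run under inode_lock_shared(), while create and rename paths keep the exclusive lock.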
-/* Helper functions so that in most cases filesystems will - * not need to deal directly with kuid_t and kgid_t and can - * instead deal with the raw numeric values that are stored - * in the filesystem. - */ -static inline uid_t i_uid_read(const struct inode *inode) -{ - return from_kuid(&init_user_ns, inode->i_uid); -} - -static inline gid_t i_gid_read(const struct inode *inode) -{ - return from_kgid(&init_user_ns, inode->i_gid); -} - -static inline void i_uid_write(struct inode *inode, uid_t uid) -{ - inode->i_uid = make_kuid(&init_user_ns, uid); -} - -static inline void i_gid_write(struct inode *inode, gid_t gid) -{ - inode->i_gid = make_kgid(&init_user_ns, gid); -} - static inline unsigned iminor(const struct inode *inode) { return MINOR(inode->i_rdev); @@ -1243,12 +1246,7 @@ static inline struct inode *file_inode(const struct file *f) static inline struct dentry *file_dentry(const struct file *file) { - struct dentry *dentry = file->f_path.dentry; - - if (unlikely(dentry->d_flags & DCACHE_OP_REAL)) - return dentry->d_op->d_real(dentry, file_inode(file)); - else - return dentry; + return d_real(file->f_path.dentry, file_inode(file), 0); } static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl) @@ -1298,6 +1296,10 @@ struct mm_struct; /* sb->s_iflags */ #define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */ #define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */ +#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */ + +/* sb->s_iflags to limit user namespace mounts */ +#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */ /* Possible states of 'frozen' field */ enum { @@ -1401,6 +1403,13 @@ struct super_block { struct hlist_head s_pins; /* + * Owning user namespace and default context in which to + * interpret filesystem uids, gids, quotas, device nodes, + * xattrs and security labels. + */ + struct user_namespace *s_user_ns; + + /* * Keep the lru lists last in the structure so they always sit on their * own individual cachelines. */ @@ -1419,8 +1428,36 @@ struct super_block { /* s_inode_list_lock protects s_inodes */ spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp; struct list_head s_inodes; /* all inodes */ + + spinlock_t s_inode_wblist_lock; + struct list_head s_inodes_wb; /* writeback inodes */ }; +/* Helper functions so that in most cases filesystems will + * not need to deal directly with kuid_t and kgid_t and can + * instead deal with the raw numeric values that are stored + * in the filesystem. + */ +static inline uid_t i_uid_read(const struct inode *inode) +{ + return from_kuid(inode->i_sb->s_user_ns, inode->i_uid); +} + +static inline gid_t i_gid_read(const struct inode *inode) +{ + return from_kgid(inode->i_sb->s_user_ns, inode->i_gid); +} + +static inline void i_uid_write(struct inode *inode, uid_t uid) +{ + inode->i_uid = make_kuid(inode->i_sb->s_user_ns, uid); +} + +static inline void i_gid_write(struct inode *inode, gid_t gid) +{ + inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid); +} + extern struct timespec current_fs_time(struct super_block *sb); /* @@ -1563,6 +1600,7 @@ extern int vfs_whiteout(struct inode *, struct dentry *); */ extern void inode_init_owner(struct inode *inode, const struct inode *dir, umode_t mode); +extern bool may_open_dev(const struct path *path); /* * VFS FS_IOC_FIEMAP helper definitions. 
*/ @@ -1646,6 +1684,7 @@ struct file_operations { ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); int (*iterate) (struct file *, struct dir_context *); + int (*iterate_shared) (struct file *, struct dir_context *); unsigned int (*poll) (struct file *, struct poll_table_struct *); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); @@ -1699,8 +1738,10 @@ struct inode_operations { struct inode *, struct dentry *, unsigned int); int (*setattr) (struct dentry *, struct iattr *); int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); - int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); - ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t); + int (*setxattr) (struct dentry *, struct inode *, + const char *, const void *, size_t, int); + ssize_t (*getxattr) (struct dentry *, struct inode *, + const char *, void *, size_t); ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*removexattr) (struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, @@ -1830,6 +1871,11 @@ struct super_operations { #define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \ (inode)->i_rdev == WHITEOUT_DEV) +static inline bool HAS_UNMAPPED_ID(struct inode *inode) +{ + return !uid_valid(inode->i_uid) || !gid_valid(inode->i_gid); +} + /* * Inode state bits. Protected by inode->i_lock * @@ -1978,8 +2024,6 @@ struct file_system_type { #define FS_BINARY_MOUNTDATA 2 #define FS_HAS_SUBTYPE 4 #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ -#define FS_USERNS_DEV_MOUNT 16 /* A userns mount does not imply MNT_NODEV */ -#define FS_USERNS_VISIBLE 32 /* FS must already be visible */ #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. 
*/ struct dentry *(*mount) (struct file_system_type *, int, const char *, void *); @@ -2000,8 +2044,9 @@ struct file_system_type { #define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME) -extern struct dentry *mount_ns(struct file_system_type *fs_type, int flags, - void *data, int (*fill_super)(struct super_block *, void *, int)); +extern struct dentry *mount_ns(struct file_system_type *fs_type, + int flags, void *data, void *ns, struct user_namespace *user_ns, + int (*fill_super)(struct super_block *, void *, int)); extern struct dentry *mount_bdev(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, int (*fill_super)(struct super_block *, void *, int)); @@ -2021,6 +2066,11 @@ void deactivate_locked_super(struct super_block *sb); int set_anon_super(struct super_block *s, void *data); int get_anon_bdev(dev_t *); void free_anon_bdev(dev_t); +struct super_block *sget_userns(struct file_system_type *type, + int (*test)(struct super_block *,void *), + int (*set)(struct super_block *,void *), + int flags, struct user_namespace *user_ns, + void *data); struct super_block *sget(struct file_system_type *type, int (*test)(struct super_block *,void *), int (*set)(struct super_block *,void *), @@ -2263,7 +2313,7 @@ struct filename { const char iname[]; }; -extern long vfs_truncate(struct path *, loff_t); +extern long vfs_truncate(const struct path *, loff_t); extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, struct file *filp); extern int vfs_fallocate(struct file *file, int mode, loff_t offset, @@ -2320,14 +2370,6 @@ extern struct super_block *freeze_bdev(struct block_device *); extern void emergency_thaw_all(void); extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); extern int fsync_bdev(struct block_device *); -#ifdef CONFIG_FS_DAX -extern bool blkdev_dax_capable(struct block_device *bdev); -#else -static inline bool blkdev_dax_capable(struct block_device *bdev) -{ - return false; -} -#endif extern struct super_block *blockdev_superblock; @@ -2395,6 +2437,8 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev, /* fs/char_dev.c */ #define CHRDEV_MAJOR_HASH_SIZE 255 +/* Marks the bottom of the first segment of free char majors */ +#define CHRDEV_MAJOR_DYN_END 234 extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); extern int register_chrdev_region(dev_t, unsigned, const char *); extern int __register_chrdev(unsigned int major, unsigned int baseminor, @@ -2438,15 +2482,18 @@ extern void make_bad_inode(struct inode *); extern bool is_bad_inode(struct inode *); #ifdef CONFIG_BLOCK -/* - * return READ, READA, or WRITE - */ -#define bio_rw(bio) ((bio)->bi_rw & (RW_MASK | RWA_MASK)) +static inline bool op_is_write(unsigned int op) +{ + return op == REQ_OP_READ ? false : true; +} /* * return data direction, READ or WRITE */ -#define bio_data_dir(bio) ((bio)->bi_rw & 1) +static inline int bio_data_dir(struct bio *bio) +{ + return op_is_write(bio_op(bio)) ? 
WRITE : READ; +} extern void check_disk_size_change(struct gendisk *disk, struct block_device *bdev); @@ -2481,17 +2528,30 @@ extern int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end, int sync_mode); extern int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end); +extern int filemap_check_errors(struct address_space *mapping); extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync); extern int vfs_fsync(struct file *file, int datasync); -static inline int generic_write_sync(struct file *file, loff_t pos, loff_t count) -{ - if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host)) - return 0; - return vfs_fsync_range(file, pos, pos + count - 1, - (file->f_flags & __O_SYNC) ? 0 : 1); + +/* + * Sync the bytes written if this was a synchronous write. Expect ki_pos + * to already be updated for the write, and will return either the amount + * of bytes passed in, or an error if syncing the file failed. + */ +static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count) +{ + if (iocb->ki_flags & IOCB_DSYNC) { + int ret = vfs_fsync_range(iocb->ki_filp, + iocb->ki_pos - count, iocb->ki_pos - 1, + (iocb->ki_flags & IOCB_SYNC) ? 0 : 1); + if (ret) + return ret; + } + + return count; } + extern void emergency_sync(void); extern void emergency_remount(void); #ifdef CONFIG_BLOCK @@ -2590,15 +2650,35 @@ static inline void i_readcount_inc(struct inode *inode) #endif extern int do_pipe_flags(int *, int); +#define __kernel_read_file_id(id) \ + id(UNKNOWN, unknown) \ + id(FIRMWARE, firmware) \ + id(FIRMWARE_PREALLOC_BUFFER, firmware) \ + id(MODULE, kernel-module) \ + id(KEXEC_IMAGE, kexec-image) \ + id(KEXEC_INITRAMFS, kexec-initramfs) \ + id(POLICY, security-policy) \ + id(MAX_ID, ) + +#define __fid_enumify(ENUM, dummy) READING_ ## ENUM, +#define __fid_stringify(dummy, str) #str, + enum kernel_read_file_id { - READING_FIRMWARE = 1, - READING_MODULE, - READING_KEXEC_IMAGE, - READING_KEXEC_INITRAMFS, - READING_POLICY, - READING_MAX_ID + __kernel_read_file_id(__fid_enumify) }; +static const char * const kernel_read_file_str[] = { + __kernel_read_file_id(__fid_stringify) +}; + +static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id) +{ + if (id < 0 || id >= READING_MAX_ID) + return kernel_read_file_str[READING_UNKNOWN]; + + return kernel_read_file_str[id]; +} + extern int kernel_read(struct file *, loff_t, char *, unsigned long); extern int kernel_read_file(struct file *, void **, loff_t *, loff_t, enum kernel_read_file_id); @@ -2668,11 +2748,6 @@ extern struct inode *new_inode(struct super_block *sb); extern void free_inode_nonrcu(struct inode *inode); extern int should_remove_suid(struct dentry *); extern int file_remove_privs(struct file *); -extern int dentry_needs_remove_privs(struct dentry *dentry); -static inline int file_needs_remove_privs(struct file *file) -{ - return dentry_needs_remove_privs(file->f_path.dentry); -} extern void __insert_inode_hash(struct inode *, unsigned long hashval); static inline void insert_inode_hash(struct inode *inode) @@ -2690,7 +2765,7 @@ static inline void remove_inode_hash(struct inode *inode) extern void inode_sb_list_add(struct inode *inode); #ifdef CONFIG_BLOCK -extern blk_qc_t submit_bio(int, struct bio *); +extern blk_qc_t submit_bio(struct bio *); extern int bdev_read_only(struct block_device *); #endif extern int set_blocksize(struct block_device *, int); @@ -2703,7 +2778,7 @@ extern ssize_t generic_write_checks(struct 
kiocb *, struct iov_iter *); extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); -extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *, loff_t); +extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *); extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t); ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos); @@ -2745,7 +2820,7 @@ extern int generic_file_open(struct inode * inode, struct file * filp); extern int nonseekable_open(struct inode * inode, struct file * filp); #ifdef CONFIG_BLOCK -typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, +typedef void (dio_submit_t)(struct bio *bio, struct inode *inode, loff_t file_offset); enum { @@ -2766,18 +2841,17 @@ void dio_end_io(struct bio *bio, int error); ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, struct block_device *bdev, struct iov_iter *iter, - loff_t offset, get_block_t get_block, + get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io, int flags); static inline ssize_t blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, - struct iov_iter *iter, loff_t offset, + struct iov_iter *iter, get_block_t get_block) { return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, - offset, get_block, NULL, NULL, - DIO_LOCKING | DIO_SKIP_HOLES); + get_block, NULL, NULL, DIO_LOCKING | DIO_SKIP_HOLES); } #endif @@ -2943,6 +3017,10 @@ static inline int iocb_flags(struct file *file) res |= IOCB_APPEND; if (io_is_direct(file)) res |= IOCB_DIRECT; + if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host)) + res |= IOCB_DSYNC; + if (file->f_flags & __O_SYNC) + res |= IOCB_SYNC; return res; } @@ -3104,6 +3182,13 @@ static inline bool dir_relax(struct inode *inode) return !IS_DEADDIR(inode); } +static inline bool dir_relax_shared(struct inode *inode) +{ + inode_unlock_shared(inode); + inode_lock_shared(inode); + return !IS_DEADDIR(inode); +} + extern bool path_noexec(const struct path *path); extern void inode_nohighmem(struct inode *inode); diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 604e1526cd00..13ba552e6c09 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -241,7 +241,7 @@ struct fscache_cache_ops { /* check the consistency between the backing cache and the FS-Cache * cookie */ - bool (*check_consistency)(struct fscache_operation *op); + int (*check_consistency)(struct fscache_operation *op); /* store the updated auxiliary data on an object */ void (*update_object)(struct fscache_object *object); diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h index 6027f6bbb061..76cff18bb032 100644 --- a/include/linux/fscrypto.h +++ b/include/linux/fscrypto.h @@ -175,6 +175,7 @@ struct fscrypt_name { */ struct fscrypt_operations { int (*get_context)(struct inode *, void *, size_t); + int (*key_prefix)(struct inode *, u8 **); int (*prepare_context)(struct inode *); int (*set_context)(struct inode *, const void *, size_t, void *); int (*dummy_context)(struct inode *); @@ -273,8 +274,7 @@ extern void fscrypt_restore_control_page(struct page *); extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t, unsigned int); /* policy.c */ -extern int fscrypt_process_policy(struct inode *, - const struct fscrypt_policy *); +extern int 
fscrypt_process_policy(struct file *, const struct fscrypt_policy *); extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *); extern int fscrypt_has_permitted_context(struct inode *, struct inode *); extern int fscrypt_inherit_context(struct inode *, struct inode *, @@ -344,7 +344,7 @@ static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p, } /* policy.c */ -static inline int fscrypt_notsupp_process_policy(struct inode *i, +static inline int fscrypt_notsupp_process_policy(struct file *f, const struct fscrypt_policy *p) { return -EOPNOTSUPP; diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h index 0023088b253b..3f9778cbc79d 100644 --- a/include/linux/fsl_ifc.h +++ b/include/linux/fsl_ifc.h @@ -39,6 +39,10 @@ #define FSL_IFC_VERSION_MASK 0x0F0F0000 #define FSL_IFC_VERSION_1_0_0 0x01000000 #define FSL_IFC_VERSION_1_1_0 0x01010000 +#define FSL_IFC_VERSION_2_0_0 0x02000000 + +#define PGOFFSET_64K (64*1024) +#define PGOFFSET_4K (4*1024) /* * CSPR - Chip Select Property Register @@ -723,20 +727,26 @@ struct fsl_ifc_nand { __be32 nand_evter_en; u32 res17[0x2]; __be32 nand_evter_intr_en; - u32 res18[0x2]; + __be32 nand_vol_addr_stat; + u32 res18; __be32 nand_erattr0; __be32 nand_erattr1; u32 res19[0x10]; __be32 nand_fsr; - u32 res20; - __be32 nand_eccstat[4]; - u32 res21[0x20]; + u32 res20[0x3]; + __be32 nand_eccstat[6]; + u32 res21[0x1c]; __be32 nanndcr; u32 res22[0x2]; __be32 nand_autoboot_trgr; u32 res23; __be32 nand_mdr; - u32 res24[0x5C]; + u32 res24[0x1C]; + __be32 nand_dll_lowcfg0; + __be32 nand_dll_lowcfg1; + u32 res25; + __be32 nand_dll_lowstat; + u32 res26[0x3c]; }; /* @@ -771,13 +781,12 @@ struct fsl_ifc_gpcm { __be32 gpcm_erattr1; __be32 gpcm_erattr2; __be32 gpcm_stat; - u32 res4[0x1F3]; }; /* * IFC Controller Registers */ -struct fsl_ifc_regs { +struct fsl_ifc_global { __be32 ifc_rev; u32 res1[0x2]; struct { @@ -803,21 +812,26 @@ struct fsl_ifc_regs { } ftim_cs[FSL_IFC_BANK_COUNT]; u32 res9[0x30]; __be32 rb_stat; - u32 res10[0x2]; + __be32 rb_map; + __be32 wb_map; __be32 ifc_gcr; - u32 res11[0x2]; + u32 res10[0x2]; __be32 cm_evter_stat; - u32 res12[0x2]; + u32 res11[0x2]; __be32 cm_evter_en; - u32 res13[0x2]; + u32 res12[0x2]; __be32 cm_evter_intr_en; - u32 res14[0x2]; + u32 res13[0x2]; __be32 cm_erattr0; __be32 cm_erattr1; - u32 res15[0x2]; + u32 res14[0x2]; __be32 ifc_ccr; __be32 ifc_csr; - u32 res16[0x2EB]; + __be32 ddr_ccr_low; +}; + + +struct fsl_ifc_runtime { struct fsl_ifc_nand ifc_nand; struct fsl_ifc_nor ifc_nor; struct fsl_ifc_gpcm ifc_gpcm; @@ -831,7 +845,8 @@ extern int fsl_ifc_find(phys_addr_t addr_base); struct fsl_ifc_ctrl { /* device info */ struct device *dev; - struct fsl_ifc_regs __iomem *regs; + struct fsl_ifc_global __iomem *gregs; + struct fsl_ifc_runtime __iomem *rregs; int irq; int nand_irq; spinlock_t lock; diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 0141f257d67b..eed9e853a06f 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -52,18 +52,6 @@ static inline int fsnotify_perm(struct file *file, int mask) } /* - * fsnotify_d_move - dentry has been moved - */ -static inline void fsnotify_d_move(struct dentry *dentry) -{ - /* - * On move we need to update dentry->d_flags to indicate if the new parent - * cares about events from this dentry. 
- */ - __fsnotify_update_dcache_flags(dentry); -} - -/* * fsnotify_link_count - inode's link count changed */ static inline void fsnotify_link_count(struct inode *inode) diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 1259e53d9296..7268ed076be8 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -148,6 +148,7 @@ struct fsnotify_group { #define FS_PRIO_1 1 /* fanotify content based access control */ #define FS_PRIO_2 2 /* fanotify pre-content access */ unsigned int priority; + bool shutdown; /* group is being shut down, don't queue more events */ /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */ struct mutex mark_mutex; /* protect marks_list */ @@ -179,7 +180,6 @@ struct fsnotify_group { spinlock_t access_lock; struct list_head access_list; wait_queue_head_t access_waitq; - atomic_t bypass_perm; #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */ int f_flags; unsigned int max_marks; @@ -267,10 +267,8 @@ static inline int fsnotify_inode_watches_children(struct inode *inode) * Update the dentry with a flag indicating the interest of its parent to receive * filesystem events when those events happen to this dentry->d_inode. */ -static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) +static inline void fsnotify_update_flags(struct dentry *dentry) { - struct dentry *parent; - assert_spin_locked(&dentry->d_lock); /* @@ -280,21 +278,12 @@ static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) * find our entry, so it will spin until we complete here, and update * us with the new state. */ - parent = dentry->d_parent; - if (parent->d_inode && fsnotify_inode_watches_children(parent->d_inode)) + if (fsnotify_inode_watches_children(dentry->d_parent->d_inode)) dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; else dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED; } -/* - * fsnotify_d_instantiate - instantiate a dentry for inode - */ -static inline void __fsnotify_d_instantiate(struct dentry *dentry) -{ - __fsnotify_update_dcache_flags(dentry); -} - /* called from fsnotify listeners, such as fanotify or dnotify */ /* create a new group */ @@ -303,6 +292,8 @@ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *op extern void fsnotify_get_group(struct fsnotify_group *group); /* drop reference on a group from fsnotify_alloc_group */ extern void fsnotify_put_group(struct fsnotify_group *group); +/* group destruction begins, stop queuing new events */ +extern void fsnotify_group_stop_queueing(struct fsnotify_group *group); /* destroy group */ extern void fsnotify_destroy_group(struct fsnotify_group *group); /* fasync handler function */ @@ -315,8 +306,6 @@ extern int fsnotify_add_event(struct fsnotify_group *group, struct fsnotify_event *event, int (*merge)(struct list_head *, struct fsnotify_event *)); -/* Remove passed event from groups notification queue */ -extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event); /* true if the group notification queue is empty */ extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); /* return, but do not dequeue the first event on the notification queue */ @@ -359,8 +348,6 @@ extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group) extern void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group); /* run all the marks in a group, and clear all of the marks where mark->flags & flags is true */ extern void
fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags); -/* run all the marks in a group, and flag them to be freed */ -extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group); extern void fsnotify_get_mark(struct fsnotify_mark *mark); extern void fsnotify_put_mark(struct fsnotify_mark *mark); extern void fsnotify_unmount_inodes(struct super_block *sb); @@ -388,10 +375,7 @@ static inline void __fsnotify_inode_delete(struct inode *inode) static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt) {} -static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) -{} - -static inline void __fsnotify_d_instantiate(struct dentry *dentry) +static inline void fsnotify_update_flags(struct dentry *dentry) {} static inline u32 fsnotify_get_cookie(void) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index dea12a6e413b..7d565afe35d2 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -455,6 +455,7 @@ int ftrace_update_record(struct dyn_ftrace *rec, int enable); int ftrace_test_record(struct dyn_ftrace *rec, int enable); void ftrace_run_stop_machine(int command); unsigned long ftrace_location(unsigned long ip); +unsigned long ftrace_location_range(unsigned long start, unsigned long end); unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec); unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec); @@ -753,23 +754,27 @@ static inline void ftrace_init(void) { } /* * Structure that defines an entry function trace. + * It's already packed but the attribute "packed" is needed + * to remove extra padding at the end. */ struct ftrace_graph_ent { unsigned long func; /* Current function */ int depth; -}; +} __packed; /* * Structure that defines a return function trace. + * It's already packed but the attribute "packed" is needed + * to remove extra padding at the end. */ struct ftrace_graph_ret { unsigned long func; /* Current function */ - unsigned long long calltime; - unsigned long long rettime; /* Number of functions that overran the depth limit for current task */ unsigned long overrun; + unsigned long long calltime; + unsigned long long rettime; int depth; -}; +} __packed; /* Type of the callback handlers for tracing function graph*/ typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */ diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 5c706765404a..1dbf52f9c24b 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -14,6 +14,7 @@ #include <linux/rcupdate.h> #include <linux/slab.h> #include <linux/percpu-refcount.h> +#include <linux/uuid.h> #ifdef CONFIG_BLOCK @@ -93,7 +94,7 @@ struct disk_stats { * Enough for the string representation of any kind of UUID plus NULL. * EFI UUID is 36 characters. MSDOS UUID is 11 characters. 
*/ -#define PARTITION_META_INFO_UUIDLTH 37 +#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1) struct partition_meta_info { char uuid[PARTITION_META_INFO_UUIDLTH]; @@ -204,7 +205,6 @@ struct gendisk { void *private_data; int flags; - struct device *driverfs_dev; // FIXME: remove struct kobject *slave_dir; struct timer_rand_state *random; @@ -228,27 +228,9 @@ static inline struct gendisk *part_to_disk(struct hd_struct *part) return NULL; } -static inline void part_pack_uuid(const u8 *uuid_str, u8 *to) -{ - int i; - for (i = 0; i < 16; ++i) { - *to++ = (hex_to_bin(*uuid_str) << 4) | - (hex_to_bin(*(uuid_str + 1))); - uuid_str += 2; - switch (i) { - case 3: - case 5: - case 7: - case 9: - uuid_str++; - continue; - } - } -} - static inline int blk_part_pack_uuid(const u8 *uuid_str, u8 *to) { - part_pack_uuid(uuid_str, to); + uuid_be_to_bin(uuid_str, (uuid_be *)to); return 0; } @@ -431,7 +413,12 @@ static inline void free_part_info(struct hd_struct *part) extern void part_round_stats(int cpu, struct hd_struct *part); /* block/genhd.c */ -extern void add_disk(struct gendisk *disk); +extern void device_add_disk(struct device *parent, struct gendisk *disk); +static inline void add_disk(struct gendisk *disk) +{ + device_add_disk(NULL, disk); +} + extern void del_gendisk(struct gendisk *gp); extern struct gendisk *get_gendisk(dev_t dev, int *partno); extern struct block_device *bdget_disk(struct gendisk *disk, int partno); diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h index eecd19b37001..6270a56e5edc 100644 --- a/include/linux/genl_magic_struct.h +++ b/include/linux/genl_magic_struct.h @@ -62,6 +62,11 @@ extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void); /* MAGIC helpers {{{2 */ +static inline int nla_put_u64_0pad(struct sk_buff *skb, int attrtype, u64 value) +{ + return nla_put_64bit(skb, attrtype, sizeof(u64), &value, 0); +} + /* possible field types */ #define __flg_field(attr_nr, attr_flag, name) \ __field(attr_nr, attr_flag, name, NLA_U8, char, \ @@ -80,7 +85,7 @@ extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void); nla_get_u32, nla_put_u32, true) #define __u64_field(attr_nr, attr_flag, name) \ __field(attr_nr, attr_flag, name, NLA_U64, __u64, \ - nla_get_u64, nla_put_u64, false) + nla_get_u64, nla_put_u64_0pad, false) #define __str_field(attr_nr, attr_flag, name, maxlen) \ __array(attr_nr, attr_flag, name, NLA_NUL_STRING, char, maxlen, \ nla_strlcpy, nla_put, false) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 570383a41853..f8041f9de31e 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -78,8 +78,7 @@ struct vm_area_struct; * __GFP_THISNODE forces the allocation to be satisfied from the requested * node with no fallbacks or placement policy enforcements. * - * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg (only relevant - * to kmem allocations). + * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg. */ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) @@ -238,9 +237,11 @@ struct vm_area_struct; * are expected to be movable via page reclaim or page migration. Typically, * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE. * - * GFP_TRANSHUGE is used for THP allocations. They are compound allocations - * that will fail quickly if memory is not available and will not wake - * kswapd on failure. + * GFP_TRANSHUGE and GFP_TRANSHUGE_LIGHT are used for THP allocations.
They are + * compound allocations that will generally fail quickly if memory is not + * available and will not wake kswapd/kcompactd on failure. The _LIGHT + * version does not attempt reclaim/compaction at all and is by default used + * in page fault path, while the non-light is used by khugepaged. */ #define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) @@ -255,9 +256,9 @@ struct vm_area_struct; #define GFP_DMA32 __GFP_DMA32 #define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) #define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) -#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ - __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \ - ~__GFP_RECLAIM) +#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ + __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM) +#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM) /* Convert GFP flags to their corresponding migrate type */ #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) @@ -486,10 +487,6 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ alloc_pages_vma(gfp_mask, 0, vma, addr, node, false) -extern struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order); -extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, - unsigned int order); - extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); @@ -513,9 +510,6 @@ extern void *__alloc_page_frag(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask); extern void __free_page_frag(void *addr); -extern void __free_kmem_pages(struct page *page, unsigned int order); -extern void free_kmem_pages(unsigned long addr, unsigned int order); - #define __free_page(page) __free_pages((page), 0) #define free_page(addr) free_pages((addr), 0) diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index bee976f82788..50882e09289b 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -20,6 +20,18 @@ struct gpio_device; #ifdef CONFIG_GPIOLIB /** + * enum single_ended_mode - mode for single ended operation + * @LINE_MODE_PUSH_PULL: normal mode for a GPIO line, drive actively high/low + * @LINE_MODE_OPEN_DRAIN: set line to be open drain + * @LINE_MODE_OPEN_SOURCE: set line to be open source + */ +enum single_ended_mode { + LINE_MODE_PUSH_PULL, + LINE_MODE_OPEN_DRAIN, + LINE_MODE_OPEN_SOURCE, +}; + +/** * struct gpio_chip - abstract a GPIO controller * @label: a functional name for the GPIO device, such as a part * number or the name of the SoC IP-block implementing it. @@ -38,7 +50,15 @@ struct gpio_device; * @set: assigns output value for signal "offset" * @set_multiple: assigns output values for multiple signals defined by "mask" * @set_debounce: optional hook for setting debounce time for specified gpio in - * interrupt triggered gpio chips + * interrupt triggered gpio chips + * @set_single_ended: optional hook for setting a line as open drain, open + * source, or non-single ended (restore from open drain/source to normal + * push-pull mode) this should be implemented if the hardware supports + * open drain or open source settings. The GPIOlib will otherwise try + * to emulate open drain/source by not actively driving lines high/low + * if a consumer request this. The driver may return -ENOTSUPP if e.g. 
+ it supports just open drain but not open source and is called + with LINE_MODE_OPEN_SOURCE as the mode argument. * @to_irq: optional hook supporting non-static gpio_to_irq() mappings; * implementation may not sleep * @dbg_show: optional routine to show contents in debugfs; default code @@ -130,6 +150,9 @@ struct gpio_chip { int (*set_debounce)(struct gpio_chip *chip, unsigned offset, unsigned debounce); + int (*set_single_ended)(struct gpio_chip *chip, + unsigned offset, + enum single_ended_mode mode); int (*to_irq)(struct gpio_chip *chip, unsigned offset); diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h index ee2d8c6f9130..0b71024c082c 100644 --- a/include/linux/gpio_keys.h +++ b/include/linux/gpio_keys.h @@ -2,7 +2,6 @@ #define _GPIO_KEYS_H struct device; -struct gpio_desc; /** * struct gpio_keys_button - configuration parameters @@ -18,7 +17,6 @@ struct gpio_desc; * disable button via sysfs * @value: axis value for %EV_ABS * @irq: Irq number in case of interrupt keys - * @gpiod: GPIO descriptor */ struct gpio_keys_button { unsigned int code; @@ -31,7 +29,6 @@ struct gpio_keys_button { bool can_disable; int value; unsigned int irq; - struct gpio_desc *gpiod; }; /** @@ -46,7 +43,7 @@ struct gpio_keys_button { * @name: input device name */ struct gpio_keys_platform_data { - struct gpio_keys_button *buttons; + const struct gpio_keys_button *buttons; int nbuttons; unsigned int poll_interval; unsigned int rep:1; diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index dfd59d6bc6f0..c683996110b1 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -61,6 +61,7 @@ extern void irq_exit(void); #define nmi_enter() \ do { \ + printk_nmi_enter(); \ lockdep_off(); \ ftrace_nmi_enter(); \ BUG_ON(in_nmi()); \ @@ -77,6 +78,7 @@ extern void irq_exit(void); preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \ ftrace_nmi_exit(); \ lockdep_on(); \ + printk_nmi_exit(); \ } while (0) #endif /* LINUX_HARDIRQ_H */ diff --git a/include/linux/hash.h b/include/linux/hash.h index 79c52fa81cac..ad6fa21d977b 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h @@ -3,92 +3,94 @@ /* Fast hashing routine for ints, longs and pointers. (C) 2002 Nadia Yvette Chambers, IBM */ -/* - * Knuth recommends primes in approximately golden ratio to the maximum - * integer representable by a machine word for multiplicative hashing. - * Chuck Lever verified the effectiveness of this technique: - * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf - * - * These primes are chosen to be bit-sparse, that is operations on - * them can use shifts and additions instead of multiplications for - * machines where multiplications are slow. - */ - #include <asm/types.h> #include <linux/compiler.h> -/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ -#define GOLDEN_RATIO_PRIME_32 0x9e370001UL -/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */ -#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL - +/* + * The "GOLDEN_RATIO_PRIME" is used in fs/btrfs/btrfs_inode.h and + * fs/inode.c. It's not actually prime any more (the previous primes + * were actively bad for hashing), but the name remains.
+ */ #if BITS_PER_LONG == 32 -#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32 +#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32 #define hash_long(val, bits) hash_32(val, bits) #elif BITS_PER_LONG == 64 #define hash_long(val, bits) hash_64(val, bits) -#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64 +#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64 #else #error Wordsize not 32 or 64 #endif /* - * The above primes are actively bad for hashing, since they are - * too sparse. The 32-bit one is mostly ok, the 64-bit one causes - * real problems. Besides, the "prime" part is pointless for the - * multiplicative hash. + * This hash multiplies the input by a large odd number and takes the + * high bits. Since multiplication propagates changes to the most + * significant end only, it is essential that the high bits of the + * product be used for the hash value. + * + * Chuck Lever verified the effectiveness of this technique: + * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf * * Although a random odd number will do, it turns out that the golden * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice - * properties. + * properties. (See Knuth vol 3, section 6.4, exercise 9.) * - * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2. - * (See Knuth vol 3, section 6.4, exercise 9.) + * These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2, + * which is very slightly easier to multiply by and makes no + * difference to the hash distribution. */ #define GOLDEN_RATIO_32 0x61C88647 #define GOLDEN_RATIO_64 0x61C8864680B583EBull -static __always_inline u64 hash_64(u64 val, unsigned int bits) -{ - u64 hash = val; +#ifdef CONFIG_HAVE_ARCH_HASH +/* This header may use the GOLDEN_RATIO_xx constants */ +#include <asm/hash.h> +#endif -#if BITS_PER_LONG == 64 - hash = hash * GOLDEN_RATIO_64; -#else - /* Sigh, gcc can't optimise this alone like it does for 32 bits. */ - u64 n = hash; - n <<= 18; - hash -= n; - n <<= 33; - hash -= n; - n <<= 3; - hash += n; - n <<= 3; - hash -= n; - n <<= 4; - hash += n; - n <<= 2; - hash += n; +/* + * The _generic versions exist only so lib/test_hash.c can compare + * the arch-optimized versions with the generic. + * + * Note that if you change these, any <asm/hash.h> that aren't updated + * to match need to have their HAVE_ARCH_* define values updated so the + * self-test will not false-positive. + */ +#ifndef HAVE_ARCH__HASH_32 +#define __hash_32 __hash_32_generic #endif +static inline u32 __hash_32_generic(u32 val) +{ + return val * GOLDEN_RATIO_32; +} +#ifndef HAVE_ARCH_HASH_32 +#define hash_32 hash_32_generic +#endif +static inline u32 hash_32_generic(u32 val, unsigned int bits) +{ /* High bits are more random, so use them. */ - return hash >> (64 - bits); + return __hash_32(val) >> (32 - bits); } -static inline u32 hash_32(u32 val, unsigned int bits) +#ifndef HAVE_ARCH_HASH_64 +#define hash_64 hash_64_generic +#endif +static __always_inline u32 hash_64_generic(u64 val, unsigned int bits) { - /* On some cpus multiply is faster, on others gcc will do shifts */ - u32 hash = val * GOLDEN_RATIO_PRIME_32; - - /* High bits are more random, so use them. */ - return hash >> (32 - bits); +#if BITS_PER_LONG == 64 + /* 64x64-bit multiply is efficient on all 64-bit processors */ + return val * GOLDEN_RATIO_64 >> (64 - bits); +#else + /* Hash 64 bits using only 32x32-bit multiply. 
*/ + return hash_32((u32)val ^ __hash_32(val >> 32), bits); +#endif } -static inline unsigned long hash_ptr(const void *ptr, unsigned int bits) +static inline u32 hash_ptr(const void *ptr, unsigned int bits) { return hash_long((unsigned long)ptr, bits); } +/* This really should be called fold32_ptr; it does no hashing to speak of. */ static inline u32 hash32_ptr(const void *ptr) { unsigned long val = (unsigned long)ptr; diff --git a/include/linux/host1x.h b/include/linux/host1x.h index d2ba7d334039..1ffbf2a8cb99 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -304,6 +304,8 @@ struct tegra_mipi_device; struct tegra_mipi_device *tegra_mipi_request(struct device *device); void tegra_mipi_free(struct tegra_mipi_device *device); +int tegra_mipi_enable(struct tegra_mipi_device *device); +int tegra_mipi_disable(struct tegra_mipi_device *device); int tegra_mipi_calibrate(struct tegra_mipi_device *device); #endif diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index c98c6539e2c2..5e00f80b1535 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -494,4 +494,11 @@ extern void __init hrtimers_init(void); /* Show pending timers: */ extern void sysrq_timer_list_show(void); +int hrtimers_prepare_cpu(unsigned int cpu); +#ifdef CONFIG_HOTPLUG_CPU +int hrtimers_dead_cpu(unsigned int cpu); +#else +#define hrtimers_dead_cpu NULL +#endif + #endif diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h index 2790591c77cf..57402544b53f 100644 --- a/include/linux/hsi/hsi.h +++ b/include/linux/hsi/hsi.h @@ -246,7 +246,7 @@ struct hsi_port { int (*stop_tx)(struct hsi_client *cl); int (*release)(struct hsi_client *cl); /* private */ - struct atomic_notifier_head n_head; + struct blocking_notifier_head n_head; }; #define to_hsi_port(dev) container_of(dev, struct hsi_port, device) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index d7b9e5346fba..6f14de45b5ce 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -1,25 +1,17 @@ #ifndef _LINUX_HUGE_MM_H #define _LINUX_HUGE_MM_H -extern int do_huge_pmd_anonymous_page(struct mm_struct *mm, - struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, - unsigned int flags); +extern int do_huge_pmd_anonymous_page(struct fault_env *fe); extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *vma); -extern void huge_pmd_set_accessed(struct mm_struct *mm, - struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, - pmd_t orig_pmd, int dirty); -extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, - pmd_t orig_pmd); +extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd); +extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd); extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags); -extern int madvise_free_huge_pmd(struct mmu_gather *tlb, +extern bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long next); extern int zap_huge_pmd(struct mmu_gather *tlb, @@ -28,9 +20,7 @@ extern int zap_huge_pmd(struct mmu_gather *tlb, extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned char *vec); -extern bool move_huge_pmd(struct vm_area_struct *vma, - struct vm_area_struct *new_vma, - unsigned long 
old_addr, +extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, unsigned long old_end, pmd_t *old_pmd, pmd_t *new_pmd); extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, @@ -51,6 +41,18 @@ enum transparent_hugepage_flag { #endif }; +struct kobject; +struct kobj_attribute; + +extern ssize_t single_hugepage_flag_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count, + enum transparent_hugepage_flag flag); +extern ssize_t single_hugepage_flag_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf, + enum transparent_hugepage_flag flag); +extern struct kobj_attribute shmem_enabled_attr; + #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) @@ -96,7 +98,7 @@ static inline int split_huge_page(struct page *page) void deferred_split_huge_page(struct page *page); void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, - unsigned long address, bool freeze); + unsigned long address, bool freeze, struct page *page); #define split_huge_pmd(__vma, __pmd, __address) \ do { \ @@ -104,7 +106,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, if (pmd_trans_huge(*____pmd) \ || pmd_devmap(*____pmd)) \ __split_huge_pmd(__vma, __pmd, __address, \ - false); \ + false, NULL); \ } while (0) @@ -136,8 +138,7 @@ static inline int hpage_nr_pages(struct page *page) return 1; } -extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long addr, pmd_t pmd, pmd_t *pmdp); +extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd); extern struct page *huge_zero_page; @@ -154,6 +155,8 @@ static inline bool is_huge_zero_pmd(pmd_t pmd) struct page *get_huge_zero_page(void); void put_huge_zero_page(void); +#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot)) + #else /* CONFIG_TRANSPARENT_HUGEPAGE */ #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) @@ -163,6 +166,8 @@ void put_huge_zero_page(void); #define transparent_hugepage_enabled(__vma) 0 +static inline void prep_transhuge_page(struct page *page) {} + #define transparent_hugepage_flags 0UL static inline int split_huge_page_to_list(struct page *page, struct list_head *list) @@ -198,8 +203,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, return NULL; } -static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long addr, pmd_t pmd, pmd_t *pmdp) +static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd) { return 0; } diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 7d953c2542a8..c26d4638f665 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -338,6 +338,7 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping, /* arch callback */ int __init alloc_bootmem_huge_page(struct hstate *h); +void __init hugetlb_bad_size(void); void __init hugetlb_add_hstate(unsigned order); struct hstate *size_to_hstate(unsigned long size); @@ -352,9 +353,7 @@ extern unsigned int default_hstate_idx; static inline struct hstate *hstate_inode(struct inode *i) { - struct hugetlbfs_sb_info *hsb; - hsb = HUGETLBFS_SB(i->i_sb); - return hsb->hstate; + return HUGETLBFS_SB(i->i_sb)->hstate; } static inline struct hstate *hstate_file(struct file *f) @@ -453,12 +452,12 @@ static inline pgoff_t basepage_index(struct page *page) extern void dissolve_free_huge_pages(unsigned long start_pfn, 
unsigned long end_pfn); -static inline int hugepage_migration_supported(struct hstate *h) +static inline bool hugepage_migration_supported(struct hstate *h) { #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION return huge_page_shift(h) == PMD_SHIFT; #else - return 0; + return false; #endif } @@ -520,7 +519,7 @@ static inline pgoff_t basepage_index(struct page *page) return page->index; } #define dissolve_free_huge_pages(s, e) do {} while (0) -#define hugepage_migration_supported(h) 0 +#define hugepage_migration_supported(h) false static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index 24154c26d469..063962f6dfc6 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -93,20 +93,17 @@ hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg, struct page *page) { - return; } static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page) { - return; } static inline void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg) { - return; } static inline void hugetlb_cgroup_file_init(void) @@ -116,7 +113,6 @@ static inline void hugetlb_cgroup_file_init(void) static inline void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage) { - return; } #endif /* CONFIG_MEM_RES_CTLR_HUGETLB */ diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h index 2bb681fbeb35..a4e7ca0f3585 100644 --- a/include/linux/hugetlb_inline.h +++ b/include/linux/hugetlb_inline.h @@ -5,16 +5,16 @@ #include <linux/mm.h> -static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) +static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma) { return !!(vma->vm_flags & VM_HUGETLB); } #else -static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) +static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma) { - return 0; + return false; } #endif diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index aa0fadce9308..b10954a66939 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -126,6 +126,8 @@ struct hv_ring_buffer_info { u32 ring_datasize; /* < ring_size */ u32 ring_data_startoffset; + u32 priv_write_index; + u32 priv_read_index; }; /* @@ -151,6 +153,33 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi, *read = dsize - *write; } +static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi) +{ + u32 read_loc, write_loc, dsize, read; + + dsize = rbi->ring_datasize; + read_loc = rbi->ring_buffer->read_index; + write_loc = READ_ONCE(rbi->ring_buffer->write_index); + + read = write_loc >= read_loc ? (write_loc - read_loc) : + (dsize - read_loc) + write_loc; + + return read; +} + +static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi) +{ + u32 read_loc, write_loc, dsize, write; + + dsize = rbi->ring_datasize; + read_loc = READ_ONCE(rbi->ring_buffer->read_index); + write_loc = rbi->ring_buffer->write_index; + + write = write_loc >= read_loc ? dsize - (write_loc - read_loc) : + read_loc - write_loc; + return write; +} + /* * VMBUS version is 32 bit entity broken up into * two 16 bit quantities: major_number. minor_number. 
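The hv_get_bytes_to_read()/hv_get_bytes_to_write() helpers added above are plain circular-buffer arithmetic over read_index and write_index. A minimal user-space sketch of the same math follows; the struct below is a simplified stand-in for hv_ring_buffer_info, not the kernel type, and the indices in main() are made-up example values. It shows the invariant that readable and writable bytes always sum to the data size:

/* Sketch of the circular-buffer math used by hv_get_bytes_to_read()
 * and hv_get_bytes_to_write() above; "struct ring" is a stand-in. */
#include <assert.h>
#include <stdint.h>

struct ring { uint32_t read_index, write_index, datasize; };

static uint32_t bytes_to_read(const struct ring *r)
{
	/* Data lies between read_index and write_index, wrapping at datasize. */
	return r->write_index >= r->read_index ?
		r->write_index - r->read_index :
		(r->datasize - r->read_index) + r->write_index;
}

static uint32_t bytes_to_write(const struct ring *r)
{
	/* Free space is everything that is not currently readable. */
	return r->datasize - bytes_to_read(r);
}

int main(void)
{
	struct ring r = { .read_index = 4000, .write_index = 100, .datasize = 4096 };

	assert(bytes_to_read(&r) == 196);   /* wrapped: 96 + 100 */
	assert(bytes_to_write(&r) == 3900); /* read + write == datasize */
	return 0;
}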
@@ -1091,7 +1120,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, resource_size_t min, resource_size_t max, resource_size_t size, resource_size_t align, bool fb_overlap_ok); - +void vmbus_free_mmio(resource_size_t start, resource_size_t size); int vmbus_cpu_number_to_vp_number(int cpu_number); u64 hv_do_hypercall(u64 control, void *input, void *output); @@ -1338,4 +1367,143 @@ extern __u32 vmbus_proto_version; int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id, const uuid_le *shv_host_servie_id); +void vmbus_set_event(struct vmbus_channel *channel); + +/* Get the start of the ring buffer. */ +static inline void * +hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info) +{ + return (void *)ring_info->ring_buffer->buffer; +} + +/* + * To optimize the flow management on the send-side, + * when the sender is blocked because of lack of + * sufficient space in the ring buffer, potentially the + * consumer of the ring buffer can signal the producer. + * This is controlled by the following parameters: + * + * 1. pending_send_sz: This is the size in bytes that the + * producer is trying to send. + * 2. The feature bit feat_pending_send_sz is set to indicate if + * the consumer of the ring will signal when the ring + * state transitions from being full to a state where + * there is room for the producer to send the pending packet. + */ + +static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi) +{ + u32 cur_write_sz; + u32 pending_sz; + + /* + * Issue a full memory barrier before making the signaling decision. + * Here is the reason for having this barrier: + * If the reading of the pend_sz (in this function) + * were to be reordered and read before we commit the new read + * index (in the calling function) we could + * have a problem. If the host were to set the pending_sz after we + * have sampled pending_sz and go to sleep before we commit the + * read index, we could miss sending the interrupt. Issue a full + * memory barrier to address this. + */ + virt_mb(); + + pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); + /* If the other end is not blocked on write don't bother. */ + if (pending_sz == 0) + return false; + + cur_write_sz = hv_get_bytes_to_write(rbi); + + if (cur_write_sz >= pending_sz) + return true; + + return false; +} + +/* + * An API to support in-place processing of incoming VMBUS packets. + */ +#define VMBUS_PKT_TRAILER 8 + +static inline struct vmpacket_descriptor * +get_next_pkt_raw(struct vmbus_channel *channel) +{ + struct hv_ring_buffer_info *ring_info = &channel->inbound; + u32 read_loc = ring_info->priv_read_index; + void *ring_buffer = hv_get_ring_buffer(ring_info); + struct vmpacket_descriptor *cur_desc; + u32 packetlen; + u32 dsize = ring_info->ring_datasize; + u32 delta = read_loc - ring_info->ring_buffer->read_index; + u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta); + + if (bytes_avail_toread < sizeof(struct vmpacket_descriptor)) + return NULL; + + if ((read_loc + sizeof(*cur_desc)) > dsize) + return NULL; + + cur_desc = ring_buffer + read_loc; + packetlen = cur_desc->len8 << 3; + + /* + * If the packet under consideration is wrapping around, + * return failure. + */ + if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1)) + return NULL; + + return cur_desc; +} + +/* + * A helper function to step through packets "in-place". + * This API is to be called after each successful call to + * get_next_pkt_raw().
+ */ +static inline void put_pkt_raw(struct vmbus_channel *channel, + struct vmpacket_descriptor *desc) +{ + struct hv_ring_buffer_info *ring_info = &channel->inbound; + u32 read_loc = ring_info->priv_read_index; + u32 packetlen = desc->len8 << 3; + u32 dsize = ring_info->ring_datasize; + + if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize) + BUG(); + /* + * Include the packet trailer. + */ + ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER; +} + +/* + * This call commits the read index and potentially signals the host. + * Here is the pattern for using the "in-place" consumption APIs: + * + * while (get_next_pkt_raw()) { + * process the packet "in-place"; + * put_pkt_raw(); + * } + * if (packets processed in place) + * commit_rd_index(); + */ +static inline void commit_rd_index(struct vmbus_channel *channel) +{ + struct hv_ring_buffer_info *ring_info = &channel->inbound; + /* + * Make sure all reads are done before we update the read index since + * the writer may start writing to the read area once the read index + * is updated. + */ + virt_rmb(); + ring_info->ring_buffer->read_index = ring_info->priv_read_index; + + if (hv_need_to_signal_on_read(ring_info)) + vmbus_set_event(channel); +} + + #endif /* _HYPERV_H */ diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h index b5f9a007a3ab..d4c1d12f900d 100644 --- a/include/linux/i2c-mux.h +++ b/include/linux/i2c-mux.h @@ -27,22 +27,49 @@ #ifdef __KERNEL__ +#include <linux/bitops.h> + +struct i2c_mux_core { + struct i2c_adapter *parent; + struct device *dev; + bool mux_locked; + + void *priv; + + int (*select)(struct i2c_mux_core *, u32 chan_id); + int (*deselect)(struct i2c_mux_core *, u32 chan_id); + + int num_adapters; + int max_adapters; + struct i2c_adapter *adapter[0]; +}; + +struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent, + struct device *dev, int max_adapters, + int sizeof_priv, u32 flags, + int (*select)(struct i2c_mux_core *, u32), + int (*deselect)(struct i2c_mux_core *, u32)); + +/* flags for i2c_mux_alloc */ +#define I2C_MUX_LOCKED BIT(0) + +static inline void *i2c_mux_priv(struct i2c_mux_core *muxc) +{ + return muxc->priv; +} + +struct i2c_adapter *i2c_root_adapter(struct device *dev); + /* - * Called to create a i2c bus on a multiplexed bus segment. - * The mux_dev and chan_id parameters are passed to the select - * and deselect callback functions to perform hardware-specific - * mux control. + * Called to create an i2c bus on a multiplexed bus segment. + * The chan_id parameter is passed to the select and deselect + * callback functions to perform hardware-specific mux control.
*/ -struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent, - struct device *mux_dev, - void *mux_priv, u32 force_nr, u32 chan_id, - unsigned int class, - int (*select) (struct i2c_adapter *, - void *mux_dev, u32 chan_id), - int (*deselect) (struct i2c_adapter *, - void *mux_dev, u32 chan_id)); - -void i2c_del_mux_adapter(struct i2c_adapter *adap); +int i2c_mux_add_adapter(struct i2c_mux_core *muxc, + u32 force_nr, u32 chan_id, + unsigned int class); + +void i2c_mux_del_adapters(struct i2c_mux_core *muxc); #endif /* __KERNEL__ */ diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h index 8f1b086ca5bc..c2e3324f9468 100644 --- a/include/linux/i2c-smbus.h +++ b/include/linux/i2c-smbus.h @@ -23,6 +23,8 @@ #define _LINUX_I2C_SMBUS_H #include <linux/i2c.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> /** @@ -48,4 +50,31 @@ struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter, struct i2c_smbus_alert_setup *setup); int i2c_handle_smbus_alert(struct i2c_client *ara); +/** + * smbus_host_notify - internal structure used by the Host Notify mechanism. + * @adapter: the I2C adapter associated with this struct + * @work: worker used to schedule the IRQ in the slave device + * @lock: spinlock to check if a notification is already pending + * @pending: flag set when a notification is pending (any new notification will + * be rejected if pending is true) + * @payload: the actual payload of the Host Notify event + * @addr: the address of the slave device which raised the notification + * + * This struct needs to be allocated by i2c_setup_smbus_host_notify() and does + * not need to be freed. Internally, i2c_setup_smbus_host_notify() uses a + * managed resource to clean this up when the adapter gets released. + */ +struct smbus_host_notify { + struct i2c_adapter *adapter; + struct work_struct work; + spinlock_t lock; + bool pending; + u16 payload; + u8 addr; +}; + +struct smbus_host_notify *i2c_setup_smbus_host_notify(struct i2c_adapter *adap); +int i2c_handle_smbus_host_notify(struct smbus_host_notify *host_notify, + unsigned short addr, unsigned int data); + #endif /* _LINUX_I2C_SMBUS_H */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 200cf13b00f6..fffdc270ca18 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -126,6 +126,11 @@ i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client, u8 command, u8 length, u8 *values); #endif /* I2C */ +enum i2c_alert_protocol { + I2C_PROTOCOL_SMBUS_ALERT, + I2C_PROTOCOL_SMBUS_HOST_NOTIFY, +}; + /** * struct i2c_driver - represent an I2C device driver * @class: What kind of i2c device we instantiate (for detect) @@ -180,8 +185,11 @@ struct i2c_driver { * The format and meaning of the data value depends on the protocol. * For the SMBus alert protocol, there is a single bit of data passed * as the alert response's low bit ("event flag"). + * For the SMBus Host Notify protocol, the data corresponds to the + * 16-bit payload data reported by the slave device acting as master. */ - void (*alert)(struct i2c_client *, unsigned int data); + void (*alert)(struct i2c_client *, enum i2c_alert_protocol protocol, + unsigned int data); /* an ioctl-like command that can be used to perform specific functions * with the device.
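The i2c-mux.h conversion above replaces the old per-channel i2c_add_mux_adapter() with a single i2c_mux_core allocated by i2c_mux_alloc(). A hedged sketch of how a mux driver's probe might use the new calls, based only on the signatures visible in this diff; the 4-channel chip, struct my_mux, and the SMBus write used to select a channel are illustrative assumptions, not a real driver:

/* Sketch only: a hypothetical 4-channel mux converted to the new API. */
#include <linux/i2c.h>
#include <linux/i2c-mux.h>

struct my_mux {
	struct i2c_client *client;
};

static int my_mux_select(struct i2c_mux_core *muxc, u32 chan)
{
	struct my_mux *mux = i2c_mux_priv(muxc);	/* priv from i2c_mux_alloc() */

	/* Hypothetical hardware control: route the bus to channel "chan". */
	return i2c_smbus_write_byte(mux->client, chan);
}

static int my_mux_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct i2c_mux_core *muxc;
	struct my_mux *mux;
	int i, ret;

	muxc = i2c_mux_alloc(client->adapter, &client->dev, 4,
			     sizeof(struct my_mux), 0,
			     my_mux_select, NULL);	/* no deselect hook */
	if (!muxc)
		return -ENOMEM;

	mux = i2c_mux_priv(muxc);
	mux->client = client;
	i2c_set_clientdata(client, muxc);

	for (i = 0; i < 4; i++) {
		/* force_nr == 0 requests a dynamically assigned bus number */
		ret = i2c_mux_add_adapter(muxc, 0, i, 0);
		if (ret) {
			i2c_mux_del_adapters(muxc);
			return ret;
		}
	}
	return 0;
}

One teardown call, i2c_mux_del_adapters(), now removes every child adapter, where the old API required one i2c_del_mux_adapter() per channel.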
@@ -349,6 +357,11 @@ extern int i2c_probe_func_quick_read(struct i2c_adapter *, unsigned short addr); extern struct i2c_client * i2c_new_dummy(struct i2c_adapter *adap, u16 address); +extern struct i2c_client * +i2c_new_secondary_device(struct i2c_client *client, + const char *name, + u16 default_addr); + extern void i2c_unregister_device(struct i2c_client *); #endif /* I2C */ @@ -524,6 +537,7 @@ struct i2c_adapter { /* data fields that are valid for all devices */ struct rt_mutex bus_lock; + struct rt_mutex mux_lock; int timeout; /* in jiffies */ int retries; @@ -538,6 +552,10 @@ struct i2c_adapter { struct i2c_bus_recovery_info *bus_recovery_info; const struct i2c_adapter_quirks *quirks; + + void (*lock_bus)(struct i2c_adapter *, unsigned int flags); + int (*trylock_bus)(struct i2c_adapter *, unsigned int flags); + void (*unlock_bus)(struct i2c_adapter *, unsigned int flags); }; #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) @@ -567,8 +585,44 @@ i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter) int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *)); /* Adapter locking functions, exported for shared pin cases */ -void i2c_lock_adapter(struct i2c_adapter *); -void i2c_unlock_adapter(struct i2c_adapter *); +#define I2C_LOCK_ROOT_ADAPTER BIT(0) +#define I2C_LOCK_SEGMENT BIT(1) + +/** + * i2c_lock_bus - Get exclusive access to an I2C bus segment + * @adapter: Target I2C bus segment + * @flags: I2C_LOCK_ROOT_ADAPTER locks the root i2c adapter, I2C_LOCK_SEGMENT + * locks only this branch in the adapter tree + */ +static inline void +i2c_lock_bus(struct i2c_adapter *adapter, unsigned int flags) +{ + adapter->lock_bus(adapter, flags); +} + +/** + * i2c_unlock_bus - Release exclusive access to an I2C bus segment + * @adapter: Target I2C bus segment + * @flags: I2C_LOCK_ROOT_ADAPTER unlocks the root i2c adapter, I2C_LOCK_SEGMENT + * unlocks only this branch in the adapter tree + */ +static inline void +i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) +{ + adapter->unlock_bus(adapter, flags); +} + +static inline void +i2c_lock_adapter(struct i2c_adapter *adapter) +{ + i2c_lock_bus(adapter, I2C_LOCK_ROOT_ADAPTER); +} + +static inline void +i2c_unlock_adapter(struct i2c_adapter *adapter) +{ + i2c_unlock_bus(adapter, I2C_LOCK_ROOT_ADAPTER); +} /*flags for the client struct: */ #define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ @@ -654,6 +708,11 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap) return adap->nr; } +static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg) +{ + return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0); +} + /** * module_i2c_driver() - Helper macro for registering a modular I2C driver * @__i2c_driver: i2c_driver struct diff --git a/include/linux/i2c/sx150x.h b/include/linux/i2c/sx150x.h deleted file mode 100644 index 52baa79d69a7..000000000000 --- a/include/linux/i2c/sx150x.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Driver for the Semtech SX150x I2C GPIO Expanders - * - * Copyright (c) 2010, Code Aurora Forum. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - */ -#ifndef __LINUX_I2C_SX150X_H -#define __LINUX_I2C_SX150X_H - -/** - * struct sx150x_platform_data - config data for SX150x driver - * @gpio_base: The index number of the first GPIO assigned to this - * GPIO expander. The expander will create a block of - * consecutively numbered gpios beginning at the given base, - * with the size of the block depending on the model of the - * expander chip. - * @oscio_is_gpo: If set to true, the driver will configure OSCIO as a GPO - * instead of as an oscillator, increasing the size of the - * GP(I)O pool created by this expander by one. The - * output-only GPO pin will be added at the end of the block. - * @io_pullup_ena: A bit-mask which enables or disables the pull-up resistor - * for each IO line in the expander. Setting the bit at - * position n will enable the pull-up for the IO at - * the corresponding offset. For chips with fewer than - * 16 IO pins, high-end bits are ignored. - * @io_pulldn_ena: A bit-mask which enables-or disables the pull-down - * resistor for each IO line in the expander. Setting the - * bit at position n will enable the pull-down for the IO at - * the corresponding offset. For chips with fewer than - * 16 IO pins, high-end bits are ignored. - * @io_open_drain_ena: A bit-mask which enables-or disables open-drain - * operation for each IO line in the expander. Setting the - * bit at position n enables open-drain operation for - * the IO at the corresponding offset. Clearing the bit - * enables regular push-pull operation for that IO. - * For chips with fewer than 16 IO pins, high-end bits - * are ignored. - * @io_polarity: A bit-mask which enables polarity inversion for each IO line - * in the expander. Setting the bit at position n inverts - * the polarity of that IO line, while clearing it results - * in normal polarity. For chips with fewer than 16 IO pins, - * high-end bits are ignored. - * @irq_summary: The 'summary IRQ' line to which the GPIO expander's INT line - * is connected, via which it reports interrupt events - * across all GPIO lines. This must be a real, - * pre-existing IRQ line. - * Setting this value < 0 disables the irq_chip functionality - * of the driver. - * @irq_base: The first 'virtual IRQ' line at which our block of GPIO-based - * IRQ lines will appear. Similarly to gpio_base, the expander - * will create a block of irqs beginning at this number. - * This value is ignored if irq_summary is < 0. - * @reset_during_probe: If set to true, the driver will trigger a full - * reset of the chip at the beginning of the probe - * in order to place it in a known state. 
- */ -struct sx150x_platform_data { - unsigned gpio_base; - bool oscio_is_gpo; - u16 io_pullup_ena; - u16 io_pulldn_ena; - u16 io_open_drain_ena; - u16 io_polarity; - int irq_summary; - unsigned irq_base; - bool reset_during_probe; -}; - -#endif /* __LINUX_I2C_SX150X_H */ diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index 630f45335c73..57086e9fc64c 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -14,9 +14,12 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) #if IS_ENABLED(CONFIG_IPV6) extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info); -typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info); +typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct in6_addr *force_saddr); extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn); extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn); +int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, + unsigned int data_len); #else diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 3b1f6cef9513..a80516fd65c8 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -7,6 +7,7 @@ * Copyright (c) 2005, Devicescape Software, Inc. * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright (c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -18,6 +19,7 @@ #include <linux/types.h> #include <linux/if_ether.h> +#include <linux/etherdevice.h> #include <asm/byteorder.h> #include <asm/unaligned.h> @@ -163,6 +165,9 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) /* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */ #define IEEE80211_MAX_FRAME_LEN 2352 +/* Maximal size of an A-MSDU that can be transported in a HT BA session */ +#define IEEE80211_MAX_MPDU_LEN_HT_BA 4095 + /* Maximal size of an A-MSDU */ #define IEEE80211_MAX_MPDU_LEN_HT_3839 3839 #define IEEE80211_MAX_MPDU_LEN_HT_7935 7935 @@ -637,6 +642,16 @@ static inline bool ieee80211_is_first_frag(__le16 seq_ctrl) return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0; } +/** + * ieee80211_is_frag - check if a frame is a fragment + * @hdr: 802.11 header of the frame + */ +static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr) +{ + return ieee80211_has_morefrags(hdr->frame_control) || + hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG); +} + struct ieee80211s_hdr { u8 flags; u8 ttl; @@ -1011,6 +1026,16 @@ struct ieee80211_mgmt { u8 tpc_elem_length; struct ieee80211_tpc_report_ie tpc; } __packed tpc_report; + struct { + u8 action_code; + u8 dialog_token; + u8 follow_up; + u8 tod[6]; + u8 toa[6]; + __le16 tod_error; + __le16 toa_error; + u8 variable[0]; + } __packed ftm; } u; } __packed action; } u; @@ -2440,7 +2465,7 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) */ static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb) { - if (skb->len < 25) + if (skb->len < IEEE80211_MIN_ACTION_SIZE) return false; return _ieee80211_is_robust_mgmt_frame((void *)skb->data); } @@ -2463,6 +2488,35 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr, } /** + * _ieee80211_is_group_privacy_action - check if frame is a group addressed + * privacy action frame + * @hdr: the frame + */ +static inline bool 
_ieee80211_is_group_privacy_action(struct ieee80211_hdr *hdr) +{ + struct ieee80211_mgmt *mgmt = (void *)hdr; + + if (!ieee80211_is_action(hdr->frame_control) || + !is_multicast_ether_addr(hdr->addr1)) + return false; + + return mgmt->u.action.category == WLAN_CATEGORY_MESH_ACTION || + mgmt->u.action.category == WLAN_CATEGORY_MULTIHOP_ACTION; +} + +/** + * ieee80211_is_group_privacy_action - check if frame is a group addressed + * privacy action frame + * @skb: the skb containing the frame, length will be checked + */ +static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb) +{ + if (skb->len < IEEE80211_MIN_ACTION_SIZE) + return false; + return _ieee80211_is_group_privacy_action((void *)skb->data); +} + +/** * ieee80211_tu_to_usec - convert time units (TU) to microseconds * @tu: the TUs */ diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h index d3e415674dac..ddb890174a0e 100644 --- a/include/linux/ieee802154.h +++ b/include/linux/ieee802154.h @@ -31,6 +31,8 @@ #define IEEE802154_MIN_PSDU_LEN 9 #define IEEE802154_FCS_LEN 2 #define IEEE802154_MAX_AUTH_TAG_LEN 16 +#define IEEE802154_FC_LEN 2 +#define IEEE802154_SEQ_LEN 1 /* General MAC frame format: * 2 bytes: Frame Control @@ -47,6 +49,8 @@ #define IEEE802154_ADDR_SHORT_UNSPEC 0xfffe #define IEEE802154_EXTENDED_ADDR_LEN 8 +#define IEEE802154_SHORT_ADDR_LEN 2 +#define IEEE802154_PAN_ID_LEN 2 #define IEEE802154_LIFS_PERIOD 40 #define IEEE802154_SIFS_PERIOD 12 @@ -218,10 +222,19 @@ enum { /* frame control handling */ #define IEEE802154_FCTL_FTYPE 0x0003 #define IEEE802154_FCTL_ACKREQ 0x0020 +#define IEEE802154_FCTL_SECEN 0x0004 #define IEEE802154_FCTL_INTRA_PAN 0x0040 +#define IEEE802154_FCTL_DADDR 0x0c00 +#define IEEE802154_FCTL_SADDR 0xc000 #define IEEE802154_FTYPE_DATA 0x0001 +#define IEEE802154_FCTL_ADDR_NONE 0x0000 +#define IEEE802154_FCTL_DADDR_SHORT 0x0800 +#define IEEE802154_FCTL_DADDR_EXTENDED 0x0c00 +#define IEEE802154_FCTL_SADDR_SHORT 0x8000 +#define IEEE802154_FCTL_SADDR_EXTENDED 0xc000 + /* * ieee802154_is_data - check if type is IEEE802154_FTYPE_DATA * @fc: frame control bytes in little-endian byteorder @@ -233,6 +246,15 @@ static inline int ieee802154_is_data(__le16 fc) } /** + * ieee802154_is_secen - check if Security bit is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee802154_is_secen(__le16 fc) +{ + return fc & cpu_to_le16(IEEE802154_FCTL_SECEN); +} + +/** * ieee802154_is_ackreq - check if acknowledgment request bit is set * @fc: frame control bytes in little-endian byteorder */ @@ -250,6 +272,24 @@ static inline bool ieee802154_is_intra_pan(__le16 fc) return fc & cpu_to_le16(IEEE802154_FCTL_INTRA_PAN); } +/* + * ieee802154_daddr_mode - get daddr mode from fc + * @fc: frame control bytes in little-endian byteorder + */ +static inline __le16 ieee802154_daddr_mode(__le16 fc) +{ + return fc & cpu_to_le16(IEEE802154_FCTL_DADDR); +} + +/* + * ieee802154_saddr_mode - get saddr mode from fc + * @fc: frame control bytes in little-endian byteorder + */ +static inline __le16 ieee802154_saddr_mode(__le16 fc) +{ + return fc & cpu_to_le16(IEEE802154_FCTL_SADDR); +} + /** * ieee802154_is_valid_psdu_len - check if psdu len is valid * available lengths: @@ -260,17 +300,17 @@ static inline bool ieee802154_is_intra_pan(__le16 fc) * * @len: psdu len with (MHR + payload + MFR) */ -static inline bool ieee802154_is_valid_psdu_len(const u8 len) +static inline bool ieee802154_is_valid_psdu_len(u8 len) { return (len == IEEE802154_ACK_PSDU_LEN || (len >= 
IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU)); } /** - * ieee802154_is_valid_psdu_len - check if extended addr is valid + * ieee802154_is_valid_extended_unicast_addr - check if extended addr is valid * @addr: extended addr to check */ -static inline bool ieee802154_is_valid_extended_unicast_addr(const __le64 addr) +static inline bool ieee802154_is_valid_extended_unicast_addr(__le64 addr) { /* Bail out if the address is all zero, or if the group * address bit is set. @@ -280,6 +320,34 @@ static inline bool ieee802154_is_valid_extended_unicast_addr(const __le64 addr) } /** + * ieee802154_is_broadcast_short_addr - check if short addr is broadcast + * @addr: short addr to check + */ +static inline bool ieee802154_is_broadcast_short_addr(__le16 addr) +{ + return (addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST)); +} + +/** + * ieee802154_is_unspec_short_addr - check if short addr is unspecified + * @addr: short addr to check + */ +static inline bool ieee802154_is_unspec_short_addr(__le16 addr) +{ + return (addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC)); +} + +/** + * ieee802154_is_valid_src_short_addr - check if source short address is valid + * @addr: short addr to check + */ +static inline bool ieee802154_is_valid_src_short_addr(__le16 addr) +{ + return !(ieee802154_is_broadcast_short_addr(addr) || + ieee802154_is_unspec_short_addr(addr)); +} + +/** * ieee802154_random_extended_addr - generates a random extended address * @addr: extended addr pointer to place the random address */ diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h index 2ec3ad58e8a0..70a5164f4728 100644 --- a/include/linux/iio/buffer.h +++ b/include/linux/iio/buffer.h @@ -83,10 +83,12 @@ struct iio_buffer_access_funcs { * @access: [DRIVER] buffer access functions associated with the * implementation. * @scan_el_dev_attr_list:[INTERN] list of scan element related attributes. + * @buffer_group: [INTERN] attributes of the buffer group * @scan_el_group: [DRIVER] attribute group for those attributes not * created from the iio_chan_info array. * @pollq: [INTERN] wait queue to allow for polling on the buffer. * @stufftoread: [INTERN] flag to indicate new data. + * @attrs: [INTERN] standard attributes of the buffer * @demux_list: [INTERN] list of operations required to demux the scan. * @demux_bounce: [INTERN] buffer for doing gather from incoming scan. * @buffer_list: [INTERN] entry in the devices list of current buffers. diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 6670c3d25c58..228bd44efa4c 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -37,6 +37,7 @@ #define ST_SENSORS_DEFAULT_AXIS_ADDR 0x20 #define ST_SENSORS_DEFAULT_AXIS_MASK 0x07 #define ST_SENSORS_DEFAULT_AXIS_N_BIT 3 +#define ST_SENSORS_DEFAULT_STAT_ADDR 0x27 #define ST_SENSORS_MAX_NAME 17 #define ST_SENSORS_MAX_4WAI 7 @@ -121,6 +122,9 @@ struct st_sensor_bdu { * @mask_int2: mask to enable/disable IRQ on INT2 pin. * @addr_ihl: address to enable/disable active low on the INT lines. * @mask_ihl: mask to enable/disable active low on the INT lines. + * @addr_od: address to enable/disable Open Drain on the INT lines. + * @mask_od: mask to enable/disable Open Drain on the INT lines. + * @addr_stat_drdy: address to read status of DRDY (data ready) interrupt * struct ig1 - represents the Interrupt Generator 1 of sensors. * @en_addr: address of the enable ig1 register. * @en_mask: mask to write the on/off value for enable. 
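/*
 * A minimal sketch (not part of this patch) of how a driver table might
 * populate the new open-drain and DRDY-status fields documented above;
 * all register addresses below are hypothetical, except the default
 * status address taken from the ST_SENSORS_DEFAULT_STAT_ADDR define.
 */
#include <linux/iio/common/st_sensors.h>

static const struct st_sensor_data_ready_irq sketch_drdy = {
	.addr_ihl = 0x22,	/* hypothetical INT config register */
	.mask_ihl = 0x80,
	.addr_od = 0x22,	/* same register, open-drain enable bit */
	.mask_od = 0x40,
	.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
};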
@@ -131,6 +135,9 @@ struct st_sensor_data_ready_irq { u8 mask_int2; u8 addr_ihl; u8 mask_ihl; + u8 addr_od; + u8 mask_od; + u8 addr_stat_drdy; struct { u8 en_addr; u8 en_mask; @@ -212,9 +219,13 @@ struct st_sensor_settings { * @odr: Output data rate of the sensor [Hz]. * @num_data_channels: Number of data channels used in buffer. * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). + * @int_pin_open_drain: Set the interrupt/DRDY to open drain. * @get_irq_data_ready: Function to get the IRQ used for data ready signal. * @tf: Transfer function structure used by I/O operations. * @tb: Transfer buffers and mutex used by I/O operations. + * @edge_irq: the IRQ triggers on edges and needs special handling. + * @hw_irq_trigger: if we're using the hardware interrupt on the sensor. + * @hw_timestamp: Latest timestamp from the interrupt handler, when in use. */ struct st_sensor_data { struct device *dev; @@ -233,17 +244,20 @@ struct st_sensor_data { unsigned int num_data_channels; u8 drdy_int_pin; + bool int_pin_open_drain; unsigned int (*get_irq_data_ready) (struct iio_dev *indio_dev); const struct st_sensor_transfer_function *tf; struct st_sensor_transfer_buffer tb; + + bool edge_irq; + bool hw_irq_trigger; + s64 hw_timestamp; }; #ifdef CONFIG_IIO_BUFFER irqreturn_t st_sensors_trigger_handler(int irq, void *p); - -int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf); #endif #ifdef CONFIG_IIO_TRIGGER @@ -251,7 +265,8 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, const struct iio_trigger_ops *trigger_ops); void st_sensors_deallocate_trigger(struct iio_dev *indio_dev); - +int st_sensors_validate_device(struct iio_trigger *trig, + struct iio_dev *indio_dev); #else static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev, const struct iio_trigger_ops *trigger_ops) @@ -262,6 +277,7 @@ static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev) { return; } +#define st_sensors_validate_device NULL #endif int st_sensors_init_sensor(struct iio_dev *indio_dev, @@ -271,7 +287,7 @@ int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable); int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable); -void st_sensors_power_enable(struct iio_dev *indio_dev); +int st_sensors_power_enable(struct iio_dev *indio_dev); void st_sensors_power_disable(struct iio_dev *indio_dev); diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h index fad58671c49e..3d672f72e7ec 100644 --- a/include/linux/iio/consumer.h +++ b/include/linux/iio/consumer.h @@ -49,6 +49,33 @@ struct iio_channel *iio_channel_get(struct device *dev, void iio_channel_release(struct iio_channel *chan); /** + * devm_iio_channel_get() - Resource managed version of iio_channel_get(). + * @dev: Pointer to consumer device. Device name must match + * the name of the device as provided in the iio_map + * with which the desired provider to consumer mapping + * was registered. + * @consumer_channel: Unique name to identify the channel on the consumer + * side. This typically describes the channel's use within + * the consumer. E.g. 'battery_voltage' + * + * Returns a pointer to a negative errno if it is not able to get the iio channel, + * otherwise returns a valid pointer for the iio channel. + * + * The allocated iio channel is automatically released when the device is + * unbound.
+ */ +struct iio_channel *devm_iio_channel_get(struct device *dev, + const char *consumer_channel); +/** + * devm_iio_channel_release() - Resource managed version of + * iio_channel_release(). + * @dev: Pointer to consumer device for which resource + * is allocated. + * @chan: The channel to be released. + */ +void devm_iio_channel_release(struct device *dev, struct iio_channel *chan); + +/** * iio_channel_get_all() - get all channels associated with a client * @dev: Pointer to consumer device. * @@ -65,6 +92,32 @@ struct iio_channel *iio_channel_get_all(struct device *dev); */ void iio_channel_release_all(struct iio_channel *chan); +/** + * devm_iio_channel_get_all() - Resource managed version of + * iio_channel_get_all(). + * @dev: Pointer to consumer device. + * + * Returns a pointer to a negative errno if it is not able to get the iio channels, + * otherwise returns an array of iio_channel structures terminated with one with + * a null iio_dev pointer. + * + * This function is used by fairly generic consumers to get all the + * channels registered as having this consumer. + * + * The allocated iio channels are automatically released when the device is + * unbound. + */ +struct iio_channel *devm_iio_channel_get_all(struct device *dev); + +/** + * devm_iio_channel_release_all() - Resource managed version of + * iio_channel_release_all(). + * @dev: Pointer to consumer device for which resource + * is allocated. + * @chan: Array of channels to be released. + */ +void devm_iio_channel_release_all(struct device *dev, struct iio_channel *chan); + struct iio_cb_buffer; /** * iio_channel_get_all_cb() - register callback for triggered capture diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index b2b16772c651..854e2dad1e0d 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -148,6 +148,37 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev, } /** + * struct iio_mount_matrix - iio mounting matrix + * @rotation: 3 dimensional space rotation matrix defining sensor alignment with + * main hardware + */ +struct iio_mount_matrix { + const char *rotation[9]; +}; + +ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv, + const struct iio_chan_spec *chan, char *buf); +int of_iio_read_mount_matrix(const struct device *dev, const char *propname, + struct iio_mount_matrix *matrix); + +typedef const struct iio_mount_matrix * + (iio_get_mount_matrix_t)(const struct iio_dev *indio_dev, + const struct iio_chan_spec *chan); + +/** + * IIO_MOUNT_MATRIX() - Initialize mount matrix extended channel attribute + * @_shared: Whether the attribute is shared between all channels + * @_get: Pointer to an iio_get_mount_matrix_t accessor + */ +#define IIO_MOUNT_MATRIX(_shared, _get) \ +{ \ + .name = "mount_matrix", \ + .shared = (_shared), \ + .read = iio_show_mount_matrix, \ + .private = (uintptr_t)(_get), \ +} + +/** * struct iio_event_spec - specification for a channel event * @type: Type of the event * @dir: Direction of the event @@ -281,13 +312,8 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan, }, \ } -/** - * iio_get_time_ns() - utility function to get a time stamp for events etc - **/ -static inline s64 iio_get_time_ns(void) -{ - return ktime_get_real_ns(); -} +s64 iio_get_time_ns(const struct iio_dev *indio_dev); +unsigned int iio_get_time_res(const struct iio_dev *indio_dev); /* Device operating modes */ #define INDIO_DIRECT_MODE 0x01 @@ -466,6 +492,7 @@ struct iio_buffer_setup_ops { * @chan_attr_group: [INTERN] group for all attrs in base
directory * @name: [DRIVER] name of the device. * @info: [DRIVER] callbacks and constant info from driver + * @clock_id: [INTERN] timestamping clock posix identifier * @info_exist_lock: [INTERN] lock to prevent use during removal * @setup_ops: [DRIVER] callbacks to call before and after buffer * enable/disable @@ -506,6 +533,7 @@ struct iio_dev { struct attribute_group chan_attr_group; const char *name; const struct iio_info *info; + clockid_t clock_id; struct mutex info_exist_lock; const struct iio_buffer_setup_ops *setup_ops; struct cdev chrdev; @@ -527,12 +555,14 @@ void iio_device_unregister(struct iio_dev *indio_dev); int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev); void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev); int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp); +int iio_device_claim_direct_mode(struct iio_dev *indio_dev); +void iio_device_release_direct_mode(struct iio_dev *indio_dev); extern struct bus_type iio_bus_type; /** * iio_device_put() - reference counted deallocation of struct device - * @indio_dev: IIO device structure containing the device + * @indio_dev: IIO device structure containing the device **/ static inline void iio_device_put(struct iio_dev *indio_dev) { @@ -541,6 +571,15 @@ static inline void iio_device_put(struct iio_dev *indio_dev) } /** + * iio_device_get_clock() - Retrieve current timestamping clock for the device + * @indio_dev: IIO device structure containing the device + */ +static inline clockid_t iio_device_get_clock(const struct iio_dev *indio_dev) +{ + return indio_dev->clock_id; +} + +/** * dev_to_iio_dev() - Get IIO device struct from a device struct * @dev: The device embedded in the IIO device * diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index fa2d01ef8f55..360da7d18a3d 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -41,6 +41,7 @@ struct adis_data { unsigned int diag_stat_reg; unsigned int self_test_mask; + bool self_test_no_autoclear; unsigned int startup_delay; const char * const *status_error_msgs; diff --git a/include/linux/iio/magnetometer/ak8975.h b/include/linux/iio/magnetometer/ak8975.h new file mode 100644 index 000000000000..c8400959d197 --- /dev/null +++ b/include/linux/iio/magnetometer/ak8975.h @@ -0,0 +1,16 @@ +#ifndef __IIO_MAGNETOMETER_AK8975_H__ +#define __IIO_MAGNETOMETER_AK8975_H__ + +#include <linux/iio/iio.h> + +/** + * struct ak8975_platform_data - AK8975 magnetometer driver platform data + * @eoc_gpio: data ready event gpio + * @orientation: mounting matrix relative to main hardware + */ +struct ak8975_platform_data { + int eoc_gpio; + struct iio_mount_matrix orientation; +}; + +#endif diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h new file mode 100644 index 000000000000..23ca41515527 --- /dev/null +++ b/include/linux/iio/sw_device.h @@ -0,0 +1,70 @@ +/* + * Industrial I/O software device interface + * + * Copyright (c) 2016 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#ifndef __IIO_SW_DEVICE +#define __IIO_SW_DEVICE + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/iio/iio.h> +#include <linux/configfs.h> + +#define module_iio_sw_device_driver(__iio_sw_device_type) \ + module_driver(__iio_sw_device_type, iio_register_sw_device_type, \ + iio_unregister_sw_device_type) + +struct iio_sw_device_ops; + +struct iio_sw_device_type { + const char *name; + struct module *owner; + const struct iio_sw_device_ops *ops; + struct list_head list; + struct config_group *group; +}; + +struct iio_sw_device { + struct iio_dev *device; + struct iio_sw_device_type *device_type; + struct config_group group; +}; + +struct iio_sw_device_ops { + struct iio_sw_device* (*probe)(const char *); + int (*remove)(struct iio_sw_device *); +}; + +static inline +struct iio_sw_device *to_iio_sw_device(struct config_item *item) +{ + return container_of(to_config_group(item), struct iio_sw_device, + group); +} + +int iio_register_sw_device_type(struct iio_sw_device_type *dt); +void iio_unregister_sw_device_type(struct iio_sw_device_type *dt); + +struct iio_sw_device *iio_sw_device_create(const char *, const char *); +void iio_sw_device_destroy(struct iio_sw_device *); + +int iio_sw_device_type_configfs_register(struct iio_sw_device_type *dt); +void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt); + +static inline +void iio_swd_group_init_type_name(struct iio_sw_device *d, + const char *name, + struct config_item_type *type) +{ +#ifdef CONFIG_CONFIGFS_FS + config_group_init_type_name(&d->group, name, type); +#endif +} + +#endif /* __IIO_SW_DEVICE */ diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h index 5198f8ed08a4..c97eab67558f 100644 --- a/include/linux/iio/sw_trigger.h +++ b/include/linux/iio/sw_trigger.h @@ -62,7 +62,7 @@ void iio_swt_group_init_type_name(struct iio_sw_trigger *t, const char *name, struct config_item_type *type) { -#ifdef CONFIG_CONFIGFS_FS +#if IS_ENABLED(CONFIG_CONFIGFS_FS) config_group_init_type_name(&t->group, name, type); #endif } diff --git a/include/linux/ima.h b/include/linux/ima.h index e6516cbbe9bf..0eb7c2e7f0d6 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -21,6 +21,7 @@ extern int ima_file_mmap(struct file *file, unsigned long prot); extern int ima_read_file(struct file *file, enum kernel_read_file_id id); extern int ima_post_read_file(struct file *file, void *buf, loff_t size, enum kernel_read_file_id id); +extern void ima_post_path_mknod(struct dentry *dentry); #else static inline int ima_bprm_check(struct linux_binprm *bprm) @@ -54,6 +55,11 @@ static inline int ima_post_read_file(struct file *file, void *buf, loff_t size, return 0; } +static inline void ima_post_path_mknod(struct dentry *dentry) +{ + return; +} + #endif /* CONFIG_IMA */ #ifdef CONFIG_IMA_APPRAISE diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h index 7c27fa1030e8..feb04ea20f11 100644 --- a/include/linux/inet_diag.h +++ b/include/linux/inet_diag.h @@ -52,6 +52,12 @@ struct sock *inet_diag_find_one_icsk(struct net *net, int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk); +void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk); + +int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, + struct inet_diag_msg *r, int ext, + struct user_namespace *user_ns); + extern int inet_diag_register(const struct inet_diag_handler *handler); extern void inet_diag_unregister(const struct inet_diag_handler *handler); #endif /* _INET_DIAG_H_ */ diff --git 
a/include/linux/init.h b/include/linux/init.h index aedb254abc37..6935d02474aa 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -77,12 +77,6 @@ #define __refdata __section(.ref.data) #define __refconst __constsection(.ref.rodata) -/* compatibility defines */ -#define __init_refok __ref -#define __initdata_refok __refdata -#define __exit_refok __ref - - #ifdef MODULE #define __exitused #else diff --git a/include/linux/init_task.h b/include/linux/init_task.h index f2cb8d45513d..f8834f820ec2 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -190,7 +190,7 @@ extern struct task_group root_task_group; #define INIT_TASK(tsk) \ { \ .state = 0, \ - .stack = &init_thread_info, \ + .stack = init_stack, \ .usage = ATOMIC_INIT(2), \ .flags = PF_KTHREAD, \ .prio = MAX_PRIO-20, \ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 9fcabeb07787..b6683f0ffc9f 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -278,6 +278,8 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); extern int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); +struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs); + #else /* CONFIG_SMP */ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) @@ -308,6 +310,12 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) { return 0; } + +static inline struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs) +{ + *nr_vecs = 1; + return NULL; +} #endif /* CONFIG_SMP */ /* diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h index 11d7e840d913..defcc4644ce3 100644 --- a/include/linux/io-64-nonatomic-hi-lo.h +++ b/include/linux/io-64-nonatomic-hi-lo.h @@ -21,6 +21,23 @@ static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr) writel(val, addr); } +static inline __u64 hi_lo_readq_relaxed(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + high = readl_relaxed(p + 1); + low = readl_relaxed(p); + + return low + ((u64)high << 32); +} + +static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr) +{ + writel_relaxed(val >> 32, addr + 4); + writel_relaxed(val, addr); +} + #ifndef readq #define readq hi_lo_readq #endif @@ -29,4 +46,12 @@ static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr) #define writeq hi_lo_writeq #endif +#ifndef readq_relaxed +#define readq_relaxed hi_lo_readq_relaxed +#endif + +#ifndef writeq_relaxed +#define writeq_relaxed hi_lo_writeq_relaxed +#endif + #endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */ diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h index 1a4315f97360..084461a4e5ab 100644 --- a/include/linux/io-64-nonatomic-lo-hi.h +++ b/include/linux/io-64-nonatomic-lo-hi.h @@ -21,6 +21,23 @@ static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr) writel(val >> 32, addr + 4); } +static inline __u64 lo_hi_readq_relaxed(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl_relaxed(p); + high = readl_relaxed(p + 1); + + return low + ((u64)high << 32); +} + +static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr) +{ + writel_relaxed(val, addr); + writel_relaxed(val >> 32, addr + 4); +} + #ifndef readq #define readq lo_hi_readq #endif @@ -29,4 +46,12 @@ static inline void lo_hi_writeq(__u64 val, volatile 
void __iomem *addr) #define writeq lo_hi_writeq #endif +#ifndef readq_relaxed +#define readq_relaxed lo_hi_readq_relaxed +#endif + +#ifndef writeq_relaxed +#define writeq_relaxed lo_hi_writeq_relaxed +#endif + #endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */ diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index e399029b68c5..645ad06b5d52 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -100,14 +100,16 @@ io_mapping_unmap_atomic(void __iomem *vaddr) } static inline void __iomem * -io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) +io_mapping_map_wc(struct io_mapping *mapping, + unsigned long offset, + unsigned long size) { resource_size_t phys_addr; BUG_ON(offset >= mapping->size); phys_addr = mapping->base + offset; - return ioremap_wc(phys_addr, PAGE_SIZE); + return ioremap_wc(phys_addr, size); } static inline void @@ -155,7 +157,9 @@ io_mapping_unmap_atomic(void __iomem *vaddr) /* Non-atomic map/unmap */ static inline void __iomem * -io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) +io_mapping_map_wc(struct io_mapping *mapping, + unsigned long offset, + unsigned long size) { return ((char __force __iomem *) mapping) + offset; } diff --git a/include/linux/iomap.h b/include/linux/iomap.h new file mode 100644 index 000000000000..3d70ece10313 --- /dev/null +++ b/include/linux/iomap.h @@ -0,0 +1,76 @@ +#ifndef LINUX_IOMAP_H +#define LINUX_IOMAP_H 1 + +#include <linux/types.h> + +struct fiemap_extent_info; +struct inode; +struct iov_iter; +struct kiocb; +struct vm_area_struct; +struct vm_fault; + +/* + * Types of block ranges for iomap mappings: + */ +#define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */ +#define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */ +#define IOMAP_MAPPED 0x03 /* blocks allocated @blkno */ +#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */ + +/* + * Flags for iomap mappings: + */ +#define IOMAP_F_MERGED 0x01 /* contains multiple blocks/extents */ + +/* + * Magic value for blkno: + */ +#define IOMAP_NULL_BLOCK -1LL /* blkno is not valid */ + +struct iomap { + sector_t blkno; /* 1st sector of mapping, 512b units */ + loff_t offset; /* file offset of mapping, bytes */ + u64 length; /* length of mapping, bytes */ + u16 type; /* type of mapping */ + u16 flags; /* flags for mapping */ + struct block_device *bdev; /* block device for I/O */ +}; + +/* + * Flags for iomap_begin / iomap_end. No flag implies a read. + */ +#define IOMAP_WRITE (1 << 0) +#define IOMAP_ZERO (1 << 1) + +struct iomap_ops { + /* + * Return the existing mapping at pos, or reserve space starting at + * pos for up to length, as long as we can do it as a single mapping. + * The actual length is returned in iomap->length. + */ + int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length, + unsigned flags, struct iomap *iomap); + + /* + * Commit and/or unreserve space previously allocated using iomap_begin. + * Written indicates the length of the successful write operation which + * needs to be committed, while the rest needs to be unreserved. + * Written might be zero if no data was written.
+ */ + int (*iomap_end)(struct inode *inode, loff_t pos, loff_t length, + ssize_t written, unsigned flags, struct iomap *iomap); +}; + +ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, + struct iomap_ops *ops); +int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, + bool *did_zero, struct iomap_ops *ops); +int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, + struct iomap_ops *ops); +int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, + struct iomap_ops *ops); +int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, + loff_t start, loff_t len, struct iomap_ops *ops); + +#endif /* LINUX_IOMAP_H */ diff --git a/include/linux/iommu.h b/include/linux/iommu.h index ef7a6ecd8584..a35fb8b42e1a 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -30,6 +30,7 @@ #define IOMMU_WRITE (1 << 1) #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ #define IOMMU_NOEXEC (1 << 3) +#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */ struct iommu_ops; struct iommu_group; @@ -78,6 +79,7 @@ struct iommu_domain_geometry { struct iommu_domain { unsigned type; const struct iommu_ops *ops; + unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ iommu_fault_handler_t handler; void *handler_token; struct iommu_domain_geometry geometry; @@ -150,13 +152,13 @@ struct iommu_dm_region { * @domain_set_attr: Change domain attributes * @get_dm_regions: Request list of direct mapping requirements for a device * @put_dm_regions: Free list of direct mapping requirements for a device + * @apply_dm_region: Temporary helper call-back for iova reserved ranges * @domain_window_enable: Configure and enable a particular window for a domain * @domain_window_disable: Disable a particular window for a domain * @domain_set_windows: Set the number of windows for a domain * @domain_get_windows: Return the number of windows for a domain * @of_xlate: add OF master IDs to iommu grouping - * @pgsize_bitmap: bitmap of supported page sizes - * @priv: per-instance data private to the iommu driver + * @pgsize_bitmap: bitmap of all possible supported page sizes */ struct iommu_ops { bool (*capable)(enum iommu_cap); @@ -185,6 +187,8 @@ struct iommu_ops { /* Request/Free a list of direct mapping requirements for a device */ void (*get_dm_regions)(struct device *dev, struct list_head *list); void (*put_dm_regions)(struct device *dev, struct list_head *list); + void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain, + struct iommu_dm_region *region); /* Window handling functions */ int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, @@ -198,7 +202,6 @@ struct iommu_ops { int (*of_xlate)(struct device *dev, struct of_phandle_args *args); unsigned long pgsize_bitmap; - void *priv; }; #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 0b65543dc6cf..6230064d7f95 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -26,6 +26,9 @@ struct resource { /* * IO resources have these defined flags. + * + * PCI devices expose these flags to userspace in the "resource" sysfs file, + * so don't move them. */ #define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */ @@ -110,6 +113,7 @@ struct resource { /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. 
*/ #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ +#define IORESOURCE_PCI_EA_BEI (1<<5) /* BAR Equivalent Indicator */ /* * I/O Resource Descriptors diff --git a/include/linux/iova.h b/include/linux/iova.h index 92f7177db2ce..f27bb2c62fca 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -19,8 +19,21 @@ /* iova structure */ struct iova { struct rb_node node; - unsigned long pfn_hi; /* IOMMU dish out addr hi */ - unsigned long pfn_lo; /* IOMMU dish out addr lo */ + unsigned long pfn_hi; /* Highest allocated pfn */ + unsigned long pfn_lo; /* Lowest allocated pfn */ +}; + +struct iova_magazine; +struct iova_cpu_rcache; + +#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */ +#define MAX_GLOBAL_MAGS 32 /* magazines per bin */ + +struct iova_rcache { + spinlock_t lock; + unsigned long depot_size; + struct iova_magazine *depot[MAX_GLOBAL_MAGS]; + struct iova_cpu_rcache __percpu *cpu_rcaches; }; /* holds all the iova translations for a domain */ @@ -31,6 +44,7 @@ struct iova_domain { unsigned long granule; /* pfn granularity for this domain */ unsigned long start_pfn; /* Lower limit for this domain */ unsigned long dma_32bit_pfn; + struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */ }; static inline unsigned long iova_size(struct iova *iova) @@ -78,6 +92,10 @@ void __free_iova(struct iova_domain *iovad, struct iova *iova); struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, bool size_aligned); +void free_iova_fast(struct iova_domain *iovad, unsigned long pfn, + unsigned long size); +unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size, + unsigned long limit_pfn); struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, unsigned long pfn_hi); void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); @@ -87,5 +105,6 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); void put_iova_domain(struct iova_domain *iovad); struct iova *split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi); +void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad); #endif diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index 1eee6bcfcf76..d10e54f03c09 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -63,8 +63,6 @@ struct ipc_namespace { }; extern struct ipc_namespace init_ipc_ns; -extern atomic_t nr_ipc_ns; - extern spinlock_t mq_lock; #ifdef CONFIG_SYSVIPC diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 838dbfa3c331..78c5d5ae3857 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -277,7 +277,7 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len); */ enum ipmi_addr_src { SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, - SI_PCI, SI_DEVICETREE, SI_DEFAULT + SI_PCI, SI_DEVICETREE, SI_LAST }; const char *ipmi_addr_src_to_str(enum ipmi_addr_src src); diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 7edc14fb66b6..c6dbcd84a2c7 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -63,7 +63,8 @@ struct ipv6_devconf { } stable_secret; __s32 use_oif_addrs_only; __s32 keep_addr_on_down; - void *sysctl; + + struct ctl_table_header *sysctl_header; }; struct ipv6_params { @@ -117,14 +118,29 @@ struct inet6_skb_parm { #define IP6SKB_ROUTERALERT 8 #define IP6SKB_FRAGMENTED 16 #define IP6SKB_HOPBYHOP 32 +#define 
IP6SKB_L3SLAVE 64 }; +#if defined(CONFIG_NET_L3_MASTER_DEV) +static inline bool skb_l3mdev_slave(__u16 flags) +{ + return flags & IP6SKB_L3SLAVE; +} +#else +static inline bool skb_l3mdev_slave(__u16 flags) +{ + return false; +} +#endif + #define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb)) #define IP6CBMTU(skb) ((struct ip6_mtuinfo *)((skb)->cb)) static inline int inet6_iif(const struct sk_buff *skb) { - return IP6CB(skb)->iif; + bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags); + + return l3_slave ? skb->skb_iif : IP6CB(skb)->iif; } struct tcp6_request_sock { @@ -267,6 +283,8 @@ struct tcp6_timewait_sock { }; #if IS_ENABLED(CONFIG_IPV6) +bool ipv6_mod_enabled(void); + static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk) { return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL; @@ -310,6 +328,11 @@ static inline int inet_v6_ipv6only(const struct sock *sk) #define ipv6_only_sock(sk) 0 #define ipv6_sk_rxinfo(sk) 0 +static inline bool ipv6_mod_enabled(void) +{ + return false; +} + static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk) { return NULL; diff --git a/include/linux/irq.h b/include/linux/irq.h index c4de62348ff2..0ac26c892fe2 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -197,6 +197,7 @@ struct irq_data { * IRQD_IRQ_INPROGRESS - In progress state of the interrupt * IRQD_WAKEUP_ARMED - Wakeup mode armed * IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU + * IRQD_AFFINITY_MANAGED - Affinity is auto-managed by the kernel */ enum { IRQD_TRIGGER_MASK = 0xf, @@ -212,6 +213,7 @@ enum { IRQD_IRQ_INPROGRESS = (1 << 18), IRQD_WAKEUP_ARMED = (1 << 19), IRQD_FORWARDED_TO_VCPU = (1 << 20), + IRQD_AFFINITY_MANAGED = (1 << 21), }; #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) @@ -305,6 +307,11 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d) __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; } +static inline bool irqd_affinity_is_managed(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED; +} + #undef __irqd_to_state static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) @@ -315,6 +322,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) /** * struct irq_chip - hardware interrupt chip descriptor * + * @parent_device: pointer to parent device for irqchip * @name: name for /proc/interrupts * @irq_startup: start up the interrupt (defaults to ->enable if NULL) * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) @@ -354,6 +362,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) * @flags: chip specific flags */ struct irq_chip { + struct device *parent_device; const char *name; unsigned int (*irq_startup)(struct irq_data *data); void (*irq_shutdown)(struct irq_data *data); @@ -482,12 +491,15 @@ extern void handle_fasteoi_irq(struct irq_desc *desc); extern void handle_edge_irq(struct irq_desc *desc); extern void handle_edge_eoi_irq(struct irq_desc *desc); extern void handle_simple_irq(struct irq_desc *desc); +extern void handle_untracked_irq(struct irq_desc *desc); extern void handle_percpu_irq(struct irq_desc *desc); extern void handle_percpu_devid_irq(struct irq_desc *desc); extern void handle_bad_irq(struct irq_desc *desc); extern void handle_nested_irq(unsigned int irq); extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); +extern int irq_chip_pm_get(struct irq_data *data); +extern int irq_chip_pm_put(struct irq_data *data); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY extern void 
irq_chip_enable_parent(struct irq_data *data); extern void irq_chip_disable_parent(struct irq_data *data); @@ -530,6 +542,10 @@ static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *c } extern int irq_set_percpu_devid(unsigned int irq); +extern int irq_set_percpu_devid_partition(unsigned int irq, + const struct cpumask *affinity); +extern int irq_get_percpu_devid_partition(unsigned int irq, + struct cpumask *affinity); extern void __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, @@ -697,11 +713,11 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) unsigned int arch_dynirq_lower_bound(unsigned int from); int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, - struct module *owner); + struct module *owner, const struct cpumask *affinity); /* use macros to avoid needing export.h for THIS_MODULE */ #define irq_alloc_descs(irq, from, cnt, node) \ - __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE) + __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL) #define irq_alloc_desc(node) \ irq_alloc_descs(-1, 0, 1, node) @@ -929,6 +945,16 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { } static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } #endif +/* + * The irqsave variants are for usage in non interrupt code. Do not use + * them in irq_chip callbacks. Use irq_gc_lock() instead. + */ +#define irq_gc_lock_irqsave(gc, flags) \ + raw_spin_lock_irqsave(&(gc)->lock, flags) + +#define irq_gc_unlock_irqrestore(gc, flags) \ + raw_spin_unlock_irqrestore(&(gc)->lock, flags) + static inline void irq_reg_writel(struct irq_chip_generic *gc, u32 val, int reg_offset) { diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h index 1551b5b2f4c2..f0f5d2671509 100644 --- a/include/linux/irqbypass.h +++ b/include/linux/irqbypass.h @@ -34,7 +34,7 @@ struct irq_bypass_consumer; /** * struct irq_bypass_producer - IRQ bypass producer definition * @node: IRQ bypass manager private list management - * @token: opaque token to match between producer and consumer + * @token: opaque token to match between producer and consumer (non-NULL) * @irq: Linux IRQ number for the producer device * @add_consumer: Connect the IRQ producer to an IRQ consumer (optional) * @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional) @@ -60,7 +60,7 @@ struct irq_bypass_producer { /** * struct irq_bypass_consumer - IRQ bypass consumer definition * @node: IRQ bypass manager private list management - * @token: opaque token to match between producer and consumer + * @token: opaque token to match between producer and consumer (non-NULL) * @add_producer: Connect the IRQ consumer to an IRQ producer * @del_producer: Disconnect the IRQ consumer from an IRQ producer * @stop: Perform any quiesce operations necessary prior to add/del (optional) diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h new file mode 100644 index 000000000000..c647b0547bcd --- /dev/null +++ b/include/linux/irqchip/arm-gic-common.h @@ -0,0 +1,34 @@ +/* + * include/linux/irqchip/arm-gic-common.h + * + * Copyright (C) 2016 ARM Limited, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef __LINUX_IRQCHIP_ARM_GIC_COMMON_H +#define __LINUX_IRQCHIP_ARM_GIC_COMMON_H + +#include <linux/types.h> +#include <linux/ioport.h> + +enum gic_type { + GIC_V2, + GIC_V3, +}; + +struct gic_kvm_info { + /* GIC type */ + enum gic_type type; + /* Virtual CPU interface */ + struct resource vcpu; + /* Interrupt number */ + unsigned int maint_irq; + /* Virtual control interface */ + struct resource vctrl; +}; + +const struct gic_kvm_info *gic_get_kvm_info(void); + +#endif /* __LINUX_IRQCHIP_ARM_GIC_COMMON_H */ diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index d5d798b35c1f..99ac022edc60 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -102,8 +102,6 @@ #define GICR_SYNCR 0x00C0 #define GICR_MOVLPIR 0x0100 #define GICR_MOVALLR 0x0110 -#define GICR_ISACTIVER GICD_ISACTIVER -#define GICR_ICACTIVER GICD_ICACTIVER #define GICR_IDREGS GICD_IDREGS #define GICR_PIDR2 GICD_PIDR2 @@ -114,34 +112,76 @@ #define GICR_WAKER_ProcessorSleep (1U << 1) #define GICR_WAKER_ChildrenAsleep (1U << 2) -#define GICR_PROPBASER_NonShareable (0U << 10) -#define GICR_PROPBASER_InnerShareable (1U << 10) -#define GICR_PROPBASER_OuterShareable (2U << 10) -#define GICR_PROPBASER_SHAREABILITY_MASK (3UL << 10) -#define GICR_PROPBASER_nCnB (0U << 7) -#define GICR_PROPBASER_nC (1U << 7) -#define GICR_PROPBASER_RaWt (2U << 7) -#define GICR_PROPBASER_RaWb (3U << 7) -#define GICR_PROPBASER_WaWt (4U << 7) -#define GICR_PROPBASER_WaWb (5U << 7) -#define GICR_PROPBASER_RaWaWt (6U << 7) -#define GICR_PROPBASER_RaWaWb (7U << 7) -#define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7) -#define GICR_PROPBASER_IDBITS_MASK (0x1f) - -#define GICR_PENDBASER_NonShareable (0U << 10) -#define GICR_PENDBASER_InnerShareable (1U << 10) -#define GICR_PENDBASER_OuterShareable (2U << 10) -#define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10) -#define GICR_PENDBASER_nCnB (0U << 7) -#define GICR_PENDBASER_nC (1U << 7) -#define GICR_PENDBASER_RaWt (2U << 7) -#define GICR_PENDBASER_RaWb (3U << 7) -#define GICR_PENDBASER_WaWt (4U << 7) -#define GICR_PENDBASER_WaWb (5U << 7) -#define GICR_PENDBASER_RaWaWt (6U << 7) -#define GICR_PENDBASER_RaWaWb (7U << 7) -#define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7) +#define GIC_BASER_CACHE_nCnB 0ULL +#define GIC_BASER_CACHE_SameAsInner 0ULL +#define GIC_BASER_CACHE_nC 1ULL +#define GIC_BASER_CACHE_RaWt 2ULL +#define GIC_BASER_CACHE_RaWb 3ULL +#define GIC_BASER_CACHE_WaWt 4ULL +#define GIC_BASER_CACHE_WaWb 5ULL +#define GIC_BASER_CACHE_RaWaWt 6ULL +#define GIC_BASER_CACHE_RaWaWb 7ULL +#define GIC_BASER_CACHE_MASK 7ULL +#define GIC_BASER_NonShareable 0ULL +#define GIC_BASER_InnerShareable 1ULL +#define GIC_BASER_OuterShareable 2ULL +#define GIC_BASER_SHAREABILITY_MASK 3ULL + +#define GIC_BASER_CACHEABILITY(reg, inner_outer, type) \ + (GIC_BASER_CACHE_##type << reg##_##inner_outer##_CACHEABILITY_SHIFT) + +#define GIC_BASER_SHAREABILITY(reg, type) \ + (GIC_BASER_##type << reg##_SHAREABILITY_SHIFT) + +#define GICR_PROPBASER_SHAREABILITY_SHIFT (10) +#define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_PROPBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_PROPBASER, SHAREABILITY_MASK) +#define GICR_PROPBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, MASK) +#define GICR_PROPBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, MASK) +#define GICR_PROPBASER_CACHEABILITY_MASK GICR_PROPBASER_INNER_CACHEABILITY_MASK 
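/*
 * A worked expansion of the GIC_BASER_* accessor macros above, shown for
 * illustration. With the GICR_PROPBASER shifts just defined:
 *
 *   GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)
 *     expands to (GIC_BASER_CACHE_RaWb << GICR_PROPBASER_INNER_CACHEABILITY_SHIFT)
 *     i.e. (3ULL << 7)
 *
 *   GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable)
 *     expands to (GIC_BASER_InnerShareable << GICR_PROPBASER_SHAREABILITY_SHIFT)
 *     i.e. (1ULL << 10)
 *
 * which reproduces the fixed-value encodings being removed, widened to
 * 64 bits so the same helpers also cover the OUTER fields at shift 56.
 */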
+ +#define GICR_PROPBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable) + +#define GICR_PROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB) +#define GICR_PROPBASER_nC GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC) +#define GICR_PROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt) +#define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb) +#define GICR_PROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt) +#define GICR_PROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWb) +#define GICR_PROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt) +#define GICR_PROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWb) + +#define GICR_PROPBASER_IDBITS_MASK (0x1f) + +#define GICR_PENDBASER_SHAREABILITY_SHIFT (10) +#define GICR_PENDBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_PENDBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_PENDBASER, SHAREABILITY_MASK) +#define GICR_PENDBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, MASK) +#define GICR_PENDBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, MASK) +#define GICR_PENDBASER_CACHEABILITY_MASK GICR_PENDBASER_INNER_CACHEABILITY_MASK + +#define GICR_PENDBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable) + +#define GICR_PENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB) +#define GICR_PENDBASER_nC GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC) +#define GICR_PENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt) +#define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb) +#define GICR_PENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt) +#define GICR_PENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb) +#define GICR_PENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt) +#define GICR_PENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWb) + +#define GICR_PENDBASER_PTZ BIT_ULL(62) /* * Re-Distributor registers, offsets from SGI_base @@ -177,59 +217,91 @@ #define GITS_CWRITER 0x0088 #define GITS_CREADR 0x0090 #define GITS_BASER 0x0100 +#define GITS_IDREGS_BASE 0xffd0 +#define GITS_PIDR0 0xffe0 +#define GITS_PIDR1 0xffe4 #define GITS_PIDR2 GICR_PIDR2 +#define GITS_PIDR4 0xffd0 +#define GITS_CIDR0 0xfff0 +#define GITS_CIDR1 0xfff4 +#define GITS_CIDR2 0xfff8 +#define GITS_CIDR3 0xfffc #define GITS_TRANSLATER 0x10040 #define GITS_CTLR_ENABLE (1U << 0) #define GITS_CTLR_QUIESCENT (1U << 31) +#define GITS_TYPER_PLPIS (1UL << 0) +#define GITS_TYPER_IDBITS_SHIFT 8 #define GITS_TYPER_DEVBITS_SHIFT 13 #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) #define GITS_TYPER_PTA (1UL << 19) - -#define GITS_CBASER_VALID (1UL << 63) -#define GITS_CBASER_nCnB (0UL << 59) -#define GITS_CBASER_nC (1UL << 59) -#define GITS_CBASER_RaWt (2UL << 59) -#define GITS_CBASER_RaWb (3UL << 59) -#define GITS_CBASER_WaWt (4UL << 59) -#define GITS_CBASER_WaWb (5UL << 59) -#define GITS_CBASER_RaWaWt (6UL << 59) -#define GITS_CBASER_RaWaWb (7UL << 59) -#define GITS_CBASER_CACHEABILITY_MASK (7UL << 59) -#define GITS_CBASER_NonShareable (0UL << 10) -#define GITS_CBASER_InnerShareable (1UL << 10) -#define GITS_CBASER_OuterShareable (2UL << 10) -#define GITS_CBASER_SHAREABILITY_MASK (3UL << 10) +#define GITS_TYPER_HWCOLLCNT_SHIFT 24 + +#define GITS_CBASER_VALID (1UL << 63)
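/*
 * A minimal sketch of consuming the new GITS_TYPER fields above.
 * sketch_its_decode_typer() is hypothetical; it assumes typer holds a
 * raw GITS_TYPER value and that the EventID width is the usual 5-bit
 * field at GITS_TYPER_IDBITS_SHIFT, encoded as (field + 1) bits.
 */
#include <linux/printk.h>

static void sketch_its_decode_typer(u64 typer)
{
	/* Implemented DeviceID width, encoded as (field + 1) bits. */
	unsigned int devbits = GITS_TYPER_DEVBITS(typer);
	/* EventID width, decoded under the 5-bit field assumption above. */
	unsigned int idbits = ((typer >> GITS_TYPER_IDBITS_SHIFT) & 0x1f) + 1;

	pr_info("ITS: %u DeviceID bits, %u EventID bits%s%s\n",
		devbits, idbits,
		(typer & GITS_TYPER_PLPIS) ? ", physical LPIs" : "",
		(typer & GITS_TYPER_PTA) ? ", PA-based targets" : "");
}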
+#define GITS_CBASER_SHAREABILITY_SHIFT (10) +#define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59) +#define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53) +#define GITS_CBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GITS_CBASER, SHAREABILITY_MASK) +#define GITS_CBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, MASK) +#define GITS_CBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_CBASER, OUTER, MASK) +#define GITS_CBASER_CACHEABILITY_MASK GITS_CBASER_INNER_CACHEABILITY_MASK + +#define GITS_CBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GITS_CBASER, InnerShareable) + +#define GITS_CBASER_nCnB GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB) +#define GITS_CBASER_nC GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC) +#define GITS_CBASER_RaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt) +#define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWb) +#define GITS_CBASER_WaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt) +#define GITS_CBASER_WaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb) +#define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt) +#define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb) #define GITS_BASER_NR_REGS 8 -#define GITS_BASER_VALID (1UL << 63) -#define GITS_BASER_nCnB (0UL << 59) -#define GITS_BASER_nC (1UL << 59) -#define GITS_BASER_RaWt (2UL << 59) -#define GITS_BASER_RaWb (3UL << 59) -#define GITS_BASER_WaWt (4UL << 59) -#define GITS_BASER_WaWb (5UL << 59) -#define GITS_BASER_RaWaWt (6UL << 59) -#define GITS_BASER_RaWaWb (7UL << 59) -#define GITS_BASER_CACHEABILITY_MASK (7UL << 59) -#define GITS_BASER_TYPE_SHIFT (56) +#define GITS_BASER_VALID (1UL << 63) +#define GITS_BASER_INDIRECT (1ULL << 62) + +#define GITS_BASER_INNER_CACHEABILITY_SHIFT (59) +#define GITS_BASER_OUTER_CACHEABILITY_SHIFT (53) +#define GITS_BASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_BASER, INNER, MASK) +#define GITS_BASER_CACHEABILITY_MASK GITS_BASER_INNER_CACHEABILITY_MASK +#define GITS_BASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, MASK) +#define GITS_BASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GITS_BASER, SHAREABILITY_MASK) + +#define GITS_BASER_nCnB GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB) +#define GITS_BASER_nC GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC) +#define GITS_BASER_RaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt) +#define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb) +#define GITS_BASER_WaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt) +#define GITS_BASER_WaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb) +#define GITS_BASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWt) +#define GITS_BASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWb) + +#define GITS_BASER_TYPE_SHIFT (56) #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) -#define GITS_BASER_ENTRY_SIZE_SHIFT (48) +#define GITS_BASER_ENTRY_SIZE_SHIFT (48) #define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1) -#define GITS_BASER_NonShareable (0UL << 10) -#define GITS_BASER_InnerShareable (1UL << 10) -#define GITS_BASER_OuterShareable (2UL << 10) #define GITS_BASER_SHAREABILITY_SHIFT (10) -#define GITS_BASER_SHAREABILITY_MASK (3UL << GITS_BASER_SHAREABILITY_SHIFT) +#define GITS_BASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) #define GITS_BASER_PAGE_SIZE_SHIFT (8) #define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT) #define
GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT) #define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) #define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) #define GITS_BASER_PAGES_MAX 256 +#define GITS_BASER_PAGES_SHIFT (0) +#define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1) #define GITS_BASER_TYPE_NONE 0 #define GITS_BASER_TYPE_DEVICE 1 @@ -240,12 +312,17 @@ #define GITS_BASER_TYPE_RESERVED6 6 #define GITS_BASER_TYPE_RESERVED7 7 +#define GITS_LVL1_ENTRY_SIZE (8UL) + /* * ITS commands */ #define GITS_CMD_MAPD 0x08 #define GITS_CMD_MAPC 0x09 -#define GITS_CMD_MAPVI 0x0a +#define GITS_CMD_MAPTI 0x0a +/* older GIC documentation used MAPVI for this command */ +#define GITS_CMD_MAPVI GITS_CMD_MAPTI +#define GITS_CMD_MAPI 0x0b #define GITS_CMD_MOVI 0x01 #define GITS_CMD_DISCARD 0x0f #define GITS_CMD_INV 0x0c @@ -256,6 +333,23 @@ #define GITS_CMD_SYNC 0x05 /* + * ITS error numbers + */ +#define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107 +#define E_ITS_MOVI_UNMAPPED_COLLECTION 0x010109 +#define E_ITS_INT_UNMAPPED_INTERRUPT 0x010307 +#define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507 +#define E_ITS_MAPD_DEVICE_OOR 0x010801 +#define E_ITS_MAPC_PROCNUM_OOR 0x010902 +#define E_ITS_MAPC_COLLECTION_OOR 0x010903 +#define E_ITS_MAPTI_UNMAPPED_DEVICE 0x010a04 +#define E_ITS_MAPTI_PHYSICALID_OOR 0x010a06 +#define E_ITS_INV_UNMAPPED_INTERRUPT 0x010c07 +#define E_ITS_INVALL_UNMAPPED_COLLECTION 0x010d09 +#define E_ITS_MOVALL_PROCNUM_OOR 0x010e01 +#define E_ITS_DISCARD_UNMAPPED_INTERRUPT 0x010f07 + +/* * CPU interface registers */ #define ICC_CTLR_EL1_EOImode_drop_dir (0U << 1) @@ -275,6 +369,12 @@ #define ICH_LR_ACTIVE_BIT (1ULL << 63) #define ICH_LR_PHYS_ID_SHIFT 32 #define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT) +#define ICH_LR_PRIORITY_SHIFT 48 + +/* These are for GICv2 emulation only */ +#define GICH_LR_VIRTUALID (0x3ffUL << 0) +#define GICH_LR_PHYSID_CPUID_SHIFT (10) +#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT) #define ICH_MISR_EOI (1 << 0) #define ICH_MISR_U (1 << 1) @@ -301,12 +401,12 @@ #define ICC_SGI1R_AFFINITY_1_SHIFT 16 #define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) #define ICC_SGI1R_SGI_ID_SHIFT 24 -#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT) +#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT) #define ICC_SGI1R_AFFINITY_2_SHIFT 32 -#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) +#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT) #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 #define ICC_SGI1R_AFFINITY_3_SHIFT 48 -#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) +#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT) #include <asm/arch_gicv3.h> diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 9c940263ca23..eafc965b3eb8 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -33,6 +33,7 @@ #define GIC_DIST_CTRL 0x000 #define GIC_DIST_CTR 0x004 +#define GIC_DIST_IIDR 0x008 #define GIC_DIST_IGROUP 0x080 #define GIC_DIST_ENABLE_SET 0x100 #define GIC_DIST_ENABLE_CLEAR 0x180 @@ -76,6 +77,7 @@ #define GICH_LR_VIRTUALID (0x3ff << 0) #define GICH_LR_PHYSID_CPUID_SHIFT (10) #define GICH_LR_PHYSID_CPUID (0x3ff << GICH_LR_PHYSID_CPUID_SHIFT) +#define GICH_LR_PRIORITY_SHIFT 23 #define GICH_LR_STATE (3 << 28) #define GICH_LR_PENDING_BIT (1 << 28) #define GICH_LR_ACTIVE_BIT (1 << 29) @@ -99,9 +101,14 @@ #include <linux/irqdomain.h> 
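/*
 * A minimal sketch of composing an ICC_SGI1R value from the GICv3
 * affinity field definitions above. sketch_compose_sgi1r() is
 * hypothetical; it assumes the target list occupies bits [15:0] and
 * leaves the IRM routing-mode bit clear (targeted delivery).
 */
static u64 sketch_compose_sgi1r(u8 aff3, u8 aff2, u8 aff1, u16 target_list,
				u8 sgi)
{
	return ((u64)aff3 << ICC_SGI1R_AFFINITY_3_SHIFT) |
	       ((u64)aff2 << ICC_SGI1R_AFFINITY_2_SHIFT) |
	       ((u64)sgi << ICC_SGI1R_SGI_ID_SHIFT) |
	       ((u64)aff1 << ICC_SGI1R_AFFINITY_1_SHIFT) |
	       target_list;
}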
struct device_node; +struct gic_chip_data; void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); int gic_cpu_if_down(unsigned int gic_nr); +void gic_cpu_save(struct gic_chip_data *gic); +void gic_cpu_restore(struct gic_chip_data *gic); +void gic_dist_save(struct gic_chip_data *gic); +void gic_dist_restore(struct gic_chip_data *gic); /* * Subdrivers that need some preparatory work can initialize their @@ -110,6 +117,12 @@ int gic_cpu_if_down(unsigned int gic_nr); int gic_of_init(struct device_node *node, struct device_node *parent); /* + * Initialises and registers a non-root or child GIC chip. Memory for + * the gic_chip_data structure is dynamically allocated. + */ +int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq); + +/* * Legacy platforms not converted to DT yet must use this to init * their GIC */ diff --git a/include/linux/irqchip/irq-partition-percpu.h b/include/linux/irqchip/irq-partition-percpu.h new file mode 100644 index 000000000000..87433a5d1285 --- /dev/null +++ b/include/linux/irqchip/irq-partition-percpu.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2016 ARM Limited, All Rights Reserved. + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/fwnode.h> +#include <linux/cpumask.h> +#include <linux/irqdomain.h> + +struct partition_affinity { + cpumask_t mask; + void *partition_id; +}; + +struct partition_desc; + +#ifdef CONFIG_PARTITION_PERCPU +int partition_translate_id(struct partition_desc *desc, void *partition_id); +struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode, + struct partition_affinity *parts, + int nr_parts, + int chained_irq, + const struct irq_domain_ops *ops); +struct irq_domain *partition_get_domain(struct partition_desc *dsc); +#else +static inline int partition_translate_id(struct partition_desc *desc, + void *partition_id) +{ + return -EINVAL; +} + +static inline +struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode, + struct partition_affinity *parts, + int nr_parts, + int chained_irq, + const struct irq_domain_ops *ops) +{ + return NULL; +} + +static inline +struct irq_domain *partition_get_domain(struct partition_desc *dsc) +{ + return NULL; +} +#endif diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h index 80f89e4a29ac..81f930b0bca9 100644 --- a/include/linux/irqchip/mips-gic.h +++ b/include/linux/irqchip/mips-gic.h @@ -103,6 +103,7 @@ #define GIC_VPE_SWINT0_MAP_OFS 0x0054 #define GIC_VPE_SWINT1_MAP_OFS 0x0058 #define GIC_VPE_OTHER_ADDR_OFS 0x0080 +#define GIC_VP_IDENT_OFS 0x0088 #define GIC_VPE_WD_CONFIG0_OFS 0x0090 #define GIC_VPE_WD_COUNT0_OFS 0x0094 #define GIC_VPE_WD_INITIAL0_OFS 0x0098 @@ -211,6 +212,10 @@ #define GIC_VPE_SMASK_FDC_SHF 6 #define GIC_VPE_SMASK_FDC_MSK (MSK(1) << GIC_VPE_SMASK_FDC_SHF) +/* GIC_VP_IDENT fields */ +#define GIC_VP_IDENT_VCNUM_SHF 0 +#define GIC_VP_IDENT_VCNUM_MSK (MSK(6) << GIC_VP_IDENT_VCNUM_SHF) + /* GIC nomenclature for Core Interrupt Pins. */ #define GIC_CPU_INT0 0 /* Core Interrupt 2 */ #define GIC_CPU_INT1 1 /* . */ @@ -278,4 +283,16 @@ static inline int gic_get_usm_range(struct resource *gic_usm_res) #endif /* CONFIG_MIPS_GIC */ +/** + * gic_read_local_vp_id() - read the local VP's VCNUM + * + * Read the VCNUM of the local VP from the GIC_VP_IDENT register and + * return it to the caller. This ID should be used to refer to the VP + * via the GIC's VP-other region, or when calculating an offset to a + * bit representing the VP in interrupt masks. + * + * Return: The VCNUM value for the local VP.
+ */ +extern unsigned gic_read_local_vp_id(void); + #endif /* __LINUX_IRQCHIP_MIPS_GIC_H */ diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index dcca77c4b9d2..b51beebf9804 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -66,6 +66,7 @@ struct irq_desc { int threads_handled_last; raw_spinlock_t lock; struct cpumask *percpu_enabled; + const struct cpumask *percpu_affinity; #ifdef CONFIG_SMP const struct cpumask *affinity_hint; struct irq_affinity_notify *affinity_notify; diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 2aed04396210..ffb84604c1de 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -39,6 +39,7 @@ struct irq_domain; struct of_device_id; struct irq_chip; struct irq_data; +struct cpumask; /* Number of irqs reserved for a legacy isa controller */ #define NUM_ISA_INTERRUPTS 16 @@ -96,6 +97,8 @@ enum irq_domain_bus_token { struct irq_domain_ops { int (*match)(struct irq_domain *d, struct device_node *node, enum irq_domain_bus_token bus_token); + int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token); int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw); void (*unmap)(struct irq_domain *d, unsigned int virq); int (*xlate)(struct irq_domain *d, struct device_node *node, @@ -211,11 +214,12 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, irq_hw_number_t first_hwirq, const struct irq_domain_ops *ops, void *host_data); -extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, +extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, enum irq_domain_bus_token bus_token); extern void irq_set_default_host(struct irq_domain *host); extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, - irq_hw_number_t hwirq, int node); + irq_hw_number_t hwirq, int node, + const struct cpumask *affinity); static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) { @@ -227,6 +231,17 @@ static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode) return fwnode && fwnode->type == FWNODE_IRQCHIP; } +static inline +struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, + enum irq_domain_bus_token bus_token) +{ + struct irq_fwspec fwspec = { + .fwnode = fwnode, + }; + + return irq_find_matching_fwspec(&fwspec, bus_token); +} + static inline struct irq_domain *irq_find_matching_host(struct device_node *node, enum irq_domain_bus_token bus_token) { @@ -346,9 +361,8 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, irq_hw_number_t *out_hwirq, unsigned int *out_type); /* IPI functions */ -unsigned int irq_reserve_ipi(struct irq_domain *domain, - const struct cpumask *dest); -void irq_destroy_ipi(unsigned int irq); +int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest); +int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest); /* V2 interfaces to support hierarchy IRQ domains. 
*/ extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, @@ -377,7 +391,7 @@ static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *par extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, - bool realloc); + bool realloc, const struct cpumask *affinity); extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); extern void irq_domain_activate_irq(struct irq_data *irq_data); extern void irq_domain_deactivate_irq(struct irq_data *irq_data); @@ -385,7 +399,8 @@ extern void irq_domain_deactivate_irq(struct irq_data *irq_data); static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs, int node, void *arg) { - return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false); + return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false, + NULL); } extern int irq_domain_alloc_irqs_recursive(struct irq_domain *domain, @@ -440,6 +455,9 @@ static inline int irq_domain_alloc_irqs(struct irq_domain *domain, return -1; } +static inline void irq_domain_free_irqs(unsigned int virq, + unsigned int nr_irqs) { } + static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) { return false; diff --git a/include/linux/isa.h b/include/linux/isa.h index b0270e3814c8..f2d0258414cf 100644 --- a/include/linux/isa.h +++ b/include/linux/isa.h @@ -6,6 +6,7 @@ #define __LINUX_ISA_H #include <linux/device.h> +#include <linux/errno.h> #include <linux/kernel.h> struct isa_driver { @@ -22,13 +23,13 @@ struct isa_driver { #define to_isa_driver(x) container_of((x), struct isa_driver, driver) -#ifdef CONFIG_ISA +#ifdef CONFIG_ISA_BUS_API int isa_register_driver(struct isa_driver *, unsigned int); void isa_unregister_driver(struct isa_driver *); #else static inline int isa_register_driver(struct isa_driver *d, unsigned int i) { - return 0; + return -ENODEV; } static inline void isa_unregister_driver(struct isa_driver *d) @@ -36,4 +37,36 @@ static inline void isa_unregister_driver(struct isa_driver *d) } #endif +/** + * module_isa_driver() - Helper macro for registering an ISA driver + * @__isa_driver: isa_driver struct + * @__num_isa_dev: number of devices to register + * + * Helper macro for ISA drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate code. Each module may only + * use this macro once, and calling it replaces module_init and module_exit. + */ +#define module_isa_driver(__isa_driver, __num_isa_dev) \ +static int __init __isa_driver##_init(void) \ +{ \ + return isa_register_driver(&(__isa_driver), __num_isa_dev); \ +} \ +module_init(__isa_driver##_init); \ +static void __exit __isa_driver##_exit(void) \ +{ \ + isa_unregister_driver(&(__isa_driver)); \ +} \ +module_exit(__isa_driver##_exit); + +/** + * max_num_isa_dev() - Maximum possible number of ISA devices which may be registered + * @__isa_dev_ext: ISA device address extent + * + * The highest base address possible for an ISA device is 0x3FF; this results in + * 1024 possible base addresses. Dividing the number of possible base addresses + * by the address extent taken by each device results in the maximum number of + * devices on a system.
+ */ +#define max_num_isa_dev(__isa_dev_ext) (1024 / __isa_dev_ext) + #endif /* __LINUX_ISA_H */ diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h index 548d55395488..10923d730486 100644 --- a/include/linux/iscsi_boot_sysfs.h +++ b/include/linux/iscsi_boot_sysfs.h @@ -64,6 +64,12 @@ enum iscsi_boot_initiator_properties_enum { ISCSI_BOOT_INI_END_MARKER, }; +enum iscsi_boot_acpitbl_properties_enum { + ISCSI_BOOT_ACPITBL_SIGNATURE, + ISCSI_BOOT_ACPITBL_OEM_ID, + ISCSI_BOOT_ACPITBL_OEM_TABLE_ID, +}; + struct attribute_group; struct iscsi_boot_kobj { @@ -127,6 +133,13 @@ iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index, umode_t (*is_visible) (void *data, int type), void (*release) (void *data)); +struct iscsi_boot_kobj * +iscsi_boot_create_acpitbl(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show)(void *data, int type, char *buf), + umode_t (*is_visible)(void *data, int type), + void (*release)(void *data)); + struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name); struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno); void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset); diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index fd1083c46c61..dfaa1f4dcb0c 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -403,11 +403,19 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) /* Flags in jbd_inode->i_flags */ #define __JI_COMMIT_RUNNING 0 -/* Commit of the inode data in progress. We use this flag to protect us from +#define __JI_WRITE_DATA 1 +#define __JI_WAIT_DATA 2 + +/* + * Commit of the inode data in progress. We use this flag to protect us from * concurrent deletion of inode. We cannot use reference to inode for this * since we cannot afford doing last iput() on behalf of kjournald */ #define JI_COMMIT_RUNNING (1 << __JI_COMMIT_RUNNING) +/* Write allocated dirty buffers in this inode before commit */ +#define JI_WRITE_DATA (1 << __JI_WRITE_DATA) +/* Wait for outstanding data writes for this inode before commit */ +#define JI_WAIT_DATA (1 << __JI_WAIT_DATA) /** * struct jbd_inode is the structure linking inodes in ordered mode @@ -483,10 +491,6 @@ struct jbd2_journal_handle unsigned long h_start_jiffies; unsigned int h_requested_credits; - -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map h_lockdep_map; -#endif }; @@ -781,13 +785,11 @@ jbd2_time_diff(unsigned long start, unsigned long end) * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the * number that will fit in j_blocksize * @j_last_sync_writer: most recent pid which did a synchronous write - * @j_history: Buffer storing the transactions statistics history - * @j_history_max: Maximum number of transactions in the statistics history - * @j_history_cur: Current number of transactions in the statistics history * @j_history_lock: Protect the transactions statistics history * @j_proc_entry: procfs entry for the jbd statistics directory * @j_stats: Overall statistics * @j_private: An opaque pointer to fs-private information. + * @j_trans_commit_map: Lockdep entity to track transaction commit dependencies */ struct journal_s @@ -1030,8 +1032,26 @@ struct journal_s /* Precomputed journal UUID checksum for seeding other checksums */ __u32 j_csum_seed; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Lockdep entity to track transaction commit dependencies. Handles + * hold this "lock" for read, when we wait for commit, we acquire the + * "lock" for writing. 
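
Returning to the isa.h additions above: a driver with no special module init/exit logic collapses to a match callback plus one macro invocation. A skeletal sketch, with the driver name, extent, and match policy all invented for illustration:

        #include <linux/device.h>
        #include <linux/isa.h>
        #include <linux/module.h>

        #define FOO_EXTENT      0x08    /* hypothetical 8-byte register window */

        static int foo_match(struct device *dev, unsigned int id)
        {
                return 1;       /* a real driver would probe the hardware here */
        }

        static struct isa_driver foo_driver = {
                .match  = foo_match,
                .driver = {
                        .name   = "foo",
                },
        };

        module_isa_driver(foo_driver, max_num_isa_dev(FOO_EXTENT));
        MODULE_LICENSE("GPL");

With an 8-byte extent, max_num_isa_dev(0x08) evaluates to 1024 / 8 = 128 candidate base addresses.
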
This matches the properties of jbd2 journalling + * where the running transaction has to wait for all handles to be + * dropped to commit that transaction and also acquiring a handle may + * require transaction commit to finish. + */ + struct lockdep_map j_trans_commit_map; +#endif }; +#define jbd2_might_wait_for_commit(j) \ + do { \ + rwsem_acquire(&j->j_trans_commit_map, 0, 0, _THIS_IP_); \ + rwsem_release(&j->j_trans_commit_map, 1, _THIS_IP_); \ + } while (0) + /* journal feature predicate functions */ #define JBD2_FEATURE_COMPAT_FUNCS(name, flagname) \ static inline bool jbd2_has_feature_##name(journal_t *j) \ @@ -1270,7 +1290,8 @@ extern int jbd2_journal_clear_err (journal_t *); extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *); extern int jbd2_journal_force_commit(journal_t *); extern int jbd2_journal_force_commit_nested(journal_t *); -extern int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *inode); +extern int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode); +extern int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode); extern int jbd2_journal_begin_ordered_truncate(journal_t *journal, struct jbd2_inode *inode, loff_t new_size); extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode); diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 0536524bb9eb..661af564fae8 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -76,7 +76,6 @@ #include <linux/types.h> #include <linux/compiler.h> -#include <linux/bug.h> extern bool static_key_initialized; @@ -115,13 +114,6 @@ enum jump_label_type { struct module; -#include <linux/atomic.h> - -static inline int static_key_count(struct static_key *key) -{ - return atomic_read(&key->enabled); -} - #ifdef HAVE_JUMP_LABEL #define JUMP_TYPE_FALSE 0UL @@ -152,16 +144,34 @@ extern int jump_label_text_reserved(void *start, void *end); extern void static_key_slow_inc(struct static_key *key); extern void static_key_slow_dec(struct static_key *key); extern void jump_label_apply_nops(struct module *mod); +extern int static_key_count(struct static_key *key); +extern void static_key_enable(struct static_key *key); +extern void static_key_disable(struct static_key *key); +/* + * We should be using ATOMIC_INIT() for initializing .enabled, but + * the inclusion of atomic.h is problematic for inclusion of jump_label.h + * in 'low-level' headers. Thus, we are initializing .enabled with a + * raw value, but have added a BUILD_BUG_ON() to catch any issues in + * jump_label_init() see: kernel/jump_label.c. 
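
The jbd2_might_wait_for_commit() macro defined above takes and immediately drops the map, so it performs no real synchronization; it only tells lockdep "this path may block on a transaction commit", letting a deadlock against any currently held handle be reported up front. A hypothetical call site (jbd2_log_wait_commit() is existing jbd2 API; the wrapper itself is illustrative):

        static void example_wait_for_transaction(journal_t *journal, tid_t tid)
        {
                jbd2_might_wait_for_commit(journal);    /* record the dependency */
                jbd2_log_wait_commit(journal, tid);     /* then actually block */
        }
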
+ */ #define STATIC_KEY_INIT_TRUE \ - { .enabled = ATOMIC_INIT(1), \ + { .enabled = { 1 }, \ .entries = (void *)JUMP_TYPE_TRUE } #define STATIC_KEY_INIT_FALSE \ - { .enabled = ATOMIC_INIT(0), \ + { .enabled = { 0 }, \ .entries = (void *)JUMP_TYPE_FALSE } #else /* !HAVE_JUMP_LABEL */ +#include <linux/atomic.h> +#include <linux/bug.h> + +static inline int static_key_count(struct static_key *key) +{ + return atomic_read(&key->enabled); +} + static __always_inline void jump_label_init(void) { static_key_initialized = true; @@ -206,14 +216,6 @@ static inline int jump_label_apply_nops(struct module *mod) return 0; } -#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) } -#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) } - -#endif /* HAVE_JUMP_LABEL */ - -#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE -#define jump_label_enabled static_key_enabled - static inline void static_key_enable(struct static_key *key) { int count = static_key_count(key); @@ -234,6 +236,14 @@ static inline void static_key_disable(struct static_key *key) static_key_slow_dec(key); } +#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) } +#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) } + +#endif /* HAVE_JUMP_LABEL */ + +#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE +#define jump_label_enabled static_key_enabled + /* -------------------------------------------------------------------------- */ /* diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h new file mode 100644 index 000000000000..b7f8aced7870 --- /dev/null +++ b/include/linux/kasan-checks.h @@ -0,0 +1,12 @@ +#ifndef _LINUX_KASAN_CHECKS_H +#define _LINUX_KASAN_CHECKS_H + +#ifdef CONFIG_KASAN +void kasan_check_read(const void *p, unsigned int size); +void kasan_check_write(const void *p, unsigned int size); +#else +static inline void kasan_check_read(const void *p, unsigned int size) { } +static inline void kasan_check_write(const void *p, unsigned int size) { } +#endif + +#endif diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 737371b56044..d600303306eb 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -50,20 +50,23 @@ void kasan_free_pages(struct page *page, unsigned int order); void kasan_cache_create(struct kmem_cache *cache, size_t *size, unsigned long *flags); +void kasan_cache_shrink(struct kmem_cache *cache); +void kasan_cache_destroy(struct kmem_cache *cache); void kasan_poison_slab(struct page *page); void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); void kasan_poison_object_data(struct kmem_cache *cache, void *object); +void kasan_init_slab_obj(struct kmem_cache *cache, const void *object); void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); void kasan_kfree_large(const void *ptr); -void kasan_kfree(void *ptr); +void kasan_poison_kfree(void *ptr); void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, gfp_t flags); void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); -void kasan_slab_free(struct kmem_cache *s, void *object); +bool kasan_slab_free(struct kmem_cache *s, void *object); struct kasan_cache { int alloc_meta_offset; @@ -73,6 +76,10 @@ struct kasan_cache { int kasan_module_alloc(void *addr, size_t size); void kasan_free_shadow(const struct vm_struct *vm); +size_t ksize(const void *); +static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); } +size_t kasan_metadata_size(struct kmem_cache *cache); + #else /* 
CONFIG_KASAN */ static inline void kasan_unpoison_shadow(const void *address, size_t size) {} @@ -88,16 +95,20 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {} static inline void kasan_cache_create(struct kmem_cache *cache, size_t *size, unsigned long *flags) {} +static inline void kasan_cache_shrink(struct kmem_cache *cache) {} +static inline void kasan_cache_destroy(struct kmem_cache *cache) {} static inline void kasan_poison_slab(struct page *page) {} static inline void kasan_unpoison_object_data(struct kmem_cache *cache, void *object) {} static inline void kasan_poison_object_data(struct kmem_cache *cache, void *object) {} +static inline void kasan_init_slab_obj(struct kmem_cache *cache, + const void *object) {} static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} static inline void kasan_kfree_large(const void *ptr) {} -static inline void kasan_kfree(void *ptr) {} +static inline void kasan_poison_kfree(void *ptr) {} static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, gfp_t flags) {} static inline void kasan_krealloc(const void *object, size_t new_size, @@ -105,11 +116,17 @@ static inline void kasan_krealloc(const void *object, size_t new_size, static inline void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags) {} -static inline void kasan_slab_free(struct kmem_cache *s, void *object) {} +static inline bool kasan_slab_free(struct kmem_cache *s, void *object) +{ + return false; +} static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } static inline void kasan_free_shadow(const struct vm_struct *vm) {} +static inline void kasan_unpoison_slab(const void *ptr) { } +static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } + #endif /* CONFIG_KASAN */ #endif /* LINUX_KASAN_H */ diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h index b33c7797eb57..15ec117ec537 100644 --- a/include/linux/kconfig.h +++ b/include/linux/kconfig.h @@ -3,6 +3,21 @@ #include <generated/autoconf.h> +#define __ARG_PLACEHOLDER_1 0, +#define __take_second_arg(__ignored, val, ...) val + +/* + * The use of "&&" / "||" is limited in certain expressions. + * The following allow "and" / "or" to be computed with macro expansion only. + */ +#define __and(x, y) ___and(x, y) +#define ___and(x, y) ____and(__ARG_PLACEHOLDER_##x, y) +#define ____and(arg1_or_junk, y) __take_second_arg(arg1_or_junk y, 0) + +#define __or(x, y) ___or(x, y) +#define ___or(x, y) ____or(__ARG_PLACEHOLDER_##x, y) +#define ____or(arg1_or_junk, y) __take_second_arg(arg1_or_junk 1, y) + /* * Helper macros to use CONFIG_ options in C/CPP expressions. Note that * these only work with boolean and tristate options. @@ -16,11 +31,10 @@ * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when * the last step cherry picks the 2nd arg, we get a zero. */ -#define __ARG_PLACEHOLDER_1 0, -#define config_enabled(cfg) _config_enabled(cfg) -#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) -#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) -#define ___config_enabled(__ignored, val, ...)
val +#define config_enabled(cfg) ___is_defined(cfg) +#define __is_defined(x) ___is_defined(x) +#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val) +#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0) /* * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0 @@ -41,14 +55,13 @@ * This is similar to IS_ENABLED(), but returns false when invoked from * built-in code when CONFIG_FOO is set to 'm'. */ -#define IS_REACHABLE(option) (config_enabled(option) || \ - (config_enabled(option##_MODULE) && config_enabled(MODULE))) +#define IS_REACHABLE(option) __or(IS_BUILTIN(option), \ + __and(IS_MODULE(option), __is_defined(MODULE))) /* * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm', * 0 otherwise. */ -#define IS_ENABLED(option) \ - (IS_BUILTIN(option) || IS_MODULE(option)) +#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option)) #endif /* __LINUX_KCONFIG_H */ diff --git a/include/linux/kdb.h b/include/linux/kdb.h index a19bcf9e762e..410decacff8f 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -177,7 +177,7 @@ extern int kdb_get_kbd_char(void); static inline int kdb_process_cpu(const struct task_struct *p) { - unsigned int cpu = task_thread_info(p)->cpu; + unsigned int cpu = task_cpu(p); if (cpu > num_possible_cpus()) cpu = 0; return cpu; diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 2f7775e229b0..d96a6118d26a 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -11,7 +11,6 @@ #include <linux/log2.h> #include <linux/typecheck.h> #include <linux/printk.h> -#include <linux/dynamic_debug.h> #include <asm/byteorder.h> #include <uapi/linux/kernel.h> @@ -53,6 +52,13 @@ #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) +#define u64_to_user_ptr(x) ( \ +{ \ + typecheck(u64, x); \ + (void __user *)(uintptr_t)x; \ +} \ +) + /* * This looks more complex than it should be. 
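
To see why the kconfig.h helpers above work, it helps to expand IS_ENABLED(CONFIG_FOO) by hand for CONFIG_FOO=m, where autoconf defines CONFIG_FOO_MODULE as 1 but leaves CONFIG_FOO undefined:

        IS_ENABLED(CONFIG_FOO)
          -> __or(IS_BUILTIN(CONFIG_FOO), IS_MODULE(CONFIG_FOO))
          -> __or(0, 1)                         /* both operands expand fully first */
          -> ____or(__ARG_PLACEHOLDER_0, 1)
          -> __take_second_arg(__ARG_PLACEHOLDER_0 1, 1)
          -> 1

__ARG_PLACEHOLDER_0 is not a defined macro, so "__ARG_PLACEHOLDER_0 1" remains a single argument and __take_second_arg() selects the supplied y. Had the first operand been 1, __ARG_PLACEHOLDER_1 would have expanded to "0," and shifted a literal 1 into the second slot instead. The two-level __or()/___or() indirection exists precisely so the operands are fully macro-expanded before the ## paste.
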
But we need to * get the type for the ~ right in round_down (it needs to be @@ -412,9 +418,9 @@ extern __printf(3, 4) int scnprintf(char *buf, size_t size, const char *fmt, ...); extern __printf(3, 0) int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); -extern __printf(2, 3) +extern __printf(2, 3) __malloc char *kasprintf(gfp_t gfp, const char *fmt, ...); -extern __printf(2, 0) +extern __printf(2, 0) __malloc char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); extern __printf(2, 0) const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args); @@ -444,6 +450,7 @@ extern int panic_on_oops; extern int panic_on_unrecovered_nmi; extern int panic_on_io_nmi; extern int panic_on_warn; +extern int sysctl_panic_on_rcu_stall; extern int sysctl_panic_on_stackoverflow; extern bool crash_kexec_post_notifiers; diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 25a822f6f000..44fda64ad434 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -92,7 +92,6 @@ static inline void account_process_tick(struct task_struct *tsk, int user) extern void account_process_tick(struct task_struct *, int user); #endif -extern void account_steal_ticks(unsigned long ticks); extern void account_idle_ticks(unsigned long ticks); #endif /* _LINUX_KERNEL_STAT_H */ diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 30f089ebe0a4..96356ef012de 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -179,6 +179,7 @@ struct kernfs_open_file { /* private fields, do not use outside kernfs proper */ struct mutex mutex; + struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 2cc643c6e870..d7437777baaa 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -14,6 +14,8 @@ #if !defined(__ASSEMBLY__) +#include <asm/io.h> + #include <uapi/linux/kexec.h> #ifdef CONFIG_KEXEC_CORE @@ -41,7 +43,7 @@ #endif #ifndef KEXEC_CONTROL_MEMORY_GFP -#define KEXEC_CONTROL_MEMORY_GFP GFP_KERNEL +#define KEXEC_CONTROL_MEMORY_GFP (GFP_KERNEL | __GFP_NORETRY) #endif #ifndef KEXEC_CONTROL_PAGE_SIZE @@ -228,14 +230,13 @@ extern void *kexec_purgatory_get_symbol_addr(struct kimage *image, extern void __crash_kexec(struct pt_regs *); extern void crash_kexec(struct pt_regs *); int kexec_should_crash(struct task_struct *); +int kexec_crash_loaded(void); void crash_save_cpu(struct pt_regs *regs, int cpu); void crash_save_vmcoreinfo(void); -void crash_map_reserved_pages(void); -void crash_unmap_reserved_pages(void); void arch_crash_save_vmcoreinfo(void); __printf(1, 2) void vmcoreinfo_append_str(const char *fmt, ...); -unsigned long paddr_vmcoreinfo_note(void); +phys_addr_t paddr_vmcoreinfo_note(void); #define VMCOREINFO_OSRELEASE(value) \ vmcoreinfo_append_str("OSRELEASE=%s\n", value) @@ -317,6 +318,46 @@ int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, unsigned int relsec); int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, unsigned int relsec); +void arch_kexec_protect_crashkres(void); +void arch_kexec_unprotect_crashkres(void); + +#ifndef page_to_boot_pfn +static inline unsigned long page_to_boot_pfn(struct page *page) +{ + return page_to_pfn(page); +} +#endif + +#ifndef boot_pfn_to_page +static inline struct page *boot_pfn_to_page(unsigned long boot_pfn) +{ + return pfn_to_page(boot_pfn); +} +#endif + +#ifndef phys_to_boot_phys +static inline unsigned long phys_to_boot_phys(phys_addr_t 
phys) +{ + return phys; +} +#endif + +#ifndef boot_phys_to_phys +static inline phys_addr_t boot_phys_to_phys(unsigned long boot_phys) +{ + return boot_phys; +} +#endif + +static inline unsigned long virt_to_boot_phys(void *addr) +{ + return phys_to_boot_phys(__pa((unsigned long)addr)); +} + +static inline void *boot_phys_to_virt(unsigned long entry) +{ + return phys_to_virt(boot_phys_to_phys(entry)); +} #else /* !CONFIG_KEXEC_CORE */ struct pt_regs; @@ -324,6 +365,7 @@ struct task_struct; static inline void __crash_kexec(struct pt_regs *regs) { } static inline void crash_kexec(struct pt_regs *regs) { } static inline int kexec_should_crash(struct task_struct *p) { return 0; } +static inline int kexec_crash_loaded(void) { return 0; } #define kexec_in_progress false #endif /* CONFIG_KEXEC_CORE */ diff --git a/include/linux/key-type.h b/include/linux/key-type.h index 7463355a198b..eaee981c5558 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -45,7 +45,6 @@ struct key_preparsed_payload { size_t datalen; /* Raw datalen */ size_t quotalen; /* Quota length for proposed payload */ time_t expiry; /* Expiry time of key */ - bool trusted; /* True if key is trusted */ }; typedef int (*request_key_actor_t)(struct key_construction *key, diff --git a/include/linux/key.h b/include/linux/key.h index 5f5b1129dc92..722914798f37 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -173,11 +173,9 @@ struct key { #define KEY_FLAG_NEGATIVE 5 /* set if key is negative */ #define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */ #define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */ -#define KEY_FLAG_TRUSTED 8 /* set if key is trusted */ -#define KEY_FLAG_TRUSTED_ONLY 9 /* set if keyring only accepts links to trusted keys */ -#define KEY_FLAG_BUILTIN 10 /* set if key is builtin */ -#define KEY_FLAG_ROOT_CAN_INVAL 11 /* set if key can be invalidated by root without permission */ -#define KEY_FLAG_KEEP 12 /* set if key should not be removed */ +#define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */ +#define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */ +#define KEY_FLAG_KEEP 10 /* set if key should not be removed */ /* the key type and key description string * - the desc is used to match a key against search criteria @@ -205,6 +203,20 @@ struct key { }; int reject_error; }; + + /* This is set on a keyring to restrict the addition of a link to a key + * to it. If this method isn't provided then it is assumed that the + * keyring is open to any addition. It is ignored for non-keyring + * keys. + * + * This is intended for use with rings of trusted keys whereby addition + * to the keyring needs to be controlled. KEY_ALLOC_BYPASS_RESTRICTION + * overrides this, allowing the kernel to add extra keys without + * restriction. 
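
The #ifndef guards around the kexec boot-view helpers above let an architecture whose firmware ("boot") view of memory differs from the kernel's linear view override only the translation primitives; virt_to_boot_phys() and boot_phys_to_virt() then compose automatically. A sketch of such an override, where both the placement and the offset constant are assumptions for illustration, not anything this patch defines:

        /* in the architecture's asm/kexec.h, seen before the #ifndef tests */
        #define BOOT_VIEW_OFFSET        0x80000000UL    /* hypothetical */

        #define phys_to_boot_phys phys_to_boot_phys
        static inline unsigned long phys_to_boot_phys(phys_addr_t phys)
        {
                return phys + BOOT_VIEW_OFFSET;
        }

        #define boot_phys_to_phys boot_phys_to_phys
        static inline phys_addr_t boot_phys_to_phys(unsigned long boot_phys)
        {
                return boot_phys - BOOT_VIEW_OFFSET;
        }
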
+ */ + int (*restrict_link)(struct key *keyring, + const struct key_type *type, + const union key_payload *payload); }; extern struct key *key_alloc(struct key_type *type, @@ -212,14 +224,17 @@ extern struct key *key_alloc(struct key_type *type, kuid_t uid, kgid_t gid, const struct cred *cred, key_perm_t perm, - unsigned long flags); + unsigned long flags, + int (*restrict_link)(struct key *, + const struct key_type *, + const union key_payload *)); -#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */ -#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */ -#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ -#define KEY_ALLOC_TRUSTED 0x0004 /* Key should be flagged as trusted */ -#define KEY_ALLOC_BUILT_IN 0x0008 /* Key is built into kernel */ +#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */ +#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */ +#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ +#define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */ +#define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */ extern void key_revoke(struct key *key); extern void key_invalidate(struct key *key); @@ -288,8 +303,15 @@ extern struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid const struct cred *cred, key_perm_t perm, unsigned long flags, + int (*restrict_link)(struct key *, + const struct key_type *, + const union key_payload *), struct key *dest); +extern int restrict_link_reject(struct key *keyring, + const struct key_type *type, + const union key_payload *payload); + extern int keyring_clear(struct key *keyring); extern key_ref_t keyring_search(key_ref_t keyring, diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index eeb307985715..1e032a1ddb3e 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -4,6 +4,11 @@ #include <linux/sched.h> /* MMF_VM_HUGEPAGE */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern struct attribute_group khugepaged_attr_group; + +extern int khugepaged_init(void); +extern void khugepaged_destroy(void); +extern int start_stop_khugepaged(void); extern int __khugepaged_enter(struct mm_struct *mm); extern void __khugepaged_exit(struct mm_struct *mm); extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 7ae216a39c9e..481c8c4627ca 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -43,8 +43,7 @@ static inline struct stable_node *page_stable_node(struct page *page) static inline void set_page_stable_node(struct page *page, struct stable_node *stable_node) { - page->mapping = (void *)stable_node + - (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); + page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); } /* diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 5276fe0916fc..9c28b4d4c90b 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -35,6 +35,10 @@ #include <asm/kvm_host.h> +#ifndef KVM_MAX_VCPU_ID +#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS +#endif + /* * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used * in kvm, other bits are visible for userspace which are defined in @@ -160,6 +164,8 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, struct kvm_io_device *dev); int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, struct kvm_io_device *dev); +struct 
kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, + gpa_t addr); #ifdef CONFIG_KVM_ASYNC_PF struct kvm_async_pf { @@ -225,6 +231,7 @@ struct kvm_vcpu { sigset_t sigset; struct kvm_vcpu_stat stat; unsigned int halt_poll_ns; + bool valid_wakeup; #ifdef CONFIG_HAS_IOMEM int mmio_needed; @@ -310,7 +317,13 @@ struct kvm_kernel_irq_routing_entry { unsigned irqchip; unsigned pin; } irqchip; - struct msi_msg msi; + struct { + u32 address_lo; + u32 address_hi; + u32 data; + u32 flags; + u32 devid; + } msi; struct kvm_s390_adapter_int adapter; struct kvm_hv_sint hv_sint; }; @@ -366,7 +379,15 @@ struct kvm { struct srcu_struct srcu; struct srcu_struct irq_srcu; struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; + + /* + * created_vcpus is protected by kvm->lock, and is incremented + * at the beginning of KVM_CREATE_VCPU. online_vcpus is only + * incremented after storing the kvm_vcpu pointer in vcpus, + * and is accessed atomically. + */ atomic_t online_vcpus; + int created_vcpus; int last_boosted_vcpu; struct list_head vm_list; struct mutex lock; @@ -407,6 +428,8 @@ struct kvm { #endif long tlbs_dirty; struct list_head devices; + struct dentry *debugfs_dentry; + struct kvm_stat_data **debugfs_stat_data; }; #define kvm_err(fmt, ...) \ @@ -447,12 +470,13 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) { - struct kvm_vcpu *vcpu; + struct kvm_vcpu *vcpu = NULL; int i; - if (id < 0 || id >= KVM_MAX_VCPUS) + if (id < 0) return NULL; - vcpu = kvm_get_vcpu(kvm, id); + if (id < KVM_MAX_VCPUS) + vcpu = kvm_get_vcpu(kvm, id); if (vcpu && vcpu->vcpu_id == id) return vcpu; kvm_for_each_vcpu(i, vcpu, kvm) @@ -651,6 +675,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); void kvm_vcpu_block(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); +void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); void kvm_vcpu_kick(struct kvm_vcpu *vcpu); int kvm_vcpu_yield_to(struct kvm_vcpu *target); void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu); @@ -858,45 +883,6 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm, } #endif -/* must be called with irqs disabled */ -static inline void __kvm_guest_enter(void) -{ - guest_enter(); - /* KVM does not hold any references to rcu protected data when it - * switches CPU into a guest mode. In fact switching to a guest mode - * is very similar to exiting to userspace from rcu point of view. In - * addition CPU may stay in a guest mode for quite a long time (up to - * one time slice). Lets treat guest mode as quiescent state, just like - * we do with user-mode execution. - */ - if (!context_tracking_cpu_is_enabled()) - rcu_virt_note_context_switch(smp_processor_id()); -} - -/* must be called with irqs disabled */ -static inline void __kvm_guest_exit(void) -{ - guest_exit(); -} - -static inline void kvm_guest_enter(void) -{ - unsigned long flags; - - local_irq_save(flags); - __kvm_guest_enter(); - local_irq_restore(flags); -} - -static inline void kvm_guest_exit(void) -{ - unsigned long flags; - - local_irq_save(flags); - __kvm_guest_exit(); - local_irq_restore(flags); -} - /* * search_memslots() and __gfn_to_memslot() are here because they are * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c. 
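
The created_vcpus comment above encodes an ordering contract between vCPU creation and readers of online_vcpus. A condensed sketch of the KVM_CREATE_VCPU shape it implies (error handling and architecture hooks omitted; this is a paraphrase for illustration, not the actual implementation):

        static int example_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu)
        {
                mutex_lock(&kvm->lock);
                kvm->created_vcpus++;           /* reserve a slot under kvm->lock */
                mutex_unlock(&kvm->lock);

                /* ... allocate and initialize the vcpu outside the lock ... */

                mutex_lock(&kvm->lock);
                kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
                smp_wmb();                      /* publish the pointer ... */
                atomic_inc(&kvm->online_vcpus); /* ... before the count that guards it */
                mutex_unlock(&kvm->lock);
                return 0;
        }
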
@@ -984,6 +970,11 @@ enum kvm_stat_kind { KVM_STAT_VCPU, }; +struct kvm_stat_data { + int offset; + struct kvm *kvm; +}; + struct kvm_stats_debugfs_item { const char *name; int offset; @@ -1018,17 +1009,18 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq) #ifdef CONFIG_S390 #define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that... +#elif defined(CONFIG_ARM64) +#define KVM_MAX_IRQ_ROUTES 4096 #else #define KVM_MAX_IRQ_ROUTES 1024 #endif -int kvm_setup_default_irq_routing(struct kvm *kvm); -int kvm_setup_empty_irq_routing(struct kvm *kvm); int kvm_set_irq_routing(struct kvm *kvm, const struct kvm_irq_routing_entry *entries, unsigned nr, unsigned flags); -int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e, +int kvm_set_routing_entry(struct kvm *kvm, + struct kvm_kernel_irq_routing_entry *e, const struct kvm_irq_routing_entry *ue); void kvm_free_irq_routing(struct kvm *kvm); @@ -1083,14 +1075,13 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) #endif /* CONFIG_HAVE_KVM_EVENTFD */ -#ifdef CONFIG_KVM_APIC_ARCHITECTURE -bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu); -#else -static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; } -#endif - static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) { + /* + * Ensure the rest of the request is published to kvm_check_request's + * caller. Paired with the smp_mb__after_atomic in kvm_check_request. + */ + smp_wmb(); set_bit(req, &vcpu->requests); } @@ -1098,6 +1089,12 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) { if (test_bit(req, &vcpu->requests)) { clear_bit(req, &vcpu->requests); + + /* + * Ensure the rest of the request is visible to kvm_check_request's + * caller. Paired with the smp_wmb in kvm_make_request. + */ + smp_mb__after_atomic(); return true; } else { return false; @@ -1116,9 +1113,21 @@ struct kvm_device { /* create, destroy, and name are mandatory */ struct kvm_device_ops { const char *name; + + /* + * create is called holding kvm->lock and any operations not suitable + * to do while holding the lock should be deferred to init (see + * below). + */ int (*create)(struct kvm_device *dev, u32 type); /* + * init is called after create if create is successful and is called + * outside of holding kvm->lock. + */ + void (*init)(struct kvm_device *dev); + + /* * Destroy is responsible for freeing dev. * * Destroy may be called before or after destructors are called @@ -1169,6 +1178,7 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS +bool kvm_arch_has_irq_bypass(void); int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, struct irq_bypass_producer *); void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *, @@ -1179,4 +1189,18 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set); #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ +#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS +/* If we wake up during the poll time, was it a successful poll?
*/ +static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) +{ + return vcpu->valid_wakeup; +} + +#else +static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) +{ + return true; +} +#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */ + #endif diff --git a/include/linux/leds-lp3952.h b/include/linux/leds-lp3952.h new file mode 100644 index 000000000000..49b37ed8d456 --- /dev/null +++ b/include/linux/leds-lp3952.h @@ -0,0 +1,125 @@ +/* + * LED driver for TI lp3952 controller + * + * Copyright (C) 2016, DAQRI, LLC. + * Author: Tony Makkiel <tony.makkiel@daqri.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef LEDS_LP3952_H_ +#define LEDS_LP3952_H_ + +#define LP3952_NAME "lp3952" +#define LP3952_CMD_REG_COUNT 8 +#define LP3952_BRIGHT_MAX 4 +#define LP3952_LABEL_MAX_LEN 15 + +#define LP3952_REG_LED_CTRL 0x00 +#define LP3952_REG_R1_BLNK_TIME_CTRL 0x01 +#define LP3952_REG_R1_BLNK_CYCLE_CTRL 0x02 +#define LP3952_REG_G1_BLNK_TIME_CTRL 0x03 +#define LP3952_REG_G1_BLNK_CYCLE_CTRL 0x04 +#define LP3952_REG_B1_BLNK_TIME_CTRL 0x05 +#define LP3952_REG_B1_BLNK_CYCLE_CTRL 0x06 +#define LP3952_REG_ENABLES 0x0B +#define LP3952_REG_PAT_GEN_CTRL 0x11 +#define LP3952_REG_RGB1_MAX_I_CTRL 0x12 +#define LP3952_REG_RGB2_MAX_I_CTRL 0x13 +#define LP3952_REG_CMD_0 0x50 +#define LP3952_REG_RESET 0x60 +#define REG_MAX LP3952_REG_RESET + +#define LP3952_PATRN_LOOP BIT(1) +#define LP3952_PATRN_GEN_EN BIT(2) +#define LP3952_INT_B00ST_LDR BIT(2) +#define LP3952_ACTIVE_MODE BIT(6) +#define LP3952_LED_MASK_ALL 0x3f + +/* Transition Time in ms */ +enum lp3952_tt { + TT0, + TT55, + TT110, + TT221, + TT422, + TT885, + TT1770, + TT3539 +}; + +/* Command Execution Time in ms */ +enum lp3952_cet { + CET197, + CET393, + CET590, + CET786, + CET1180, + CET1376, + CET1573, + CET1769, + CET1966, + CET2163, + CET2359, + CET2556, + CET2763, + CET2949, + CET3146 +}; + +/* Max Current in % */ +enum lp3952_colour_I_log_0 { + I0, + I7, + I14, + I21, + I32, + I46, + I71, + I100 +}; + +enum lp3952_leds { + LP3952_BLUE_2, + LP3952_GREEN_2, + LP3952_RED_2, + LP3952_BLUE_1, + LP3952_GREEN_1, + LP3952_RED_1, + LP3952_LED_ALL +}; + +struct lp3952_ctrl_hdl { + struct led_classdev cdev; + char name[LP3952_LABEL_MAX_LEN]; + enum lp3952_leds channel; + void *priv; +}; + +struct ptrn_gen_cmd { + union { + struct { + u16 tt:3; + u16 b:3; + u16 cet:4; + u16 g:3; + u16 r:3; + }; + struct { + u8 lsb; + u8 msb; + } bytes; + }; +} __packed; + +struct lp3952_led_array { + struct regmap *regmap; + struct i2c_client *client; + struct gpio_desc *enable_gpio; + struct lp3952_ctrl_hdl leds[LP3952_LED_ALL]; +}; + +#endif /* LEDS_LP3952_H_ */ diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h index b8d6fffed4d8..d215b4561180 100644 --- a/include/linux/leds-pca9532.h +++ b/include/linux/leds-pca9532.h @@ -16,6 +16,7 @@ #include <linux/leds.h> #include <linux/workqueue.h> +#include <dt-bindings/leds/leds-pca9532.h> enum pca9532_state { PCA9532_OFF = 0x0, @@ -24,16 +25,14 @@ enum pca9532_state { PCA9532_PWM1 = 0x3 }; -enum pca9532_type { PCA9532_TYPE_NONE, PCA9532_TYPE_LED, - PCA9532_TYPE_N2100_BEEP, PCA9532_TYPE_GPIO }; - struct pca9532_led { u8 id; struct i2c_client *client; - char *name; + const char *name; + const char *default_trigger; struct led_classdev ldev; struct work_struct work; - enum pca9532_type type; + u32 type; enum pca9532_state state; }; diff --git 
a/include/linux/leds.h b/include/linux/leds.h index f203a8f89d30..8a3b5d29602f 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -42,14 +42,16 @@ struct led_classdev { #define LED_UNREGISTERING (1 << 1) /* Upper 16 bits reflect control information */ #define LED_CORE_SUSPENDRESUME (1 << 16) -#define LED_BLINK_ONESHOT (1 << 17) -#define LED_BLINK_ONESHOT_STOP (1 << 18) -#define LED_BLINK_INVERT (1 << 19) -#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 20) -#define LED_BLINK_DISABLE (1 << 21) -#define LED_SYSFS_DISABLE (1 << 22) -#define LED_DEV_CAP_FLASH (1 << 23) -#define LED_HW_PLUGGABLE (1 << 24) +#define LED_BLINK_SW (1 << 17) +#define LED_BLINK_ONESHOT (1 << 18) +#define LED_BLINK_ONESHOT_STOP (1 << 19) +#define LED_BLINK_INVERT (1 << 20) +#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 21) +#define LED_BLINK_DISABLE (1 << 22) +#define LED_SYSFS_DISABLE (1 << 23) +#define LED_DEV_CAP_FLASH (1 << 24) +#define LED_HW_PLUGGABLE (1 << 25) +#define LED_PANIC_INDICATOR (1 << 26) /* Set LED brightness level * Must not sleep. Use brightness_set_blocking for drivers @@ -71,8 +73,8 @@ struct led_classdev { * and if both are zero then a sensible default should be chosen. * The call should adjust the timings in that case and if it can't * match the values specified exactly. - * Deactivate blinking again when the brightness is set to a fixed - * value via the brightness_set() callback. + * Deactivate blinking again when the brightness is set to LED_OFF + * via the brightness_set() callback. */ int (*blink_set)(struct led_classdev *led_cdev, unsigned long *delay_on, @@ -323,10 +325,16 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev) #endif /* CONFIG_LEDS_TRIGGERS */ /* Trigger specific functions */ -#ifdef CONFIG_LEDS_TRIGGER_IDE_DISK -extern void ledtrig_ide_activity(void); +#ifdef CONFIG_LEDS_TRIGGER_DISK +extern void ledtrig_disk_activity(void); #else -static inline void ledtrig_ide_activity(void) {} +static inline void ledtrig_disk_activity(void) {} +#endif + +#ifdef CONFIG_LEDS_TRIGGER_MTD +extern void ledtrig_mtd_activity(void); +#else +static inline void ledtrig_mtd_activity(void) {} #endif #if defined(CONFIG_LEDS_TRIGGER_CAMERA) || defined(CONFIG_LEDS_TRIGGER_CAMERA_MODULE) @@ -358,6 +366,7 @@ struct gpio_led { unsigned gpio; unsigned active_low : 1; unsigned retain_state_suspended : 1; + unsigned panic_indicator : 1; unsigned default_state : 2; /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ struct gpio_desc *gpiod; @@ -378,8 +387,16 @@ struct gpio_led_platform_data { unsigned long *delay_off); }; +#ifdef CONFIG_NEW_LEDS struct platform_device *gpio_led_register_device( int id, const struct gpio_led_platform_data *pdata); +#else +static inline struct platform_device *gpio_led_register_device( + int id, const struct gpio_led_platform_data *pdata) +{ + return 0; +} +#endif enum cpu_led_event { CPU_LED_IDLE_START, /* CPU enters idle */ diff --git a/include/linux/libata.h b/include/linux/libata.h index 2c4ebef79d0c..e37d4f99f510 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -146,13 +146,6 @@ enum { ATA_TFLAG_FUA = (1 << 5), /* enable FUA */ ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */ - /* protocol flags */ - ATA_PROT_FLAG_PIO = (1 << 0), /* is PIO */ - ATA_PROT_FLAG_DMA = (1 << 1), /* is DMA */ - ATA_PROT_FLAG_DATA = ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA, - ATA_PROT_FLAG_NCQ = (1 << 2), /* is NCQ */ - ATA_PROT_FLAG_ATAPI = (1 << 3), /* is ATAPI */ - /* struct ata_device stuff */ ATA_DFLAG_LBA = (1 << 
0), /* device supports LBA */ ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */ @@ -180,6 +173,8 @@ enum { ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */ ATA_DFLAG_DEVSLP = (1 << 27), /* device supports Device Sleep */ ATA_DFLAG_ACPI_DISABLED = (1 << 28), /* ACPI for the device is disabled */ + ATA_DFLAG_D_SENSE = (1 << 29), /* Descriptor sense requested */ + ATA_DFLAG_ZAC = (1 << 30), /* ZAC device */ ATA_DEV_UNKNOWN = 0, /* unknown device */ ATA_DEV_ATA = 1, /* ATA device */ @@ -191,7 +186,8 @@ enum { ATA_DEV_SEMB = 7, /* SEMB */ ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */ ATA_DEV_ZAC = 9, /* ZAC device */ - ATA_DEV_NONE = 10, /* no device */ + ATA_DEV_ZAC_UNSUP = 10, /* ZAC device (unsupported) */ + ATA_DEV_NONE = 11, /* no device */ /* struct ata_link flags */ ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ @@ -727,6 +723,13 @@ struct ata_device { /* NCQ send and receive log subcommand support */ u8 ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_SIZE]; + u8 ncq_non_data_cmds[ATA_LOG_NCQ_NON_DATA_SIZE]; + + /* ZAC zone configuration */ + u32 zac_zoned_cap; + u32 zac_zones_optimal_open; + u32 zac_zones_optimal_nonseq; + u32 zac_zones_max_open; /* error history */ int spdn_cnt; @@ -1029,58 +1032,29 @@ extern const unsigned long sata_deb_timing_long[]; extern struct ata_port_operations ata_dummy_port_ops; extern const struct ata_port_info ata_dummy_port_info; -/* - * protocol tests - */ -static inline unsigned int ata_prot_flags(u8 prot) -{ - switch (prot) { - case ATA_PROT_NODATA: - return 0; - case ATA_PROT_PIO: - return ATA_PROT_FLAG_PIO; - case ATA_PROT_DMA: - return ATA_PROT_FLAG_DMA; - case ATA_PROT_NCQ: - return ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ; - case ATAPI_PROT_NODATA: - return ATA_PROT_FLAG_ATAPI; - case ATAPI_PROT_PIO: - return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO; - case ATAPI_PROT_DMA: - return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA; - } - return 0; -} - -static inline int ata_is_atapi(u8 prot) -{ - return ata_prot_flags(prot) & ATA_PROT_FLAG_ATAPI; -} - -static inline int ata_is_nodata(u8 prot) +static inline bool ata_is_atapi(u8 prot) { - return !(ata_prot_flags(prot) & ATA_PROT_FLAG_DATA); + return prot & ATA_PROT_FLAG_ATAPI; } -static inline int ata_is_pio(u8 prot) +static inline bool ata_is_pio(u8 prot) { - return ata_prot_flags(prot) & ATA_PROT_FLAG_PIO; + return prot & ATA_PROT_FLAG_PIO; } -static inline int ata_is_dma(u8 prot) +static inline bool ata_is_dma(u8 prot) { - return ata_prot_flags(prot) & ATA_PROT_FLAG_DMA; + return prot & ATA_PROT_FLAG_DMA; } -static inline int ata_is_ncq(u8 prot) +static inline bool ata_is_ncq(u8 prot) { - return ata_prot_flags(prot) & ATA_PROT_FLAG_NCQ; + return prot & ATA_PROT_FLAG_NCQ; } -static inline int ata_is_data(u8 prot) +static inline bool ata_is_data(u8 prot) { - return ata_prot_flags(prot) & ATA_PROT_FLAG_DATA; + return prot & (ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA); } static inline int is_multi_taskfile(struct ata_taskfile *tf) @@ -1397,7 +1371,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap) return ap->nr_pmp_links != 0; } -static inline int ata_is_host_link(const struct ata_link *link) +static inline bool ata_is_host_link(const struct ata_link *link) { return link == &link->ap->link || link == link->ap->slave_link; } @@ -1412,7 +1386,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap) return false; } -static inline int ata_is_host_link(const struct ata_link *link) +static inline bool ata_is_host_link(const struct ata_link *link) { return 1; } @@ -1523,7 +1497,8 @@ 
static inline unsigned int ata_class_enabled(unsigned int class) static inline unsigned int ata_class_disabled(unsigned int class) { return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP || - class == ATA_DEV_PMP_UNSUP || class == ATA_DEV_SEMB_UNSUP; + class == ATA_DEV_PMP_UNSUP || class == ATA_DEV_SEMB_UNSUP || + class == ATA_DEV_ZAC_UNSUP; } static inline unsigned int ata_class_absent(unsigned int class) @@ -1641,6 +1616,26 @@ static inline bool ata_fpdma_dsm_supported(struct ata_device *dev) ATA_LOG_NCQ_SEND_RECV_DSM_TRIM); } +static inline bool ata_fpdma_read_log_supported(struct ata_device *dev) +{ + return (dev->flags & ATA_DFLAG_NCQ_SEND_RECV) && + (dev->ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET] & + ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED); +} + +static inline bool ata_fpdma_zac_mgmt_in_supported(struct ata_device *dev) +{ + return (dev->flags & ATA_DFLAG_NCQ_SEND_RECV) && + (dev->ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET] & + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED); +} + +static inline bool ata_fpdma_zac_mgmt_out_supported(struct ata_device *dev) +{ + return (dev->ncq_non_data_cmds[ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET] & + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT); +} + static inline void ata_qc_set_polling(struct ata_queued_cmd *qc) { qc->tf.ctl |= ATA_NIEN; diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 833867b9ddc2..b519e137b9b7 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -27,7 +27,7 @@ enum { /* need to set a limit somewhere, but yes, this is likely overkill */ ND_IOCTL_MAX_BUFLEN = SZ_4M, ND_CMD_MAX_ELEM = 5, - ND_CMD_MAX_ENVELOPE = 16, + ND_CMD_MAX_ENVELOPE = 256, ND_MAX_MAPPINGS = 32, /* region flag indicating to direct-map persistent memory by default */ @@ -52,6 +52,7 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc, struct nd_namespace_label; struct nvdimm_drvdata; + struct nd_mapping { struct nvdimm *nvdimm; struct nd_namespace_label **labels; @@ -68,7 +69,8 @@ struct nd_mapping { struct nvdimm_bus_descriptor { const struct attribute_group **attr_groups; - unsigned long dsm_mask; + unsigned long cmd_mask; + struct module *module; char *provider_name; ndctl_fn ndctl; int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc); @@ -99,13 +101,21 @@ struct nd_region_desc { unsigned long flags; }; +struct device; +void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset, + size_t size, unsigned long flags); +static inline void __iomem *devm_nvdimm_ioremap(struct device *dev, + resource_size_t offset, size_t size) +{ + return (void __iomem *) devm_nvdimm_memremap(dev, offset, size, 0); +} + struct nvdimm_bus; struct module; struct device; struct nd_blk_region; struct nd_blk_region_desc { int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); - void (*disable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, void *iobuf, u64 len, int rw); struct nd_region_desc ndr_desc; @@ -119,21 +129,22 @@ static inline struct nd_blk_region_desc *to_blk_region_desc( } int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length); -struct nvdimm_bus *__nvdimm_bus_register(struct device *parent, - struct nvdimm_bus_descriptor *nfit_desc, struct module *module); -#define nvdimm_bus_register(parent, desc) \ - __nvdimm_bus_register(parent, desc, THIS_MODULE) +struct nvdimm_bus *nvdimm_bus_register(struct device *parent, + struct nvdimm_bus_descriptor *nfit_desc); void 
nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus); struct nvdimm_bus *to_nvdimm_bus(struct device *dev); struct nvdimm *to_nvdimm(struct device *dev); struct nd_region *to_nd_region(struct device *dev); struct nd_blk_region *to_nd_blk_region(struct device *dev); struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus); +struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus); const char *nvdimm_name(struct nvdimm *nvdimm); +unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm); void *nvdimm_provider_data(struct nvdimm *nvdimm); struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, const struct attribute_group **groups, unsigned long flags, - unsigned long *dsm_mask); + unsigned long cmd_mask, int num_flush, + struct resource *flush_wpq); const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd); u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd, @@ -155,4 +166,6 @@ struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr); unsigned int nd_region_acquire_lane(struct nd_region *nd_region); void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane); u64 nd_fletcher64(void *addr, size_t len, bool le); +void nvdimm_flush(struct nd_region *nd_region); +int nvdimm_has_flush(struct nd_region *nd_region); #endif /* __LIBNVDIMM_H__ */ diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index cdcb2ccbefa8..ba78b8306674 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -1,7 +1,9 @@ #ifndef NVM_H #define NVM_H +#include <linux/blkdev.h> #include <linux/types.h> +#include <uapi/linux/lightnvm.h> enum { NVM_IO_OK = 0, @@ -18,7 +20,7 @@ enum { #define NVM_SEC_BITS (8) #define NVM_PL_BITS (8) #define NVM_LUN_BITS (8) -#define NVM_CH_BITS (8) +#define NVM_CH_BITS (7) struct ppa_addr { /* Generic structure for all addresses */ @@ -30,8 +32,14 @@ struct ppa_addr { u64 pl : NVM_PL_BITS; u64 lun : NVM_LUN_BITS; u64 ch : NVM_CH_BITS; + u64 reserved : 1; } g; + struct { + u64 line : 63; + u64 is_cached : 1; + } c; + u64 ppa; }; }; @@ -41,13 +49,11 @@ struct nvm_id; struct nvm_dev; typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); -typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *); typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32, nvm_l2p_update_fn *, void *); -typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int, - nvm_bb_update_fn *, void *); -typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int); +typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *); +typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *); typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *); @@ -202,6 +208,7 @@ struct nvm_id { struct nvm_target { struct list_head list; + struct nvm_dev *dev; struct nvm_tgt_type *type; struct gendisk *disk; }; @@ -232,14 +239,14 @@ struct nvm_rq { struct ppa_addr *ppa_list; - void *metadata; - dma_addr_t dma_metadata; + void *meta_list; + dma_addr_t dma_meta_list; struct completion *wait; nvm_end_io_fn *end_io; uint8_t opcode; - uint16_t nr_pages; + uint16_t nr_ppas; uint16_t flags; u64 ppa_status; /* ppa media status */ @@ -264,24 +271,15 @@ struct nvm_lun { int lun_id; int chnl_id; - /* It is up to the target to mark blocks as closed. 
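
One consequence of the reworked ppa_addr above: the union now overlays three views of the same 64 bits (the generic geometry fields, the cache-line view, and the raw value), which is why dev_to_generic_addr() gained the "l.ppa = 0" initialization; setting fields without clearing the raw word first would leak stale bits through the other views. An illustrative sketch:

        static void example_pack_ppa(void)
        {
                struct ppa_addr ppa;

                ppa.ppa = 0;    /* clear every view before packing fields */
                ppa.g.ch = 1;
                ppa.g.lun = 2;
                ppa.g.blk = 3;
                ppa.g.pg = 4;

                pr_debug("raw media address: 0x%llx\n",
                         (unsigned long long)ppa.ppa);
        }
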
If the target does - * not do it, all blocks are marked as open, and nr_open_blocks - * represents the number of blocks in use - */ - unsigned int nr_open_blocks; /* Number of used, writable blocks */ - unsigned int nr_closed_blocks; /* Number of used, read-only blocks */ - unsigned int nr_free_blocks; /* Number of unused blocks */ - unsigned int nr_bad_blocks; /* Number of bad blocks */ - spinlock_t lock; + unsigned int nr_free_blocks; /* Number of unused blocks */ struct nvm_block *blocks; }; enum { NVM_BLK_ST_FREE = 0x1, /* Free block */ - NVM_BLK_ST_OPEN = 0x2, /* Open block - read-write */ - NVM_BLK_ST_CLOSED = 0x4, /* Closed block - read-only */ + NVM_BLK_ST_TGT = 0x2, /* Block in use by target */ NVM_BLK_ST_BAD = 0x8, /* Bad block */ }; @@ -307,7 +305,6 @@ struct nvm_dev { struct nvm_dev_ops *ops; struct list_head devices; - struct list_head online_targets; /* Media manager */ struct nvmm_type *mt; @@ -323,6 +320,8 @@ struct nvm_dev { int sec_per_pg; /* only sectors for a single page */ int pgs_per_blk; int blks_per_lun; + int fpg_size; + int pfpg_size; /* size of buffer if all pages are to be read */ int sec_size; int oob_size; int mccap; @@ -345,10 +344,9 @@ struct nvm_dev { unsigned long total_blocks; unsigned long total_secs; int nr_luns; - unsigned max_pages_per_blk; unsigned long *lun_map; - void *ppalist_pool; + void *dma_pool; struct nvm_id identity; @@ -380,6 +378,7 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, { struct ppa_addr l; + l.ppa = 0; /* * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc. */ @@ -450,14 +449,19 @@ struct nvm_tgt_type { struct list_head list; }; -extern int nvm_register_target(struct nvm_tgt_type *); -extern void nvm_unregister_target(struct nvm_tgt_type *); +extern struct nvm_tgt_type *nvm_find_target_type(const char *, int); + +extern int nvm_register_tgt_type(struct nvm_tgt_type *); +extern void nvm_unregister_tgt_type(struct nvm_tgt_type *); extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *); extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t); typedef int (nvmm_register_fn)(struct nvm_dev *); typedef void (nvmm_unregister_fn)(struct nvm_dev *); + +typedef int (nvmm_create_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_create *); +typedef int (nvmm_remove_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_remove *); typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *, struct nvm_lun *, unsigned long); typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *); @@ -467,6 +471,7 @@ typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *); typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, unsigned long); +typedef void (nvmm_mark_blk_fn)(struct nvm_dev *, struct ppa_addr, int); typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); typedef int (nvmm_reserve_lun)(struct nvm_dev *, int); typedef void (nvmm_release_lun)(struct nvm_dev *, int); @@ -482,9 +487,10 @@ struct nvmm_type { nvmm_register_fn *register_mgr; nvmm_unregister_fn *unregister_mgr; + nvmm_create_tgt_fn *create_tgt; + nvmm_remove_tgt_fn *remove_tgt; + /* Block administration callbacks */ - nvmm_get_blk_fn *get_blk_unlocked; - nvmm_put_blk_fn *put_blk_unlocked; nvmm_get_blk_fn *get_blk; nvmm_put_blk_fn *put_blk; nvmm_open_blk_fn *open_blk; @@ -494,6 +500,9 @@ struct nvmm_type { nvmm_submit_io_fn *submit_io; nvmm_erase_blk_fn *erase_blk; + /* Bad block mgmt */ + nvmm_mark_blk_fn *mark_blk; + /* 
Configuration management */ nvmm_get_lun_fn *get_lun; nvmm_reserve_lun *reserve_lun; @@ -511,10 +520,6 @@ struct nvmm_type { extern int nvm_register_mgr(struct nvmm_type *); extern void nvm_unregister_mgr(struct nvmm_type *); -extern struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *, - struct nvm_lun *, unsigned long); -extern void nvm_put_blk_unlocked(struct nvm_dev *, struct nvm_block *); - extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *, unsigned long); extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *); @@ -523,17 +528,23 @@ extern int nvm_register(struct request_queue *, char *, struct nvm_dev_ops *); extern void nvm_unregister(char *); +void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type); + extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *); extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *); extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *); extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *, - struct ppa_addr *, int); + const struct ppa_addr *, int, int); extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int); extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); extern void nvm_end_io(struct nvm_rq *, int); extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int, void *, int); +extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int, + int, void *, int); +extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int); +extern int nvm_get_bb_tbl(struct nvm_dev *, struct ppa_addr, u8 *); /* sysblk.c */ #define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */ @@ -554,6 +565,13 @@ extern int nvm_update_sysblock(struct nvm_dev *, struct nvm_sb_info *); extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *); extern int nvm_dev_factory(struct nvm_dev *, int flags); + +#define nvm_for_each_lun_ppa(dev, ppa, chid, lunid) \ + for ((chid) = 0, (ppa).ppa = 0; (chid) < (dev)->nr_chnls; \ + (chid)++, (ppa).g.ch = (chid)) \ + for ((lunid) = 0; (lunid) < (dev)->luns_per_chnl; \ + (lunid)++, (ppa).g.lun = (lunid)) + #else /* CONFIG_NVM */ struct nvm_dev_ops; diff --git a/include/linux/list.h b/include/linux/list.h index 5356f4d661a7..5183138aa932 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -679,6 +679,16 @@ static inline bool hlist_fake(struct hlist_node *h) } /* + * Check whether the node is the only node of the head without + * accessing head: + */ +static inline bool +hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h) +{ + return !n->next && n->pprev == &h->first; +} + +/* * Move a list from one list head to another. Fixup the pprev * reference of the first entry if it exists. 
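
hlist_is_singular_node(), added above, answers "is this node the only entry?" from the node and head alone: !n->next means nothing follows, and n->pprev == &h->first means nothing precedes. A hypothetical removal path that tears down per-bucket state along with the last entry (the struct and helper names are invented):

        struct my_obj {
                struct hlist_node node;
                /* ... payload ... */
        };

        static void my_obj_remove(struct my_obj *obj, struct hlist_head *head)
        {
                if (hlist_is_singular_node(&obj->node, head)) {
                        /* obj is the bucket's last entry; release bucket state */
                }
                hlist_del_init(&obj->node);
        }
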
*/ diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index bd830d590465..a93a0b23dc8d 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -65,27 +65,8 @@ struct klp_func { }; /** - * struct klp_reloc - relocation structure for live patching - * @loc: address where the relocation will be written - * @sympos: position in kallsyms to disambiguate symbols (optional) - * @type: ELF relocation type - * @name: name of the referenced symbol (for lookup/verification) - * @addend: offset from the referenced symbol - * @external: symbol is either exported or within the live patch module itself - */ -struct klp_reloc { - unsigned long loc; - unsigned long sympos; - unsigned long type; - const char *name; - int addend; - int external; -}; - -/** * struct klp_object - kernel object structure for live patching * @name: module name (or NULL for vmlinux) - * @relocs: relocation entries to be applied at load time * @funcs: function entries for functions to be patched in the object * @kobj: kobject for sysfs resources * @mod: kernel module associated with the patched object @@ -95,7 +76,6 @@ struct klp_reloc { struct klp_object { /* external */ const char *name; - struct klp_reloc *relocs; struct klp_func *funcs; /* internal */ @@ -124,10 +104,12 @@ struct klp_patch { }; #define klp_for_each_object(patch, obj) \ - for (obj = patch->objs; obj->funcs; obj++) + for (obj = patch->objs; obj->funcs || obj->name; obj++) #define klp_for_each_func(obj, func) \ - for (func = obj->funcs; func->old_name; func++) + for (func = obj->funcs; \ + func->old_name || func->new_func || func->old_sympos; \ + func++) int klp_register_patch(struct klp_patch *); int klp_unregister_patch(struct klp_patch *); diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index d10ef06971b5..eabe0138eb06 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -356,8 +356,13 @@ extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask); extern void lockdep_clear_current_reclaim_state(void); extern void lockdep_trace_alloc(gfp_t mask); -extern void lock_pin_lock(struct lockdep_map *lock); -extern void lock_unpin_lock(struct lockdep_map *lock); +struct pin_cookie { unsigned int val; }; + +#define NIL_COOKIE (struct pin_cookie){ .val = 0U, } + +extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock); +extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie); +extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); # define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, @@ -373,8 +378,9 @@ extern void lock_unpin_lock(struct lockdep_map *lock); #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) -#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map) -#define lockdep_unpin_lock(l) lock_unpin_lock(&(l)->dep_map) +#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map) +#define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c)) +#define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c)) #else /* !CONFIG_LOCKDEP */ @@ -427,8 +433,13 @@ struct lock_class_key { }; #define lockdep_recursing(tsk) (0) -#define lockdep_pin_lock(l) do { (void)(l); } while (0) -#define lockdep_unpin_lock(l) do { (void)(l); } while (0) +struct pin_cookie { }; + +#define NIL_COOKIE (struct pin_cookie){ } + +#define lockdep_pin_lock(l) ({ struct pin_cookie cookie; cookie; }) +#define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0) +#define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0) #endif /* 
!LOCKDEP */ @@ -446,6 +457,18 @@ do { \ lock_acquired(&(_lock)->dep_map, _RET_IP_); \ } while (0) +#define LOCK_CONTENDED_RETURN(_lock, try, lock) \ +({ \ + int ____err = 0; \ + if (!try(_lock)) { \ + lock_contended(&(_lock)->dep_map, _RET_IP_); \ + ____err = lock(_lock); \ + } \ + if (!____err) \ + lock_acquired(&(_lock)->dep_map, _RET_IP_); \ + ____err; \ +}) + #else /* CONFIG_LOCK_STAT */ #define lock_contended(lockdep_map, ip) do {} while (0) @@ -454,6 +477,9 @@ do { \ #define LOCK_CONTENDED(_lock, try, lock) \ lock(_lock) +#define LOCK_CONTENDED_RETURN(_lock, try, lock) \ + lock(_lock) + #endif /* CONFIG_LOCK_STAT */ #ifdef CONFIG_LOCKDEP diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index cdee11cbcdf1..101bf19c0f41 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1190,7 +1190,8 @@ * Return 0 if permission is granted. * @settime: * Check permission to change the system time. - * struct timespec and timezone are defined in include/linux/time.h + * struct timespec64 is defined in include/linux/time64.h and timezone + * is defined in include/linux/time.h * @ts contains new time * @tz contains new timezone * Return 0 if permission is granted. @@ -1327,7 +1328,7 @@ union security_list_options { int (*quotactl)(int cmds, int type, int id, struct super_block *sb); int (*quota_on)(struct dentry *dentry); int (*syslog)(int type); - int (*settime)(const struct timespec *ts, const struct timezone *tz); + int (*settime)(const struct timespec64 *ts, const struct timezone *tz); int (*vm_enough_memory)(struct mm_struct *mm, long pages); int (*bprm_set_creds)(struct linux_binprm *bprm); @@ -1343,10 +1344,10 @@ union security_list_options { int (*sb_kern_mount)(struct super_block *sb, int flags, void *data); int (*sb_show_options)(struct seq_file *m, struct super_block *sb); int (*sb_statfs)(struct dentry *dentry); - int (*sb_mount)(const char *dev_name, struct path *path, + int (*sb_mount)(const char *dev_name, const struct path *path, const char *type, unsigned long flags, void *data); int (*sb_umount)(struct vfsmount *mnt, int flags); - int (*sb_pivotroot)(struct path *old_path, struct path *new_path); + int (*sb_pivotroot)(const struct path *old_path, const struct path *new_path); int (*sb_set_mnt_opts)(struct super_block *sb, struct security_mnt_opts *opts, unsigned long kern_flags, @@ -1355,28 +1356,28 @@ union security_list_options { struct super_block *newsb); int (*sb_parse_opts_str)(char *options, struct security_mnt_opts *opts); int (*dentry_init_security)(struct dentry *dentry, int mode, - struct qstr *name, void **ctx, + const struct qstr *name, void **ctx, u32 *ctxlen); #ifdef CONFIG_SECURITY_PATH - int (*path_unlink)(struct path *dir, struct dentry *dentry); - int (*path_mkdir)(struct path *dir, struct dentry *dentry, + int (*path_unlink)(const struct path *dir, struct dentry *dentry); + int (*path_mkdir)(const struct path *dir, struct dentry *dentry, umode_t mode); - int (*path_rmdir)(struct path *dir, struct dentry *dentry); - int (*path_mknod)(struct path *dir, struct dentry *dentry, + int (*path_rmdir)(const struct path *dir, struct dentry *dentry); + int (*path_mknod)(const struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev); - int (*path_truncate)(struct path *path); - int (*path_symlink)(struct path *dir, struct dentry *dentry, + int (*path_truncate)(const struct path *path); + int (*path_symlink)(const struct path *dir, struct dentry *dentry, const char *old_name); - int (*path_link)(struct dentry *old_dentry, 
struct path *new_dir, + int (*path_link)(struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry); - int (*path_rename)(struct path *old_dir, struct dentry *old_dentry, - struct path *new_dir, + int (*path_rename)(const struct path *old_dir, struct dentry *old_dentry, + const struct path *new_dir, struct dentry *new_dentry); - int (*path_chmod)(struct path *path, umode_t mode); - int (*path_chown)(struct path *path, kuid_t uid, kgid_t gid); - int (*path_chroot)(struct path *path); + int (*path_chmod)(const struct path *path, umode_t mode); + int (*path_chown)(const struct path *path, kuid_t uid, kgid_t gid); + int (*path_chroot)(const struct path *path); #endif int (*inode_alloc_security)(struct inode *inode); @@ -1804,7 +1805,6 @@ struct security_hook_heads { struct list_head tun_dev_attach_queue; struct list_head tun_dev_attach; struct list_head tun_dev_open; - struct list_head skb_owned_by; #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM struct list_head xfrm_policy_alloc_security; @@ -1893,5 +1893,10 @@ extern void __init yama_add_hooks(void); #else static inline void __init yama_add_hooks(void) { } #endif +#ifdef CONFIG_SECURITY_LOADPIN +void __init loadpin_add_hooks(void); +#else +static inline void loadpin_add_hooks(void) { } +#endif #endif /* ! __LINUX_LSM_HOOKS_H */ diff --git a/include/linux/mailbox/brcm-message.h b/include/linux/mailbox/brcm-message.h new file mode 100644 index 000000000000..6b55c938b401 --- /dev/null +++ b/include/linux/mailbox/brcm-message.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2016 Broadcom + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Common header for Broadcom mailbox messages which is shared across + * Broadcom SoCs and Broadcom mailbox client drivers. 
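+ * + * A minimal usage sketch (hypothetical client code; "chan" is assumed to
+ * have been obtained via mbox_request_channel(), and tx_sg/rx_sg are
+ * scatterlists prepared by the caller):
+ *
+ * struct brcm_message msg = { .type = BRCM_MESSAGE_SPU };
+ *
+ * msg.spu.src = tx_sg;
+ * msg.spu.dst = rx_sg;
+ * msg.ctx = my_ctx;
+ * rc = mbox_send_message(chan, &msg);
+ *
+ * mbox_send_message() returns a negative errno if the message cannot be
+ * queued; the error field below is meant to carry the completion status
+ * reported by the controller driver.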
+ */ + +#ifndef _LINUX_BRCM_MESSAGE_H_ +#define _LINUX_BRCM_MESSAGE_H_ + +#include <linux/scatterlist.h> + +enum brcm_message_type { + BRCM_MESSAGE_UNKNOWN = 0, + BRCM_MESSAGE_SPU, + BRCM_MESSAGE_SBA, + BRCM_MESSAGE_MAX, +}; + +struct brcm_sba_command { + u64 cmd; +#define BRCM_SBA_CMD_TYPE_A BIT(0) +#define BRCM_SBA_CMD_TYPE_B BIT(1) +#define BRCM_SBA_CMD_TYPE_C BIT(2) +#define BRCM_SBA_CMD_HAS_RESP BIT(3) +#define BRCM_SBA_CMD_HAS_OUTPUT BIT(4) + u64 flags; + dma_addr_t input; + size_t input_len; + dma_addr_t resp; + size_t resp_len; + dma_addr_t output; + size_t output_len; +}; + +struct brcm_message { + enum brcm_message_type type; + union { + struct { + struct scatterlist *src; + struct scatterlist *dst; + } spu; + struct { + struct brcm_sba_command *cmds; + unsigned int cmds_count; + } sba; + }; + void *ctx; + int error; +}; + +#endif /* _LINUX_BRCM_MESSAGE_H_ */ diff --git a/include/linux/mbus.h b/include/linux/mbus.h index ea34a867caa0..d610232762e3 100644 --- a/include/linux/mbus.h +++ b/include/linux/mbus.h @@ -66,7 +66,7 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(vo } #endif -int mvebu_mbus_save_cpu_target(u32 *store_addr); +int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr); void mvebu_mbus_get_pcie_mem_aperture(struct resource *res); void mvebu_mbus_get_pcie_io_aperture(struct resource *res); int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr); diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h index 433e0c74d643..a585b4b5fa0e 100644 --- a/include/linux/mc146818rtc.h +++ b/include/linux/mc146818rtc.h @@ -14,6 +14,8 @@ #include <asm/io.h> #include <linux/rtc.h> /* get the user-level API */ #include <asm/mc146818rtc.h> /* register access macros */ +#include <linux/bcd.h> +#include <linux/delay.h> #ifdef __KERNEL__ #include <linux/spinlock.h> /* spinlock_t */ @@ -120,4 +122,7 @@ struct cmos_rtc_board_info { #define RTC_IO_EXTENT_USED RTC_IO_EXTENT #endif /* ARCH_RTC_LOCATION */ +unsigned int mc146818_get_time(struct rtc_time *time); +int mc146818_set_time(struct rtc_time *time); + #endif /* _MC146818RTC_H */ diff --git a/include/linux/mcb.h b/include/linux/mcb.h index ed06e15a36aa..ead13d233a97 100644 --- a/include/linux/mcb.h +++ b/include/linux/mcb.h @@ -15,22 +15,30 @@ #include <linux/device.h> #include <linux/irqreturn.h> +#define CHAMELEON_FILENAME_LEN 12 + struct mcb_driver; struct mcb_device; /** * struct mcb_bus - MEN Chameleon Bus * - * @dev: pointer to carrier device - * @children: the child busses + * @dev: bus device + * @carrier: pointer to carrier device * @bus_nr: mcb bus number * @get_irq: callback to get IRQ number + * @revision: the FPGA's revision number + * @model: the FPGA's model number + * @minor: the FPGA's minor number + * @name: the FPGA's name */ struct mcb_bus { - struct list_head children; struct device dev; struct device *carrier; int bus_nr; + u8 revision; + char model; + u8 minor; + char name[CHAMELEON_FILENAME_LEN + 1]; int (*get_irq)(struct mcb_device *dev); }; #define to_mcb_bus(b) container_of((b), struct mcb_bus, dev) diff --git a/include/linux/mdio-mux.h b/include/linux/mdio-mux.h index a243dbba8659..61f5b21b31c7 100644 --- a/include/linux/mdio-mux.h +++ b/include/linux/mdio-mux.h @@ -10,11 +10,13 @@ #ifndef __LINUX_MDIO_MUX_H #define __LINUX_MDIO_MUX_H #include <linux/device.h> +#include <linux/phy.h> int mdio_mux_init(struct device *dev, int (*switch_fn) (int cur, int desired, void *data), void **mux_handle, - void *data); + void *data, + struct mii_bus *mux_bus); void 
mdio_mux_uninit(void *mux_handle); diff --git a/include/linux/mdio.h b/include/linux/mdio.h index 5bfd99d1a40a..bf9d1d750693 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -13,6 +13,17 @@ struct mii_bus; +/* Multiple levels of nesting are possible. However, typically this is + * limited to a nested DSA-like layer, a MUX layer, and the normal + * user. Instead of trying to handle the general case, just define + * these cases. + */ +enum mdio_mutex_lock_class { + MDIO_MUTEX_NORMAL, + MDIO_MUTEX_MUX, + MDIO_MUTEX_NESTED, +}; + struct mdio_device { struct device dev; diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 3106ac1c895e..2925da23505d 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -73,8 +73,8 @@ extern bool movable_node_enabled; if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, - phys_addr_t start, phys_addr_t end, - int nid, ulong flags); + phys_addr_t start, phys_addr_t end, + int nid, ulong flags); phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align); phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr); @@ -110,7 +110,7 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags, phys_addr_t *out_end, int *out_nid); void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, - phys_addr_t *out_end); + phys_addr_t *out_end); /** * for_each_mem_range - iterate through memblock areas from type_a and not @@ -148,7 +148,7 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, p_start, p_end, p_nid) \ for (i = (u64)ULLONG_MAX, \ __next_mem_range_rev(&i, nid, flags, type_a, type_b,\ - p_start, p_end, p_nid); \ + p_start, p_end, p_nid); \ i != (u64)ULLONG_MAX; \ __next_mem_range_rev(&i, nid, flags, type_a, type_b, \ p_start, p_end, p_nid)) @@ -163,8 +163,7 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, * is initialized. 
*/ #define for_each_reserved_mem_region(i, p_start, p_end) \ - for (i = 0UL, \ - __next_reserved_mem_region(&i, p_start, p_end); \ + for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end); \ i != (u64)ULLONG_MAX; \ __next_reserved_mem_region(&i, p_start, p_end)) @@ -333,6 +332,7 @@ phys_addr_t memblock_mem_size(unsigned long limit_pfn); phys_addr_t memblock_start_of_DRAM(void); phys_addr_t memblock_end_of_DRAM(void); void memblock_enforce_memory_limit(phys_addr_t memory_limit); +void memblock_mem_limit_remove_map(phys_addr_t limit); bool memblock_is_memory(phys_addr_t addr); int memblock_is_map_memory(phys_addr_t addr); int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); @@ -403,15 +403,14 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo } #define for_each_memblock(memblock_type, region) \ - for (region = memblock.memblock_type.regions; \ + for (region = memblock.memblock_type.regions; \ region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \ region++) #define for_each_memblock_type(memblock_type, rgn) \ - idx = 0; \ - rgn = &memblock_type->regions[idx]; \ - for (idx = 0; idx < memblock_type->cnt; \ - idx++,rgn = &memblock_type->regions[idx]) + for (idx = 0, rgn = &memblock_type->regions[0]; \ + idx < memblock_type->cnt; \ + idx++, rgn = &memblock_type->regions[idx]) #ifdef CONFIG_MEMTEST extern void early_memtest(phys_addr_t start, phys_addr_t end); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 1191d79aa495..5d8ca6e02e39 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -52,7 +52,7 @@ enum mem_cgroup_stat_index { MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */ MEM_CGROUP_STAT_NSTATS, /* default hierarchy stats */ - MEMCG_KERNEL_STACK = MEM_CGROUP_STAT_NSTATS, + MEMCG_KERNEL_STACK_KB = MEM_CGROUP_STAT_NSTATS, MEMCG_SLAB_RECLAIMABLE, MEMCG_SLAB_UNRECLAIMABLE, MEMCG_SOCK, @@ -60,7 +60,7 @@ enum mem_cgroup_stat_index { }; struct mem_cgroup_reclaim_cookie { - struct zone *zone; + pg_data_t *pgdat; int priority; unsigned int generation; }; @@ -97,6 +97,11 @@ enum mem_cgroup_events_target { #define MEM_CGROUP_ID_SHIFT 16 #define MEM_CGROUP_ID_MAX USHRT_MAX +struct mem_cgroup_id { + int id; + atomic_t ref; +}; + struct mem_cgroup_stat_cpu { long count[MEMCG_NR_STAT]; unsigned long events[MEMCG_NR_EVENTS]; @@ -113,7 +118,7 @@ struct mem_cgroup_reclaim_iter { /* * per-zone information in memory controller. */ -struct mem_cgroup_per_zone { +struct mem_cgroup_per_node { struct lruvec lruvec; unsigned long lru_size[NR_LRU_LISTS]; @@ -127,10 +132,6 @@ struct mem_cgroup_per_zone { /* use container_of */ }; -struct mem_cgroup_per_node { - struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; -}; - struct mem_cgroup_threshold { struct eventfd_ctx *eventfd; unsigned long threshold; @@ -172,6 +173,9 @@ enum memcg_kmem_state { struct mem_cgroup { struct cgroup_subsys_state css; + /* Private memcg ID. 
Used to ID objects that outlive the cgroup */ + struct mem_cgroup_id id; + /* Accounted resources */ struct page_counter memory; struct page_counter swap; @@ -306,8 +310,46 @@ void mem_cgroup_uncharge_list(struct list_head *page_list); void mem_cgroup_migrate(struct page *oldpage, struct page *newpage); -struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); -struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); +static struct mem_cgroup_per_node * +mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid) +{ + return memcg->nodeinfo[nid]; +} + +/** + * mem_cgroup_lruvec - get the lru list vector for a node or a memcg & node + * @pgdat: pglist_data of the wanted lruvec + * @memcg: memcg of the wanted lruvec + * + * Returns the lru list vector holding pages for a given @pgdat or the one + * for a given @memcg on that node. This can be the node lruvec, if the + * memory controller is disabled. + */ +static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat, + struct mem_cgroup *memcg) +{ + struct mem_cgroup_per_node *mz; + struct lruvec *lruvec; + + if (mem_cgroup_disabled()) { + lruvec = node_lruvec(pgdat); + goto out; + } + + mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); + lruvec = &mz->lruvec; +out: + /* + * Since a node can be onlined after the mem_cgroup was created, + * we have to be prepared to initialize lruvec->pgdat here; + * and if offlined then reonlined, we need to reinitialize it. + */ + if (unlikely(lruvec->pgdat != pgdat)) + lruvec->pgdat = pgdat; + return lruvec; +} + +struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *); bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); @@ -330,22 +372,9 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) if (mem_cgroup_disabled()) return 0; - return memcg->css.id; -} - -/** - * mem_cgroup_from_id - look up a memcg from an id - * @id: the id to look up - * - * Caller must hold rcu_read_lock() and use css_tryget() as necessary. 
- */ -static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id) -{ - struct cgroup_subsys_state *css; - - css = css_from_id(id, &memory_cgrp_subsys); - return mem_cgroup_from_css(css); + return memcg->id.id; } +struct mem_cgroup *mem_cgroup_from_id(unsigned short id); /** * parent_mem_cgroup - find the accounting parent of a memcg @@ -409,31 +438,12 @@ unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, static inline unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) { - struct mem_cgroup_per_zone *mz; + struct mem_cgroup_per_node *mz; - mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); + mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); return mz->lru_size[lru]; } -static inline bool mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) -{ - unsigned long inactive_ratio; - unsigned long inactive; - unsigned long active; - unsigned long gb; - - inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON); - active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON); - - gb = (inactive + active) >> (30 - PAGE_SHIFT); - if (gb) - inactive_ratio = int_sqrt(10 * gb); - else - inactive_ratio = 1; - - return inactive * inactive_ratio < active; -} - void mem_cgroup_handle_over_high(void); void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, @@ -501,7 +511,7 @@ static inline void mem_cgroup_dec_page_stat(struct page *page, mem_cgroup_update_page_stat(page, idx, -1); } -unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, +unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned); @@ -592,16 +602,16 @@ static inline void mem_cgroup_migrate(struct page *old, struct page *new) { } -static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone, - struct mem_cgroup *memcg) +static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat, + struct mem_cgroup *memcg) { - return &zone->lruvec; + return node_lruvec(pgdat); } static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page, - struct zone *zone) + struct pglist_data *pgdat) { - return &zone->lruvec; + return &pgdat->lruvec; } static inline bool mm_match_cgroup(struct mm_struct *mm, @@ -646,24 +656,12 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg) return true; } -static inline bool -mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) -{ - return true; -} - static inline unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) { return 0; } -static inline void -mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, - int increment) -{ -} - static inline unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, int nid, unsigned int lru_mask) @@ -717,7 +715,7 @@ static inline void mem_cgroup_dec_page_stat(struct page *page, } static inline -unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, +unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned) { @@ -785,6 +783,13 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) } #endif +struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); +void memcg_kmem_put_cache(struct kmem_cache *cachep); +int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, + struct mem_cgroup *memcg); +int memcg_kmem_charge(struct page *page, gfp_t gfp, int order); +void memcg_kmem_uncharge(struct page *page, int order); + #if 
defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) extern struct static_key_false memcg_kmem_enabled_key; @@ -806,22 +811,6 @@ static inline bool memcg_kmem_enabled(void) } /* - * In general, we'll do everything in our power to not incur in any overhead - * for non-memcg users for the kmem functions. Not even a function call, if we - * can avoid it. - * - * Therefore, we'll inline all those functions so that in the best case, we'll - * see that kmemcg is off for everybody and proceed quickly. If it is on, - * we'll still do most of the flag checking inline. We check a lot of - * conditions, but because they are pretty simple, they are expected to be - * fast. - */ -int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, - struct mem_cgroup *memcg); -int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order); -void __memcg_kmem_uncharge(struct page *page, int order); - -/* * helper for accessing a memcg's index. It will be used as an index in the * child cache array in kmem_cache, and also to derive its name. This function * will return -1 when this is not a kmem-limited memcg. @@ -831,67 +820,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) return memcg ? memcg->kmemcg_id : -1; } -struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); -void __memcg_kmem_put_cache(struct kmem_cache *cachep); - -static inline bool __memcg_kmem_bypass(void) -{ - if (!memcg_kmem_enabled()) - return true; - if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) - return true; - return false; -} - -/** - * memcg_kmem_charge: charge a kmem page - * @page: page to charge - * @gfp: reclaim mode - * @order: allocation order - * - * Returns 0 on success, an error code on failure. - */ -static __always_inline int memcg_kmem_charge(struct page *page, - gfp_t gfp, int order) -{ - if (__memcg_kmem_bypass()) - return 0; - if (!(gfp & __GFP_ACCOUNT)) - return 0; - return __memcg_kmem_charge(page, gfp, order); -} - -/** - * memcg_kmem_uncharge: uncharge a kmem page - * @page: page to uncharge - * @order: allocation order - */ -static __always_inline void memcg_kmem_uncharge(struct page *page, int order) -{ - if (memcg_kmem_enabled()) - __memcg_kmem_uncharge(page, order); -} - -/** - * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation - * @cachep: the original global kmem cache - * - * All memory allocated from a per-memcg cache is charged to the owner memcg. 
- */ -static __always_inline struct kmem_cache * -memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) -{ - if (__memcg_kmem_bypass()) - return cachep; - return __memcg_kmem_get_cache(cachep, gfp); -} - -static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep) -{ - if (memcg_kmem_enabled()) - __memcg_kmem_put_cache(cachep); -} - /** * memcg_kmem_update_page_stat - update kmem page state statistics * @page: the page @@ -914,15 +842,6 @@ static inline bool memcg_kmem_enabled(void) return false; } -static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) -{ - return 0; -} - -static inline void memcg_kmem_uncharge(struct page *page, int order) -{ -} - static inline int memcg_cache_id(struct mem_cgroup *memcg) { return -1; @@ -936,16 +855,6 @@ static inline void memcg_put_cache_ids(void) { } -static inline struct kmem_cache * -memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) -{ - return cachep; -} - -static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) -{ -} - static inline void memcg_kmem_update_page_stat(struct page *page, enum mem_cgroup_stat_index idx, int val) { diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index adbef586e696..01033fadea47 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -182,7 +182,7 @@ static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) #endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE -extern void register_page_bootmem_info_node(struct pglist_data *pgdat); +extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat); #else static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) { @@ -247,16 +247,16 @@ static inline void mem_hotplug_done(void) {} #ifdef CONFIG_MEMORY_HOTREMOVE -extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); +extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); extern void try_offline_node(int nid); extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); extern void remove_memory(int nid, u64 start, u64 size); #else -static inline int is_mem_section_removable(unsigned long pfn, +static inline bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages) { - return 0; + return false; } static inline void try_offline_node(int nid) {} @@ -284,5 +284,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, unsigned long map_offset); extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum); +extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages, + enum zone_type target); #endif /* __LINUX_MEMORY_HOTPLUG_H */ diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 2696c1f05ed1..5e5b2969d931 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -172,14 +172,14 @@ extern int mpol_parse_str(char *str, struct mempolicy **mpol); extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol); /* Check if a vma is migratable */ -static inline int vma_migratable(struct vm_area_struct *vma) +static inline bool vma_migratable(struct vm_area_struct *vma) { if (vma->vm_flags & (VM_IO | VM_PFNMAP)) - return 0; + return false; #ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION if (vma->vm_flags & VM_HUGETLB) - return 0; + return false; #endif /* @@ -190,11 +190,12 @@ static inline int vma_migratable(struct vm_area_struct *vma) if (vma->vm_file && 
gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) < policy_zone) - return 0; - return 1; + return false; + return true; } extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long); +extern void mpol_put_task_policy(struct task_struct *); #else @@ -228,6 +229,12 @@ static inline void mpol_free_shared_policy(struct shared_policy *p) { } +static inline struct mempolicy * +mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) +{ + return NULL; +} + #define vma_policy(vma) NULL static inline int @@ -291,5 +298,8 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma, return -1; /* no node preference */ } +static inline void mpol_put_task_policy(struct task_struct *task) +{ +} #endif /* CONFIG_NUMA */ #endif diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 69b6951e8fd2..b1086c936507 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -5,6 +5,7 @@ #define _LINUX_MEMPOOL_H #include <linux/wait.h> +#include <linux/compiler.h> struct kmem_cache; @@ -31,7 +32,7 @@ extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, extern int mempool_resize(mempool_t *pool, int new_min_nr); extern void mempool_destroy(mempool_t *pool); -extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask); +extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc; extern void mempool_free(void *element, mempool_t *pool); /* diff --git a/include/linux/memremap.h b/include/linux/memremap.h index bcaa634139a9..93416196ba64 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -26,7 +26,7 @@ struct vmem_altmap { unsigned long vmem_altmap_offset(struct vmem_altmap *altmap); void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns); -#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_ZONE_DEVICE) +#ifdef CONFIG_ZONE_DEVICE struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start); #else static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start) diff --git a/include/linux/mfd/abx500/ab8500-sysctrl.h b/include/linux/mfd/abx500/ab8500-sysctrl.h index 689312745b2f..01024d1aed0e 100644 --- a/include/linux/mfd/abx500/ab8500-sysctrl.h +++ b/include/linux/mfd/abx500/ab8500-sysctrl.h @@ -37,12 +37,6 @@ static inline int ab8500_sysctrl_clear(u16 reg, u8 bits) return ab8500_sysctrl_write(reg, bits, 0); } -/* Configuration data for SysClkReq1RfClkBuf - SysClkReq8RfClkBuf */ -struct ab8500_sysctrl_platform_data { - u8 initial_req_buf_config[8]; - u16 (*reboot_reason_code)(const char *cmd); -}; - /* Registers */ #define AB8500_TURNONSTATUS 0x100 #define AB8500_RESETSTATUS 0x101 diff --git a/include/linux/mfd/altera-a10sr.h b/include/linux/mfd/altera-a10sr.h new file mode 100644 index 000000000000..45a5e6e7db54 --- /dev/null +++ b/include/linux/mfd/altera-a10sr.h @@ -0,0 +1,85 @@ +/* + * Copyright Intel Corporation (C) 2014-2016. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ * + * Declarations for Altera Arria10 MAX5 System Resource Chip + * + * Adapted from DA9052 + */ + +#ifndef __MFD_ALTERA_A10SR_H +#define __MFD_ALTERA_A10SR_H + +#include <linux/completion.h> +#include <linux/list.h> +#include <linux/mfd/core.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +/* Write registers are always on even addresses */ +#define WRITE_REG_MASK 0xFE +/* Read registers are always on odd addresses */ +#define READ_REG_MASK 0x01 + +#define ALTR_A10SR_BITS_PER_REGISTER 8 +/* + * To find the correct register, we divide the input GPIO by + * the number of GPIOs in each register. We then need to multiply + * by 2 because the reads are at odd addresses. + */ +#define ALTR_A10SR_REG_OFFSET(X) (((X) / ALTR_A10SR_BITS_PER_REGISTER) << 1) +#define ALTR_A10SR_REG_BIT(X) ((X) % ALTR_A10SR_BITS_PER_REGISTER) +#define ALTR_A10SR_REG_BIT_CHG(X, Y) ((X) << ALTR_A10SR_REG_BIT(Y)) +#define ALTR_A10SR_REG_BIT_MASK(X) (1 << ALTR_A10SR_REG_BIT(X)) + +/* Arria10 System Controller Register Defines */ +#define ALTR_A10SR_NOP 0x00 /* No Change */ +#define ALTR_A10SR_VERSION_READ 0x00 /* MAX5 Version Read */ + +#define ALTR_A10SR_LED_REG 0x02 /* LED - Upper 4 bits */ +/* LED register Bit Definitions */ +#define ALTR_A10SR_LED_VALID_SHIFT 4 /* LED - Upper 4 bits valid */ +#define ALTR_A10SR_OUT_VALID_RANGE_LO ALTR_A10SR_LED_VALID_SHIFT +#define ALTR_A10SR_OUT_VALID_RANGE_HI 7 + +#define ALTR_A10SR_PBDSW_REG 0x04 /* PB & DIP SW - Input only */ +#define ALTR_A10SR_PBDSW_IRQ_REG 0x06 /* PB & DIP SW Flag Clear */ +/* Pushbutton & DIP Switch Bit Definitions */ +#define ALTR_A10SR_IN_VALID_RANGE_LO 8 +#define ALTR_A10SR_IN_VALID_RANGE_HI 15 + +#define ALTR_A10SR_PWR_GOOD1_REG 0x08 /* Power Good1 Read */ +#define ALTR_A10SR_PWR_GOOD2_REG 0x0A /* Power Good2 Read */ +#define ALTR_A10SR_PWR_GOOD3_REG 0x0C /* Power Good3 Read */ +#define ALTR_A10SR_FMCAB_REG 0x0E /* FMCA/B & PCIe Pwr Enable */ +#define ALTR_A10SR_HPS_RST_REG 0x10 /* HPS Reset */ +#define ALTR_A10SR_USB_QSPI_REG 0x12 /* USB, BQSPI, FILE Reset */ +#define ALTR_A10SR_SFPA_REG 0x14 /* SFPA Control Reg */ +#define ALTR_A10SR_SFPB_REG 0x16 /* SFPB Control Reg */ +#define ALTR_A10SR_I2C_M_REG 0x18 /* I2C Master Select */ +#define ALTR_A10SR_WARM_RST_REG 0x1A /* HPS Warm Reset */ +#define ALTR_A10SR_WR_KEY_REG 0x1C /* HPS Warm Reset Key */ +#define ALTR_A10SR_PMBUS_REG 0x1E /* HPS PM Bus */ + +/** + * struct altr_a10sr - Altera Max5 MFD device private data structure + * @dev: this device + * @regmap: the regmap assigned to the parent device. 
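+ * + * Consumers are expected to reach the MAX5 through @regmap. A minimal
+ * sketch (hypothetical; a10sr is the pointer a subdevice receives from
+ * its parent MFD driver):
+ *
+ * unsigned int version;
+ *
+ * if (!regmap_read(a10sr->regmap, ALTR_A10SR_VERSION_READ, &version))
+ * dev_info(a10sr->dev, "MAX5 version %u\n", version);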
+ */ +struct altr_a10sr { + struct device *dev; + struct regmap *regmap; +}; + +#endif /* __MFD_ALTERA_A10SR_H */ diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h index d55a42297d49..58ab4c0fe761 100644 --- a/include/linux/mfd/arizona/core.h +++ b/include/linux/mfd/arizona/core.h @@ -14,6 +14,7 @@ #define _WM_ARIZONA_CORE_H #include <linux/interrupt.h> +#include <linux/notifier.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/mfd/arizona/pdata.h> @@ -148,8 +149,17 @@ struct arizona { uint16_t dac_comp_coeff; uint8_t dac_comp_enabled; struct mutex dac_comp_lock; + + struct blocking_notifier_head notifier; }; +static inline int arizona_call_notifiers(struct arizona *arizona, + unsigned long event, + void *data) +{ + return blocking_notifier_call_chain(&arizona->notifier, event, data); +} + int arizona_clk32k_enable(struct arizona *arizona); int arizona_clk32k_disable(struct arizona *arizona); diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h index cd7e78eae006..0d06c5d0af93 100644 --- a/include/linux/mfd/arizona/registers.h +++ b/include/linux/mfd/arizona/registers.h @@ -856,12 +856,6 @@ #define ARIZONA_ISRC1INT4MIX_INPUT_1_SOURCE 0xB38 #define ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE 0xB40 #define ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE 0xB48 -#define ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE 0xB60 -#define ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE 0xB68 -#define ARIZONA_ISRC1INT3MIX_INPUT_1_SOURCE 0xB30 -#define ARIZONA_ISRC1INT4MIX_INPUT_1_SOURCE 0xB38 -#define ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE 0xB40 -#define ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE 0xB48 #define ARIZONA_ISRC2DEC3MIX_INPUT_1_SOURCE 0xB50 #define ARIZONA_ISRC2DEC4MIX_INPUT_1_SOURCE 0xB58 #define ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE 0xB60 diff --git a/include/linux/mfd/as3722.h b/include/linux/mfd/as3722.h index 8d43e9f2a842..51e6f9414575 100644 --- a/include/linux/mfd/as3722.h +++ b/include/linux/mfd/as3722.h @@ -196,6 +196,7 @@ #define AS3722_LDO3_VSEL_MIN 0x01 #define AS3722_LDO3_VSEL_MAX 0x2D #define AS3722_LDO3_NUM_VOLT 0x2D +#define AS3722_LDO6_VSEL_BYPASS 0x3F #define AS3722_LDO_VSEL_MASK 0x7F #define AS3722_LDO_VSEL_MIN 0x01 #define AS3722_LDO_VSEL_MAX 0x7F diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index d82e7d51372b..0be4982f08fe 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -20,6 +20,7 @@ enum { AXP221_ID, AXP223_ID, AXP288_ID, + AXP809_ID, NR_AXP20X_VARIANTS, }; @@ -264,6 +265,29 @@ enum { AXP22X_REG_ID_MAX, }; +enum { + AXP809_DCDC1 = 0, + AXP809_DCDC2, + AXP809_DCDC3, + AXP809_DCDC4, + AXP809_DCDC5, + AXP809_DC1SW, + AXP809_DC5LDO, + AXP809_ALDO1, + AXP809_ALDO2, + AXP809_ALDO3, + AXP809_ELDO1, + AXP809_ELDO2, + AXP809_ELDO3, + AXP809_DLDO1, + AXP809_DLDO2, + AXP809_RTC_LDO, + AXP809_LDO_IO0, + AXP809_LDO_IO1, + AXP809_SW, + AXP809_REG_ID_MAX, +}; + /* IRQs */ enum { AXP152_IRQ_LDO0IN_CONNECT = 1, @@ -390,6 +414,41 @@ enum axp288_irqs { AXP288_IRQ_BC_USB_CHNG, }; +enum axp809_irqs { + AXP809_IRQ_ACIN_OVER_V = 1, + AXP809_IRQ_ACIN_PLUGIN, + AXP809_IRQ_ACIN_REMOVAL, + AXP809_IRQ_VBUS_OVER_V, + AXP809_IRQ_VBUS_PLUGIN, + AXP809_IRQ_VBUS_REMOVAL, + AXP809_IRQ_VBUS_V_LOW, + AXP809_IRQ_BATT_PLUGIN, + AXP809_IRQ_BATT_REMOVAL, + AXP809_IRQ_BATT_ENT_ACT_MODE, + AXP809_IRQ_BATT_EXIT_ACT_MODE, + AXP809_IRQ_CHARG, + AXP809_IRQ_CHARG_DONE, + AXP809_IRQ_BATT_CHG_TEMP_HIGH, + AXP809_IRQ_BATT_CHG_TEMP_HIGH_END, + AXP809_IRQ_BATT_CHG_TEMP_LOW, + AXP809_IRQ_BATT_CHG_TEMP_LOW_END, + 
AXP809_IRQ_BATT_ACT_TEMP_HIGH, + AXP809_IRQ_BATT_ACT_TEMP_HIGH_END, + AXP809_IRQ_BATT_ACT_TEMP_LOW, + AXP809_IRQ_BATT_ACT_TEMP_LOW_END, + AXP809_IRQ_DIE_TEMP_HIGH, + AXP809_IRQ_LOW_PWR_LVL1, + AXP809_IRQ_LOW_PWR_LVL2, + AXP809_IRQ_TIMER, + AXP809_IRQ_PEK_RIS_EDGE, + AXP809_IRQ_PEK_FAL_EDGE, + AXP809_IRQ_PEK_SHORT, + AXP809_IRQ_PEK_LONG, + AXP809_IRQ_PEK_OVER_OFF, + AXP809_IRQ_GPIO1_INPUT, + AXP809_IRQ_GPIO0_INPUT, +}; + #define AXP288_TS_ADC_H 0x58 #define AXP288_TS_ADC_L 0x59 #define AXP288_GP_ADC_H 0x5a diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index bc6f7e00fb3d..99c0395fe1f9 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -17,7 +17,7 @@ #include <linux/platform_device.h> struct irq_domain; -struct property_set; +struct property_entry; /* Matches ACPI PNP id, either _HID or _CID, or ACPI _ADR */ struct mfd_cell_acpi_match { @@ -47,7 +47,7 @@ struct mfd_cell { size_t pdata_size; /* device properties passed to the sub devices drivers */ - const struct property_set *pset; + struct property_entry *properties; /* * Device Tree compatible string @@ -131,4 +131,8 @@ static inline int mfd_add_hotplug_devices(struct device *parent, extern void mfd_remove_devices(struct device *parent); +extern int devm_mfd_add_devices(struct device *dev, int id, + const struct mfd_cell *cells, int n_devs, + struct resource *mem_base, + int irq_base, struct irq_domain *irq_domain); #endif diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index a677c2bd485c..d641a18abacb 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -50,9 +50,11 @@ enum { EC_MSG_TX_TRAILER_BYTES, EC_MSG_RX_PROTO_BYTES = 3, - /* Max length of messages */ - EC_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE + + /* Max length of messages for proto 2 */ + EC_PROTO2_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE + EC_MSG_TX_PROTO_BYTES, + + EC_MAX_MSG_BYTES = 64 * 1024, }; /* @@ -224,6 +226,21 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); /** + * cros_ec_cmd_xfer_status - Send a command to the ChromeOS EC + * + * This function is identical to cros_ec_cmd_xfer, except it returns success + * status only if both the command was transmitted successfully and the EC + * replied with success status. It's not necessary to check msg->result when + * using this function. + * + * @ec_dev: EC device + * @msg: Message to write + * @return: Num. of bytes transferred on success, <0 on failure + */ +int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + +/** * cros_ec_remove - Remove a ChromeOS EC * * Call this to deregister a ChromeOS EC, then clean up any private data. 
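As a usage sketch of the above (hypothetical caller, assuming an already-registered struct cros_ec_device *ec; it uses the EC_CMD_PWM_GET_DUTY request/response pair that the cros_ec_commands.h hunk below introduces):

	struct ec_params_pwm_get_duty *params;
	struct ec_response_pwm_get_duty *resp;
	struct cros_ec_command *msg;
	u16 duty;
	int ret;

	msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*resp)), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->version = 0;
	msg->command = EC_CMD_PWM_GET_DUTY;
	msg->outsize = sizeof(*params);
	msg->insize = sizeof(*resp);

	params = (struct ec_params_pwm_get_duty *)msg->data;
	params->pwm_type = EC_PWM_TYPE_KB_LIGHT;
	params->index = 0;

	ret = cros_ec_cmd_xfer_status(ec, msg);
	if (ret >= 0) {
		/* Transmitted and the EC replied with success; no msg->result check. */
		resp = (struct ec_response_pwm_get_duty *)msg->data;
		duty = resp->duty;	/* EC_PWM_MAX_DUTY corresponds to 100% */
	}
	kfree(msg);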
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h index 13b630c10d4c..7e7a8d4b4551 100644 --- a/include/linux/mfd/cros_ec_commands.h +++ b/include/linux/mfd/cros_ec_commands.h @@ -949,6 +949,37 @@ struct ec_params_pwm_set_fan_duty { uint32_t percent; } __packed; +#define EC_CMD_PWM_SET_DUTY 0x25 +/* 16 bit duty cycle, 0xffff = 100% */ +#define EC_PWM_MAX_DUTY 0xffff + +enum ec_pwm_type { + /* All types, indexed by board-specific enum pwm_channel */ + EC_PWM_TYPE_GENERIC = 0, + /* Keyboard backlight */ + EC_PWM_TYPE_KB_LIGHT, + /* Display backlight */ + EC_PWM_TYPE_DISPLAY_LIGHT, + EC_PWM_TYPE_COUNT, +}; + +struct ec_params_pwm_set_duty { + uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */ + uint8_t pwm_type; /* ec_pwm_type */ + uint8_t index; /* Type-specific index, or 0 if unique */ +} __packed; + +#define EC_CMD_PWM_GET_DUTY 0x26 + +struct ec_params_pwm_get_duty { + uint8_t pwm_type; /* ec_pwm_type */ + uint8_t index; /* Type-specific index, or 0 if unique */ +} __packed; + +struct ec_response_pwm_get_duty { + uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */ +} __packed; + /*****************************************************************************/ /* * Lightbar commands. This looks worse than it is. Since we only use one HOST diff --git a/include/linux/mfd/da8xx-cfgchip.h b/include/linux/mfd/da8xx-cfgchip.h new file mode 100644 index 000000000000..304985e288d2 --- /dev/null +++ b/include/linux/mfd/da8xx-cfgchip.h @@ -0,0 +1,153 @@ +/* + * TI DaVinci DA8xx CHIPCFGx registers for syscon consumers. + * + * Copyright (C) 2016 David Lechner <david@lechnology.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __LINUX_MFD_DA8XX_CFGCHIP_H +#define __LINUX_MFD_DA8XX_CFGCHIP_H + +#include <linux/bitops.h> + +/* register offset (32-bit registers) */ +#define CFGCHIP(n) ((n) * 4) + +/* CFGCHIP0 (PLL0/EDMA3_0) register bits */ +#define CFGCHIP0_PLL_MASTER_LOCK BIT(4) +#define CFGCHIP0_EDMA30TC1DBS(n) ((n) << 2) +#define CFGCHIP0_EDMA30TC1DBS_MASK CFGCHIP0_EDMA30TC1DBS(0x3) +#define CFGCHIP0_EDMA30TC1DBS_16 CFGCHIP0_EDMA30TC1DBS(0x0) +#define CFGCHIP0_EDMA30TC1DBS_32 CFGCHIP0_EDMA30TC1DBS(0x1) +#define CFGCHIP0_EDMA30TC1DBS_64 CFGCHIP0_EDMA30TC1DBS(0x2) +#define CFGCHIP0_EDMA30TC0DBS(n) ((n) << 0) +#define CFGCHIP0_EDMA30TC0DBS_MASK CFGCHIP0_EDMA30TC0DBS(0x3) +#define CFGCHIP0_EDMA30TC0DBS_16 CFGCHIP0_EDMA30TC0DBS(0x0) +#define CFGCHIP0_EDMA30TC0DBS_32 CFGCHIP0_EDMA30TC0DBS(0x1) +#define CFGCHIP0_EDMA30TC0DBS_64 CFGCHIP0_EDMA30TC0DBS(0x2) + +/* CFGCHIP1 (eCAP/HPI/EDMA3_1/eHRPWM TBCLK/McASP0 AMUTEIN) register bits */ +#define CFGCHIP1_CAP2SRC(n) ((n) << 27) +#define CFGCHIP1_CAP2SRC_MASK CFGCHIP1_CAP2SRC(0x1f) +#define CFGCHIP1_CAP2SRC_ECAP_PIN CFGCHIP1_CAP2SRC(0x0) +#define CFGCHIP1_CAP2SRC_MCASP0_TX CFGCHIP1_CAP2SRC(0x1) +#define CFGCHIP1_CAP2SRC_MCASP0_RX CFGCHIP1_CAP2SRC(0x2) +#define CFGCHIP1_CAP2SRC_EMAC_C0_RX_THRESHOLD CFGCHIP1_CAP2SRC(0x7) +#define CFGCHIP1_CAP2SRC_EMAC_C0_RX CFGCHIP1_CAP2SRC(0x8) +#define CFGCHIP1_CAP2SRC_EMAC_C0_TX CFGCHIP1_CAP2SRC(0x9) +#define CFGCHIP1_CAP2SRC_EMAC_C0_MISC CFGCHIP1_CAP2SRC(0xa) +#define CFGCHIP1_CAP2SRC_EMAC_C1_RX_THRESHOLD CFGCHIP1_CAP2SRC(0xb) +#define CFGCHIP1_CAP2SRC_EMAC_C1_RX CFGCHIP1_CAP2SRC(0xc) +#define CFGCHIP1_CAP2SRC_EMAC_C1_TX CFGCHIP1_CAP2SRC(0xd) +#define CFGCHIP1_CAP2SRC_EMAC_C1_MISC CFGCHIP1_CAP2SRC(0xe) +#define CFGCHIP1_CAP2SRC_EMAC_C2_RX_THRESHOLD CFGCHIP1_CAP2SRC(0xf) +#define CFGCHIP1_CAP2SRC_EMAC_C2_RX CFGCHIP1_CAP2SRC(0x10) +#define CFGCHIP1_CAP2SRC_EMAC_C2_TX CFGCHIP1_CAP2SRC(0x11) +#define CFGCHIP1_CAP2SRC_EMAC_C2_MISC CFGCHIP1_CAP2SRC(0x12) +#define CFGCHIP1_CAP1SRC(n) ((n) << 22) +#define CFGCHIP1_CAP1SRC_MASK CFGCHIP1_CAP1SRC(0x1f) +#define CFGCHIP1_CAP1SRC_ECAP_PIN CFGCHIP1_CAP1SRC(0x0) +#define CFGCHIP1_CAP1SRC_MCASP0_TX CFGCHIP1_CAP1SRC(0x1) +#define CFGCHIP1_CAP1SRC_MCASP0_RX CFGCHIP1_CAP1SRC(0x2) +#define CFGCHIP1_CAP1SRC_EMAC_C0_RX_THRESHOLD CFGCHIP1_CAP1SRC(0x7) +#define CFGCHIP1_CAP1SRC_EMAC_C0_RX CFGCHIP1_CAP1SRC(0x8) +#define CFGCHIP1_CAP1SRC_EMAC_C0_TX CFGCHIP1_CAP1SRC(0x9) +#define CFGCHIP1_CAP1SRC_EMAC_C0_MISC CFGCHIP1_CAP1SRC(0xa) +#define CFGCHIP1_CAP1SRC_EMAC_C1_RX_THRESHOLD CFGCHIP1_CAP1SRC(0xb) +#define CFGCHIP1_CAP1SRC_EMAC_C1_RX CFGCHIP1_CAP1SRC(0xc) +#define CFGCHIP1_CAP1SRC_EMAC_C1_TX CFGCHIP1_CAP1SRC(0xd) +#define CFGCHIP1_CAP1SRC_EMAC_C1_MISC CFGCHIP1_CAP1SRC(0xe) +#define CFGCHIP1_CAP1SRC_EMAC_C2_RX_THRESHOLD CFGCHIP1_CAP1SRC(0xf) +#define CFGCHIP1_CAP1SRC_EMAC_C2_RX CFGCHIP1_CAP1SRC(0x10) +#define CFGCHIP1_CAP1SRC_EMAC_C2_TX CFGCHIP1_CAP1SRC(0x11) +#define CFGCHIP1_CAP1SRC_EMAC_C2_MISC CFGCHIP1_CAP1SRC(0x12) +#define CFGCHIP1_CAP0SRC(n) ((n) << 17) +#define CFGCHIP1_CAP0SRC_MASK CFGCHIP1_CAP0SRC(0x1f) +#define CFGCHIP1_CAP0SRC_ECAP_PIN CFGCHIP1_CAP0SRC(0x0) +#define CFGCHIP1_CAP0SRC_MCASP0_TX CFGCHIP1_CAP0SRC(0x1) +#define CFGCHIP1_CAP0SRC_MCASP0_RX CFGCHIP1_CAP0SRC(0x2) +#define CFGCHIP1_CAP0SRC_EMAC_C0_RX_THRESHOLD CFGCHIP1_CAP0SRC(0x7) +#define CFGCHIP1_CAP0SRC_EMAC_C0_RX CFGCHIP1_CAP0SRC(0x8) +#define CFGCHIP1_CAP0SRC_EMAC_C0_TX CFGCHIP1_CAP0SRC(0x9) +#define CFGCHIP1_CAP0SRC_EMAC_C0_MISC CFGCHIP1_CAP0SRC(0xa) +#define CFGCHIP1_CAP0SRC_EMAC_C1_RX_THRESHOLD CFGCHIP1_CAP0SRC(0xb) 
+#define CFGCHIP1_CAP0SRC_EMAC_C1_RX CFGCHIP1_CAP0SRC(0xc) +#define CFGCHIP1_CAP0SRC_EMAC_C1_TX CFGCHIP1_CAP0SRC(0xd) +#define CFGCHIP1_CAP0SRC_EMAC_C1_MISC CFGCHIP1_CAP0SRC(0xe) +#define CFGCHIP1_CAP0SRC_EMAC_C2_RX_THRESHOLD CFGCHIP1_CAP0SRC(0xf) +#define CFGCHIP1_CAP0SRC_EMAC_C2_RX CFGCHIP1_CAP0SRC(0x10) +#define CFGCHIP1_CAP0SRC_EMAC_C2_TX CFGCHIP1_CAP0SRC(0x11) +#define CFGCHIP1_CAP0SRC_EMAC_C2_MISC CFGCHIP1_CAP0SRC(0x12) +#define CFGCHIP1_HPIBYTEAD BIT(16) +#define CFGCHIP1_HPIENA BIT(15) +#define CFGCHIP0_EDMA31TC0DBS(n) ((n) << 13) +#define CFGCHIP0_EDMA31TC0DBS_MASK CFGCHIP0_EDMA31TC0DBS(0x3) +#define CFGCHIP0_EDMA31TC0DBS_16 CFGCHIP0_EDMA31TC0DBS(0x0) +#define CFGCHIP0_EDMA31TC0DBS_32 CFGCHIP0_EDMA31TC0DBS(0x1) +#define CFGCHIP0_EDMA31TC0DBS_64 CFGCHIP0_EDMA31TC0DBS(0x2) +#define CFGCHIP1_TBCLKSYNC BIT(12) +#define CFGCHIP1_AMUTESEL0(n) ((n) << 0) +#define CFGCHIP1_AMUTESEL0_MASK CFGCHIP1_AMUTESEL0(0xf) +#define CFGCHIP1_AMUTESEL0_LOW CFGCHIP1_AMUTESEL0(0x0) +#define CFGCHIP1_AMUTESEL0_BANK_0 CFGCHIP1_AMUTESEL0(0x1) +#define CFGCHIP1_AMUTESEL0_BANK_1 CFGCHIP1_AMUTESEL0(0x2) +#define CFGCHIP1_AMUTESEL0_BANK_2 CFGCHIP1_AMUTESEL0(0x3) +#define CFGCHIP1_AMUTESEL0_BANK_3 CFGCHIP1_AMUTESEL0(0x4) +#define CFGCHIP1_AMUTESEL0_BANK_4 CFGCHIP1_AMUTESEL0(0x5) +#define CFGCHIP1_AMUTESEL0_BANK_5 CFGCHIP1_AMUTESEL0(0x6) +#define CFGCHIP1_AMUTESEL0_BANK_6 CFGCHIP1_AMUTESEL0(0x7) +#define CFGCHIP1_AMUTESEL0_BANK_7 CFGCHIP1_AMUTESEL0(0x8) + +/* CFGCHIP2 (USB PHY) register bits */ +#define CFGCHIP2_PHYCLKGD BIT(17) +#define CFGCHIP2_VBUSSENSE BIT(16) +#define CFGCHIP2_RESET BIT(15) +#define CFGCHIP2_OTGMODE(n) ((n) << 13) +#define CFGCHIP2_OTGMODE_MASK CFGCHIP2_OTGMODE(0x3) +#define CFGCHIP2_OTGMODE_NO_OVERRIDE CFGCHIP2_OTGMODE(0x0) +#define CFGCHIP2_OTGMODE_FORCE_HOST CFGCHIP2_OTGMODE(0x1) +#define CFGCHIP2_OTGMODE_FORCE_DEVICE CFGCHIP2_OTGMODE(0x2) +#define CFGCHIP2_OTGMODE_FORCE_HOST_VBUS_LOW CFGCHIP2_OTGMODE(0x3) +#define CFGCHIP2_USB1PHYCLKMUX BIT(12) +#define CFGCHIP2_USB2PHYCLKMUX BIT(11) +#define CFGCHIP2_PHYPWRDN BIT(10) +#define CFGCHIP2_OTGPWRDN BIT(9) +#define CFGCHIP2_DATPOL BIT(8) +#define CFGCHIP2_USB1SUSPENDM BIT(7) +#define CFGCHIP2_PHY_PLLON BIT(6) +#define CFGCHIP2_SESENDEN BIT(5) +#define CFGCHIP2_VBDTCTEN BIT(4) +#define CFGCHIP2_REFFREQ(n) ((n) << 0) +#define CFGCHIP2_REFFREQ_MASK CFGCHIP2_REFFREQ(0xf) +#define CFGCHIP2_REFFREQ_12MHZ CFGCHIP2_REFFREQ(0x1) +#define CFGCHIP2_REFFREQ_24MHZ CFGCHIP2_REFFREQ(0x2) +#define CFGCHIP2_REFFREQ_48MHZ CFGCHIP2_REFFREQ(0x3) +#define CFGCHIP2_REFFREQ_19_2MHZ CFGCHIP2_REFFREQ(0x4) +#define CFGCHIP2_REFFREQ_38_4MHZ CFGCHIP2_REFFREQ(0x5) +#define CFGCHIP2_REFFREQ_13MHZ CFGCHIP2_REFFREQ(0x6) +#define CFGCHIP2_REFFREQ_26MHZ CFGCHIP2_REFFREQ(0x7) +#define CFGCHIP2_REFFREQ_20MHZ CFGCHIP2_REFFREQ(0x8) +#define CFGCHIP2_REFFREQ_40MHZ CFGCHIP2_REFFREQ(0x9) + +/* CFGCHIP3 (EMAC/uPP/PLL1/ASYNC3/PRU/DIV4.5/EMIFA) register bits */ +#define CFGCHIP3_RMII_SEL BIT(8) +#define CFGCHIP3_UPP_TX_CLKSRC BIT(6) +#define CFGCHIP3_PLL1_MASTER_LOCK BIT(5) +#define CFGCHIP3_ASYNC3_CLKSRC BIT(4) +#define CFGCHIP3_PRUEVTSEL BIT(3) +#define CFGCHIP3_DIV45PENA BIT(2) +#define CFGCHIP3_EMA_CLKSRC BIT(1) + +/* CFGCHIP4 (McASP0 AMUTEIN) register bits */ +#define CFGCHIP4_AMUTECLR0 BIT(0) + +#endif /* __LINUX_MFD_DA8XX_CFGCHIP_H */ diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h index c18a4c19d6fc..ce9230af09c2 100644 --- a/include/linux/mfd/da9052/da9052.h +++ b/include/linux/mfd/da9052/da9052.h @@ -171,7 +171,7 @@ static inline int 
da9052_group_read(struct da9052 *da9052, unsigned char reg, static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg, unsigned reg_cnt, unsigned char *val) { - int ret; + int ret = 0; int i; for (i = 0; i < reg_cnt; i++) { diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h index bf5109d38a26..5d374601404c 100644 --- a/include/linux/mfd/dbx500-prcmu.h +++ b/include/linux/mfd/dbx500-prcmu.h @@ -178,16 +178,6 @@ enum ddr_pwrst { #define DB8500_PRCMU_LEGACY_OFFSET 0xDD4 -struct prcmu_pdata -{ - bool enable_set_ddr_opp; - bool enable_ape_opp_100_voltage; - struct ab8500_platform_data *ab_platdata; - u32 version_offset; - u32 legacy_offset; - u32 adt_offset; -}; - #define PRCMU_FW_PROJECT_U8500 2 #define PRCMU_FW_PROJECT_U8400 3 #define PRCMU_FW_PROJECT_U9500 4 /* Customer specific */ diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h new file mode 100644 index 000000000000..62f03c2b1bb0 --- /dev/null +++ b/include/linux/mfd/hi655x-pmic.h @@ -0,0 +1,64 @@ +/* + * Device driver for regulators in hi655x IC + * + * Copyright (c) 2016 Hisilicon. + * + * Authors: + * Chen Feng <puck.chen@hisilicon.com> + * Fei Wang <w.f@huawei.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __HI655X_PMIC_H +#define __HI655X_PMIC_H + +/* Hi655x registers are mapped to memory bus in 4 bytes stride */ +#define HI655X_STRIDE 4 +#define HI655X_BUS_ADDR(x) ((x) << 2) + +#define HI655X_BITS 8 + +#define HI655X_NR_IRQ 32 + +#define HI655X_IRQ_STAT_BASE (0x003 << 2) +#define HI655X_IRQ_MASK_BASE (0x007 << 2) +#define HI655X_ANA_IRQM_BASE (0x1b5 << 2) +#define HI655X_IRQ_ARRAY 4 +#define HI655X_IRQ_MASK 0xFF +#define HI655X_IRQ_CLR 0xFF +#define HI655X_VER_REG 0x00 + +#define PMU_VER_START 0x10 +#define PMU_VER_END 0x38 + +#define RESERVE_INT 7 +#define PWRON_D20R_INT 6 +#define PWRON_D20F_INT 5 +#define PWRON_D4SR_INT 4 +#define VSYS_6P0_D200UR_INT 3 +#define VSYS_UV_D3R_INT 2 +#define VSYS_2P5_R_INT 1 +#define OTMP_D1R_INT 0 + +#define RESERVE_INT_MASK BIT(RESERVE_INT) +#define PWRON_D20R_INT_MASK BIT(PWRON_D20R_INT) +#define PWRON_D20F_INT_MASK BIT(PWRON_D20F_INT) +#define PWRON_D4SR_INT_MASK BIT(PWRON_D4SR_INT) +#define VSYS_6P0_D200UR_INT_MASK BIT(VSYS_6P0_D200UR_INT) +#define VSYS_UV_D3R_INT_MASK BIT(VSYS_UV_D3R_INT) +#define VSYS_2P5_R_INT_MASK BIT(VSYS_2P5_R_INT) +#define OTMP_D1R_INT_MASK BIT(OTMP_D1R_INT) + +struct hi655x_pmic { + struct resource *res; + struct device *dev; + struct regmap *regmap; + int gpio; + unsigned int ver; + struct regmap_irq_chip_data *irq_data; +}; + +#endif diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h new file mode 100644 index 000000000000..3ca0af07fc78 --- /dev/null +++ b/include/linux/mfd/max77620.h @@ -0,0 +1,346 @@ +/* + * Register addresses and bit definitions for the MAX77620 and MAX20024 + * + * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ */ + +#ifndef _MFD_MAX77620_H_ +#define _MFD_MAX77620_H_ + +#include <linux/types.h> + +/* GLOBAL, PMIC, GPIO, FPS, ONOFFC, CID Registers */ +#define MAX77620_REG_CNFGGLBL1 0x00 +#define MAX77620_REG_CNFGGLBL2 0x01 +#define MAX77620_REG_CNFGGLBL3 0x02 +#define MAX77620_REG_CNFG1_32K 0x03 +#define MAX77620_REG_CNFGBBC 0x04 +#define MAX77620_REG_IRQTOP 0x05 +#define MAX77620_REG_INTLBT 0x06 +#define MAX77620_REG_IRQSD 0x07 +#define MAX77620_REG_IRQ_LVL2_L0_7 0x08 +#define MAX77620_REG_IRQ_LVL2_L8 0x09 +#define MAX77620_REG_IRQ_LVL2_GPIO 0x0A +#define MAX77620_REG_ONOFFIRQ 0x0B +#define MAX77620_REG_NVERC 0x0C +#define MAX77620_REG_IRQTOPM 0x0D +#define MAX77620_REG_INTENLBT 0x0E +#define MAX77620_REG_IRQMASKSD 0x0F +#define MAX77620_REG_IRQ_MSK_L0_7 0x10 +#define MAX77620_REG_IRQ_MSK_L8 0x11 +#define MAX77620_REG_ONOFFIRQM 0x12 +#define MAX77620_REG_STATLBT 0x13 +#define MAX77620_REG_STATSD 0x14 +#define MAX77620_REG_ONOFFSTAT 0x15 + +/* SD and LDO Registers */ +#define MAX77620_REG_SD0 0x16 +#define MAX77620_REG_SD1 0x17 +#define MAX77620_REG_SD2 0x18 +#define MAX77620_REG_SD3 0x19 +#define MAX77620_REG_SD4 0x1A +#define MAX77620_REG_DVSSD0 0x1B +#define MAX77620_REG_DVSSD1 0x1C +#define MAX77620_REG_SD0_CFG 0x1D +#define MAX77620_REG_SD1_CFG 0x1E +#define MAX77620_REG_SD2_CFG 0x1F +#define MAX77620_REG_SD3_CFG 0x20 +#define MAX77620_REG_SD4_CFG 0x21 +#define MAX77620_REG_SD_CFG2 0x22 +#define MAX77620_REG_LDO0_CFG 0x23 +#define MAX77620_REG_LDO0_CFG2 0x24 +#define MAX77620_REG_LDO1_CFG 0x25 +#define MAX77620_REG_LDO1_CFG2 0x26 +#define MAX77620_REG_LDO2_CFG 0x27 +#define MAX77620_REG_LDO2_CFG2 0x28 +#define MAX77620_REG_LDO3_CFG 0x29 +#define MAX77620_REG_LDO3_CFG2 0x2A +#define MAX77620_REG_LDO4_CFG 0x2B +#define MAX77620_REG_LDO4_CFG2 0x2C +#define MAX77620_REG_LDO5_CFG 0x2D +#define MAX77620_REG_LDO5_CFG2 0x2E +#define MAX77620_REG_LDO6_CFG 0x2F +#define MAX77620_REG_LDO6_CFG2 0x30 +#define MAX77620_REG_LDO7_CFG 0x31 +#define MAX77620_REG_LDO7_CFG2 0x32 +#define MAX77620_REG_LDO8_CFG 0x33 +#define MAX77620_REG_LDO8_CFG2 0x34 +#define MAX77620_REG_LDO_CFG3 0x35 + +#define MAX77620_LDO_SLEW_RATE_MASK 0x1 + +/* LDO Configuration 3 */ +#define MAX77620_TRACK4_MASK BIT(5) +#define MAX77620_TRACK4_SHIFT 5 + +/* Voltage */ +#define MAX77620_SDX_VOLT_MASK 0xFF +#define MAX77620_SD0_VOLT_MASK 0x3F +#define MAX77620_SD1_VOLT_MASK 0x7F +#define MAX77620_LDO_VOLT_MASK 0x3F + +#define MAX77620_REG_GPIO0 0x36 +#define MAX77620_REG_GPIO1 0x37 +#define MAX77620_REG_GPIO2 0x38 +#define MAX77620_REG_GPIO3 0x39 +#define MAX77620_REG_GPIO4 0x3A +#define MAX77620_REG_GPIO5 0x3B +#define MAX77620_REG_GPIO6 0x3C +#define MAX77620_REG_GPIO7 0x3D +#define MAX77620_REG_PUE_GPIO 0x3E +#define MAX77620_REG_PDE_GPIO 0x3F +#define MAX77620_REG_AME_GPIO 0x40 +#define MAX77620_REG_ONOFFCNFG1 0x41 +#define MAX77620_REG_ONOFFCNFG2 0x42 + +/* FPS Registers */ +#define MAX77620_REG_FPS_CFG0 0x43 +#define MAX77620_REG_FPS_CFG1 0x44 +#define MAX77620_REG_FPS_CFG2 0x45 +#define MAX77620_REG_FPS_LDO0 0x46 +#define MAX77620_REG_FPS_LDO1 0x47 +#define MAX77620_REG_FPS_LDO2 0x48 +#define MAX77620_REG_FPS_LDO3 0x49 +#define MAX77620_REG_FPS_LDO4 0x4A +#define MAX77620_REG_FPS_LDO5 0x4B +#define MAX77620_REG_FPS_LDO6 0x4C +#define MAX77620_REG_FPS_LDO7 0x4D +#define MAX77620_REG_FPS_LDO8 0x4E +#define MAX77620_REG_FPS_SD0 0x4F +#define MAX77620_REG_FPS_SD1 0x50 +#define MAX77620_REG_FPS_SD2 0x51 +#define MAX77620_REG_FPS_SD3 0x52 +#define MAX77620_REG_FPS_SD4 0x53 +#define MAX77620_REG_FPS_NONE 0 + +#define MAX77620_FPS_SRC_MASK 
0xC0
+#define MAX77620_FPS_SRC_SHIFT 6
+#define MAX77620_FPS_PU_PERIOD_MASK 0x38
+#define MAX77620_FPS_PU_PERIOD_SHIFT 3
+#define MAX77620_FPS_PD_PERIOD_MASK 0x07
+#define MAX77620_FPS_PD_PERIOD_SHIFT 0
+#define MAX77620_FPS_TIME_PERIOD_MASK 0x38
+#define MAX77620_FPS_TIME_PERIOD_SHIFT 3
+#define MAX77620_FPS_EN_SRC_MASK 0x06
+#define MAX77620_FPS_EN_SRC_SHIFT 1
+#define MAX77620_FPS_ENFPS_SW_MASK 0x01
+#define MAX77620_FPS_ENFPS_SW 0x01
+
+/* Minimum and maximum FPS period time (in microseconds) are
+ * different for MAX77620 and MAX20024.
+ */
+#define MAX77620_FPS_PERIOD_MIN_US 40
+#define MAX20024_FPS_PERIOD_MIN_US 20
+
+#define MAX77620_FPS_PERIOD_MAX_US 2560
+#define MAX20024_FPS_PERIOD_MAX_US 5120
+
+#define MAX77620_REG_FPS_GPIO1 0x54
+#define MAX77620_REG_FPS_GPIO2 0x55
+#define MAX77620_REG_FPS_GPIO3 0x56
+#define MAX77620_REG_FPS_RSO 0x57
+#define MAX77620_REG_CID0 0x58
+#define MAX77620_REG_CID1 0x59
+#define MAX77620_REG_CID2 0x5A
+#define MAX77620_REG_CID3 0x5B
+#define MAX77620_REG_CID4 0x5C
+#define MAX77620_REG_CID5 0x5D
+
+#define MAX77620_REG_DVSSD4 0x5E
+#define MAX20024_REG_MAX_ADD 0x70
+
+#define MAX77620_CID_DIDM_MASK 0xF0
+#define MAX77620_CID_DIDM_SHIFT 4
+
+/* CNFG2SD */
+#define MAX77620_SD_CNF2_ROVS_EN_SD1 BIT(1)
+#define MAX77620_SD_CNF2_ROVS_EN_SD0 BIT(2)
+
+/* Device Identification Metal */
+#define MAX77620_CID5_DIDM(n) (((n) >> 4) & 0xF)
+/* Device Identification OTP */
+#define MAX77620_CID5_DIDO(n) ((n) & 0xF)
+
+/* SD CNFG1 */
+#define MAX77620_SD_SR_MASK 0xC0
+#define MAX77620_SD_SR_SHIFT 6
+#define MAX77620_SD_POWER_MODE_MASK 0x30
+#define MAX77620_SD_POWER_MODE_SHIFT 4
+#define MAX77620_SD_CFG1_ADE_MASK BIT(3)
+#define MAX77620_SD_CFG1_ADE_DISABLE 0
+#define MAX77620_SD_CFG1_ADE_ENABLE BIT(3)
+#define MAX77620_SD_FPWM_MASK 0x04
+#define MAX77620_SD_FPWM_SHIFT 2
+#define MAX77620_SD_FSRADE_MASK 0x01
+#define MAX77620_SD_FSRADE_SHIFT 0
+#define MAX77620_SD_CFG1_FPWM_SD_MASK BIT(2)
+#define MAX77620_SD_CFG1_FPWM_SD_SKIP 0
+#define MAX77620_SD_CFG1_FPWM_SD_FPWM BIT(2)
+#define MAX77620_SD_CFG1_FSRADE_SD_MASK BIT(0)
+#define MAX77620_SD_CFG1_FSRADE_SD_DISABLE 0
+#define MAX77620_SD_CFG1_FSRADE_SD_ENABLE BIT(0)
+
+/* LDO_CNFG2 */
+#define MAX77620_LDO_POWER_MODE_MASK 0xC0
+#define MAX77620_LDO_POWER_MODE_SHIFT 6
+#define MAX77620_LDO_CFG2_ADE_MASK BIT(1)
+#define MAX77620_LDO_CFG2_ADE_DISABLE 0
+#define MAX77620_LDO_CFG2_ADE_ENABLE BIT(1)
+#define MAX77620_LDO_CFG2_SS_MASK BIT(0)
+#define MAX77620_LDO_CFG2_SS_FAST BIT(0)
+#define MAX77620_LDO_CFG2_SS_SLOW 0
+
+#define MAX77620_IRQ_TOP_GLBL_MASK BIT(7)
+#define MAX77620_IRQ_TOP_SD_MASK BIT(6)
+#define MAX77620_IRQ_TOP_LDO_MASK BIT(5)
+#define MAX77620_IRQ_TOP_GPIO_MASK BIT(4)
+#define MAX77620_IRQ_TOP_RTC_MASK BIT(3)
+#define MAX77620_IRQ_TOP_32K_MASK BIT(2)
+#define MAX77620_IRQ_TOP_ONOFF_MASK BIT(1)
+
+#define MAX77620_IRQ_LBM_MASK BIT(3)
+#define MAX77620_IRQ_TJALRM1_MASK BIT(2)
+#define MAX77620_IRQ_TJALRM2_MASK BIT(1)
+
+#define MAX77620_PWR_I2C_ADDR 0x3c
+#define MAX77620_RTC_I2C_ADDR 0x68
+
+#define MAX77620_CNFG_GPIO_DRV_MASK BIT(0)
+#define MAX77620_CNFG_GPIO_DRV_PUSHPULL BIT(0)
+#define MAX77620_CNFG_GPIO_DRV_OPENDRAIN 0
+#define MAX77620_CNFG_GPIO_DIR_MASK BIT(1)
+#define MAX77620_CNFG_GPIO_DIR_INPUT BIT(1)
+#define MAX77620_CNFG_GPIO_DIR_OUTPUT 0
+#define MAX77620_CNFG_GPIO_INPUT_VAL_MASK BIT(2)
+#define MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK BIT(3)
+#define MAX77620_CNFG_GPIO_OUTPUT_VAL_HIGH BIT(3)
+#define MAX77620_CNFG_GPIO_OUTPUT_VAL_LOW 0
+#define MAX77620_CNFG_GPIO_INT_MASK (0x3 << 4)
+#define MAX77620_CNFG_GPIO_INT_FALLING BIT(4)
+#define MAX77620_CNFG_GPIO_INT_RISING BIT(5)
+#define MAX77620_CNFG_GPIO_DBNC_MASK (0x3 << 6)
+#define MAX77620_CNFG_GPIO_DBNC_None (0x0 << 6)
+#define MAX77620_CNFG_GPIO_DBNC_8ms (0x1 << 6)
+#define MAX77620_CNFG_GPIO_DBNC_16ms (0x2 << 6)
+#define MAX77620_CNFG_GPIO_DBNC_32ms (0x3 << 6)
+
+#define MAX77620_IRQ_LVL2_GPIO_EDGE0 BIT(0)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE1 BIT(1)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE2 BIT(2)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE3 BIT(3)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE4 BIT(4)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE5 BIT(5)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE6 BIT(6)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE7 BIT(7)
+
+#define MAX77620_CNFG1_32K_OUT0_EN BIT(2)
+
+#define MAX77620_ONOFFCNFG1_SFT_RST BIT(7)
+#define MAX77620_ONOFFCNFG1_MRT_MASK 0x38
+#define MAX77620_ONOFFCNFG1_MRT_SHIFT 0x3
+#define MAX77620_ONOFFCNFG1_SLPEN BIT(2)
+#define MAX77620_ONOFFCNFG1_PWR_OFF BIT(1)
+#define MAX20024_ONOFFCNFG1_CLRSE 0x18
+
+#define MAX77620_ONOFFCNFG2_SFT_RST_WK BIT(7)
+#define MAX77620_ONOFFCNFG2_WD_RST_WK BIT(6)
+#define MAX77620_ONOFFCNFG2_SLP_LPM_MSK BIT(5)
+#define MAX77620_ONOFFCNFG2_WK_ALARM1 BIT(2)
+#define MAX77620_ONOFFCNFG2_WK_EN0 BIT(0)
+
+#define MAX77620_GLBLM_MASK BIT(0)
+
+#define MAX77620_WDTC_MASK 0x3
+#define MAX77620_WDTOFFC BIT(4)
+#define MAX77620_WDTSLPC BIT(3)
+#define MAX77620_WDTEN BIT(2)
+
+#define MAX77620_TWD_MASK 0x3
+#define MAX77620_TWD_2s 0x0
+#define MAX77620_TWD_16s 0x1
+#define MAX77620_TWD_64s 0x2
+#define MAX77620_TWD_128s 0x3
+
+#define MAX77620_CNFGGLBL1_LBDAC_EN BIT(7)
+#define MAX77620_CNFGGLBL1_MPPLD BIT(6)
+#define MAX77620_CNFGGLBL1_LBHYST (BIT(5) | BIT(4))
+#define MAX77620_CNFGGLBL1_LBDAC 0x0E
+#define MAX77620_CNFGGLBL1_LBRSTEN BIT(0)
+
+/* CNFG BBC registers */
+#define MAX77620_CNFGBBC_ENABLE BIT(0)
+#define MAX77620_CNFGBBC_CURRENT_MASK 0x06
+#define MAX77620_CNFGBBC_CURRENT_SHIFT 1
+#define MAX77620_CNFGBBC_VOLTAGE_MASK 0x18
+#define MAX77620_CNFGBBC_VOLTAGE_SHIFT 3
+#define MAX77620_CNFGBBC_LOW_CURRENT_DISABLE BIT(5)
+#define MAX77620_CNFGBBC_RESISTOR_MASK 0xC0
+#define MAX77620_CNFGBBC_RESISTOR_SHIFT 6
+
+#define MAX77620_FPS_COUNT 3
+
+/* Interrupts */
+enum {
+	MAX77620_IRQ_TOP_GLBL,		/* Low-Battery */
+	MAX77620_IRQ_TOP_SD,		/* SD power fail */
+	MAX77620_IRQ_TOP_LDO,		/* LDO power fail */
+	MAX77620_IRQ_TOP_GPIO,		/* TOP GPIO internal int to MAX77620 */
+	MAX77620_IRQ_TOP_RTC,		/* RTC */
+	MAX77620_IRQ_TOP_32K,		/* 32kHz oscillator */
+	MAX77620_IRQ_TOP_ONOFF,		/* ON/OFF controller */
+	MAX77620_IRQ_LBT_MBATLOW,	/* Low main-battery voltage */
+	MAX77620_IRQ_LBT_TJALRM1,	/* Thermal alarm status, > 120C */
+	MAX77620_IRQ_LBT_TJALRM2,	/* Thermal alarm status, > 140C */
+};
+
+/* GPIOs */
+enum {
+	MAX77620_GPIO0,
+	MAX77620_GPIO1,
+	MAX77620_GPIO2,
+	MAX77620_GPIO3,
+	MAX77620_GPIO4,
+	MAX77620_GPIO5,
+	MAX77620_GPIO6,
+	MAX77620_GPIO7,
+	MAX77620_GPIO_NR,
+};
+
+/* FPS Source */
+enum max77620_fps_src {
+	MAX77620_FPS_SRC_0,
+	MAX77620_FPS_SRC_1,
+	MAX77620_FPS_SRC_2,
+	MAX77620_FPS_SRC_NONE,
+	MAX77620_FPS_SRC_DEF,
+};
+
+enum max77620_chip_id {
+	MAX77620,
+	MAX20024,
+};
+
+struct max77620_chip {
+	struct device *dev;
+	struct regmap *rmap;
+
+	int chip_irq;
+	int irq_base;
+
+	/* chip id */
+	enum max77620_chip_id chip_id;
+
+	bool sleep_enable;
+	bool enable_global_lpm;
+	int shutdown_fps_period[MAX77620_FPS_COUNT];
+	int suspend_fps_period[MAX77620_FPS_COUNT];
+
+	struct regmap_irq_chip_data *top_irq_data;
+	struct regmap_irq_chip_data *gpio_irq_data;
+};
+
+#endif /* _MFD_MAX77620_H_ */
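The FPS *_MASK/*_SHIFT pairs above are meant to be used with plain shift-and-mask
arithmetic when composing an FPS configuration byte. A minimal standalone sketch of
that pattern, assuming the usual field layout implied by the masks (fps_cfg and the
chosen field values are illustrative, not taken from the driver or datasheet):

#include <stdio.h>
#include <stdint.h>

#define MAX77620_FPS_SRC_MASK 0xC0
#define MAX77620_FPS_SRC_SHIFT 6
#define MAX77620_FPS_PU_PERIOD_MASK 0x38
#define MAX77620_FPS_PU_PERIOD_SHIFT 3
#define MAX77620_FPS_PD_PERIOD_MASK 0x07
#define MAX77620_FPS_PD_PERIOD_SHIFT 0

/* Compose an FPS config byte from source and power-up/down period fields. */
static uint8_t fps_cfg(uint8_t src, uint8_t pu_period, uint8_t pd_period)
{
	uint8_t val = 0;

	val |= (src << MAX77620_FPS_SRC_SHIFT) & MAX77620_FPS_SRC_MASK;
	val |= (pu_period << MAX77620_FPS_PU_PERIOD_SHIFT) & MAX77620_FPS_PU_PERIOD_MASK;
	val |= (pd_period << MAX77620_FPS_PD_PERIOD_SHIFT) & MAX77620_FPS_PD_PERIOD_MASK;
	return val;
}

int main(void)
{
	/* FPS source 1, power-up slot 2, power-down slot 5 -> 0x55 */
	printf("0x%02x\n", fps_cfg(1, 2, 5));
	return 0;
}

In the real driver such a value would be written to one of the MAX77620_REG_FPS_*
registers through regmap rather than printed.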
diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h
index c72d5344f3b3..cadc6543909d 100644
--- a/include/linux/mfd/rn5t618.h
+++ b/include/linux/mfd/rn5t618.h
@@ -20,6 +20,7 @@
#define RN5T618_OTPVER 0x01
#define RN5T618_IODAC 0x02
#define RN5T618_VINDAC 0x03
+#define RN5T618_OUT32KEN 0x05
#define RN5T618_CPUCNT 0x06
#define RN5T618_PSWR 0x07
#define RN5T618_PONHIS 0x09
@@ -38,6 +39,7 @@
#define RN5T618_DC1_SLOT 0x16
#define RN5T618_DC2_SLOT 0x17
#define RN5T618_DC3_SLOT 0x18
+#define RN5T618_DC4_SLOT 0x19
#define RN5T618_LDO1_SLOT 0x1b
#define RN5T618_LDO2_SLOT 0x1c
#define RN5T618_LDO3_SLOT 0x1d
@@ -54,12 +56,16 @@
#define RN5T618_DC2CTL2 0x2f
#define RN5T618_DC3CTL 0x30
#define RN5T618_DC3CTL2 0x31
+#define RN5T618_DC4CTL 0x32
+#define RN5T618_DC4CTL2 0x33
#define RN5T618_DC1DAC 0x36
#define RN5T618_DC2DAC 0x37
#define RN5T618_DC3DAC 0x38
+#define RN5T618_DC4DAC 0x39
#define RN5T618_DC1DAC_SLP 0x3b
#define RN5T618_DC2DAC_SLP 0x3c
#define RN5T618_DC3DAC_SLP 0x3d
+#define RN5T618_DC4DAC_SLP 0x3e
#define RN5T618_DCIREN 0x40
#define RN5T618_DCIRQ 0x41
#define RN5T618_DCIRMON 0x42
@@ -211,6 +217,7 @@ enum {
	RN5T618_DCDC1,
	RN5T618_DCDC2,
	RN5T618_DCDC3,
+	RN5T618_DCDC4,
	RN5T618_LDO1,
	RN5T618_LDO2,
	RN5T618_LDO3,
@@ -221,8 +228,14 @@ enum {
	RN5T618_REG_NUM,
};

+enum {
+	RN5T567 = 0,
+	RN5T618,
+};
+
struct rn5t618 {
	struct regmap *regmap;
+	long variant;
};

#endif /* __LINUX_MFD_RN5T618_H */
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 6bc4bcd488ac..5a23dd4df432 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -30,6 +30,9 @@
#define MIN_600_MV 600000
#define MIN_500_MV 500000

+/* Ramp delay in uV/us */
+#define RAMP_DELAY_12_MVUS 12000
+
/* Macros to represent steps for LDO/BUCK */
#define STEP_50_MV 50000
#define STEP_25_MV 25000
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index cb83883918a7..de748bc7525e 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -62,6 +62,7 @@ enum {
struct stmpe_variant_info;
struct stmpe_client_info;
+struct stmpe_platform_data;

/**
 * struct stmpe - STMPE MFD structure
@@ -117,25 +118,4 @@ extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks);

#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0)

-/**
- * struct stmpe_platform_data - STMPE platform data
- * @id: device id to distinguish between multiple STMPEs on the same board
- * @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*)
- * @irq_trigger: IRQ trigger to use for the interrupt to the host
- * @autosleep: bool to enable/disable stmpe autosleep
- * @autosleep_timeout: inactivity timeout in milliseconds for autosleep
- * @irq_over_gpio: true if gpio is used to get irq
- * @irq_gpio: gpio number over which irq will be requested (significant only if
- * irq_over_gpio is true)
- */
-struct stmpe_platform_data {
-	int id;
-	unsigned int blocks;
-	unsigned int irq_trigger;
-	bool autosleep;
-	bool irq_over_gpio;
-	int irq_gpio;
-	int autosleep_timeout;
-};
-
#endif
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index 1088149be0c9..40a76b97b7ab 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -16,6 +16,7 @@
#define __LINUX_MFD_SYSCON_H__

#include <linux/err.h>
+#include <linux/errno.h>

struct device_node;
diff --git a/include/linux/mfd/syscon/exynos5-pmu.h b/include/linux/mfd/syscon/exynos5-pmu.h
index 9352adc95de6..76f30f940c70 100644
--- a/include/linux/mfd/syscon/exynos5-pmu.h
+++ 
b/include/linux/mfd/syscon/exynos5-pmu.h @@ -38,6 +38,9 @@ /* Exynos5433 specific register definitions */ #define EXYNOS5433_USBHOST30_PHY_CONTROL (0x728) +#define EXYNOS5433_MIPI_PHY0_CONTROL (0x710) +#define EXYNOS5433_MIPI_PHY1_CONTROL (0x714) +#define EXYNOS5433_MIPI_PHY2_CONTROL (0x718) #define EXYNOS5_PHY_ENABLE BIT(0) diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h index 238c8db953eb..c8e0164c5423 100644 --- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h +++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h @@ -95,6 +95,7 @@ #define IMX6Q_GPR0_DMAREQ_MUX_SEL0_IOMUX BIT(0) #define IMX6Q_GPR1_PCIE_REQ_MASK (0x3 << 30) +#define IMX6Q_GPR1_PCIE_SW_RST BIT(29) #define IMX6Q_GPR1_PCIE_EXIT_L1 BIT(28) #define IMX6Q_GPR1_PCIE_RDY_L23 BIT(27) #define IMX6Q_GPR1_PCIE_ENTER_L1 BIT(26) @@ -447,5 +448,11 @@ #define IMX6UL_GPR1_ENET2_CLK_OUTPUT (0x1 << 18) #define IMX6UL_GPR1_ENET_CLK_DIR (0x3 << 17) #define IMX6UL_GPR1_ENET_CLK_OUTPUT (0x3 << 17) +#define IMX6UL_GPR1_SAI1_MCLK_DIR (0x1 << 19) +#define IMX6UL_GPR1_SAI2_MCLK_DIR (0x1 << 20) +#define IMX6UL_GPR1_SAI3_MCLK_DIR (0x1 << 21) +#define IMX6UL_GPR1_SAI_MCLK_MASK (0x7 << 19) +#define MCLK_DIR(x) (x == 1 ? IMX6UL_GPR1_SAI1_MCLK_DIR : x == 2 ? \ + IMX6UL_GPR1_SAI2_MCLK_DIR : IMX6UL_GPR1_SAI3_MCLK_DIR) #endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */ diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h index 1fd50dcfe47c..7f55b8b41032 100644 --- a/include/linux/mfd/ti_am335x_tscadc.h +++ b/include/linux/mfd/ti_am335x_tscadc.h @@ -138,22 +138,22 @@ /* * time in us for processing a single channel, calculated as follows: * - * num cycles = open delay + (sample delay + conv time) * averaging + * max num cycles = open delay + (sample delay + conv time) * averaging * - * num cycles: 152 + (1 + 13) * 16 = 376 + * max num cycles: 262143 + (255 + 13) * 16 = 266431 * * clock frequency: 26MHz / 8 = 3.25MHz * clock period: 1 / 3.25MHz = 308ns * - * processing time: 376 * 308ns = 116us + * max processing time: 266431 * 308ns = 83ms(approx) */ -#define IDLE_TIMEOUT 116 /* microsec */ +#define IDLE_TIMEOUT 83 /* milliseconds */ #define TSCADC_CELLS 2 struct ti_tscadc_dev { struct device *dev; - struct regmap *regmap_tscadc; + struct regmap *regmap; void __iomem *tscadc_base; int irq; int used_cells; /* 1-2 */ diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 05d58ee5e6a7..7a26286db895 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -66,8 +66,8 @@ */ #define TMIO_MMC_SDIO_IRQ (1 << 2) -/* Some controllers don't need to wait 10ms for clock changes */ -#define TMIO_MMC_FAST_CLK_CHG (1 << 3) +/* Some features are only available or tested on RCar Gen2 or later */ +#define TMIO_MMC_MIN_RCAR2 (1 << 3) /* * Some controllers require waiting for the SD bus to become diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h index ac7fba44d7e4..1c88231496d3 100644 --- a/include/linux/mfd/tps65217.h +++ b/include/linux/mfd/tps65217.h @@ -257,6 +257,7 @@ struct tps65217 { unsigned long id; struct regulator_desc desc[TPS65217_NUM_REGULATOR]; struct regmap *regmap; + u8 *strobes; }; static inline struct tps65217 *dev_to_tps65217(struct device *dev) diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h index d58f3b5f585a..7fdf5326f34e 100644 --- a/include/linux/mfd/tps65218.h +++ b/include/linux/mfd/tps65218.h @@ -246,6 +246,7 @@ enum tps65218_irqs { * @name: Voltage regulator name * @min_uV: minimum micro volts * 
@max_uV: maximum micro volts
+ * @strobe: sequencing strobe value for the regulator
 *
 * This data is used to check the regulator voltage limits while setting.
 */
@@ -254,6 +255,7 @@ struct tps_info {
	const char *name;
	int min_uV;
	int max_uV;
+	int strobe;
};

/**
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index 8f9fc3d26e6d..36795a1be479 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -134,6 +134,7 @@
#define TWL6040_HFDACENA (1 << 0)
#define TWL6040_HFPGAENA (1 << 1)
#define TWL6040_HFDRVENA (1 << 4)
+#define TWL6040_HFSWENA (1 << 6)

/* VIBCTLL/R (0x18/0x1A) fields */

@@ -225,6 +226,7 @@ struct twl6040 {
	struct regmap_irq_chip_data *irq_data;
	struct regulator_bulk_data supplies[2]; /* supplies for vio, v2v1 */
	struct clk *clk32k;
+	struct clk *mclk;
	struct mutex mutex;
	struct mutex irq_mutex;
	struct mfd_cell cells[TWL6040_CELLS];
@@ -236,8 +238,8 @@ struct twl6040 {

	/* PLL configuration */
	int pll;
-	unsigned int sysclk;
-	unsigned int mclk;
+	unsigned int sysclk_rate;
+	unsigned int mclk_rate;
	unsigned int irq;
	unsigned int irq_ready;
diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h
index 2de565b94d0c..4ee908f5b834 100644
--- a/include/linux/mfd/wm8400-private.h
+++ b/include/linux/mfd/wm8400-private.h
@@ -923,7 +923,6 @@ struct wm8400 {
#define WM8400_LINE_CMP_VTHD_SHIFT 0 /* LINE_CMP_VTHD - [3:0] */
#define WM8400_LINE_CMP_VTHD_WIDTH 4 /* LINE_CMP_VTHD - [3:0] */

-u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg);
int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data);

static inline int wm8400_set_bits(struct wm8400 *wm8400, u8 reg,
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 2e5b194b9b19..257173e0095e 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -37,6 +37,7 @@

/* struct phy_device dev_flags definitions */
#define MICREL_PHY_50MHZ_CLK 0x00000001
+#define MICREL_PHY_FXEN 0x00000002

#define MICREL_KSZ9021_EXTREG_CTRL 0xB
#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 9b50325e4ddf..ae8d475a9385 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -37,6 +37,8 @@ extern int migrate_page(struct address_space *,
			struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
+extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+extern void putback_movable_page(struct page *page);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
@@ -69,6 +71,21 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,

#endif /* CONFIG_MIGRATION */

+#ifdef CONFIG_COMPACTION
+extern int PageMovable(struct page *page);
+extern void __SetPageMovable(struct page *page, struct address_space *mapping);
+extern void __ClearPageMovable(struct page *page);
+#else
+static inline int PageMovable(struct page *page) { return 0; };
+static inline void __SetPageMovable(struct page *page,
+				struct address_space *mapping)
+{
+}
+static inline void __ClearPageMovable(struct page *page)
+{
+}
+#endif
+
#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
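The PageMovable()/__SetPageMovable()/__ClearPageMovable() declarations above are the
owner-facing side of non-LRU page migration. A minimal kernel-style sketch of how a
page owner might opt its pages into compaction, assuming a driver-owned struct
address_space whose a_ops supply the matching isolate/migrate/putback callbacks (the
demo_* names are hypothetical, not part of this diff):

#include <linux/migrate.h>
#include <linux/mm.h>

/* Tag a driver page as non-LRU movable; compaction will route it through
 * the mapping's migration callbacks instead of the LRU path. */
static void demo_make_page_movable(struct page *page,
				   struct address_space *mapping)
{
	__SetPageMovable(page, mapping);
}

/* Withdraw the page from compaction again before tearing it down. */
static void demo_make_page_sticky(struct page *page)
{
	if (PageMovable(page))
		__ClearPageMovable(page);
}

Note how the !CONFIG_COMPACTION stubs above let such callers compile unchanged when
compaction is disabled: PageMovable() simply reports 0 and the __Set/__Clear helpers
become no-ops.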
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index d1f904c8b2cb..42da3552f7cb 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -220,6 +220,7 @@ enum {
	MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1ULL << 32,
	MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 = 1ULL << 33,
	MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER = 1ULL << 34,
+	MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT = 1ULL << 35,
};

enum {
@@ -466,6 +467,7 @@ enum {
enum {
	MLX4_INTERFACE_STATE_UP = 1 << 0,
	MLX4_INTERFACE_STATE_DELETION = 1 << 1,
+	MLX4_INTERFACE_STATE_SHUTDOWN = 1 << 2,
};

#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
@@ -535,6 +537,7 @@ struct mlx4_caps {
	int max_rq_desc_sz;
	int max_qp_init_rdma;
	int max_qp_dest_rdma;
+	int max_tc_eth;
	u32 *qp0_qkey;
	u32 *qp0_proxy;
	u32 *qp1_proxy;
@@ -1058,7 +1061,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
-	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
+	if (buf->nbufs == 1)
		return buf->direct.buf + offset;
	else
		return buf->page_list[offset >> PAGE_SHIFT].buf +
@@ -1098,7 +1101,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order,
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);

int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
-		       int size, int max_direct);
+		       int size);
void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
		       int size);

@@ -1340,6 +1343,9 @@ enum {
	VXLAN_STEER_BY_INNER_VLAN = 1 << 4,
};

+enum {
+	MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS = 0x2,
+};

int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
				enum mlx4_net_trans_promisc_mode mode);
@@ -1380,6 +1386,9 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupts(struct mlx4_dev *dev);
+int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
+			     const u32 offset[], u32 value[],
+			     size_t array_len, u8 port);
u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port);
bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector);
struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port);
@@ -1494,6 +1503,7 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,

int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
			 u16 offset, u16 size, u8 *data);
+int mlx4_max_tc(struct mlx4_dev *dev);

/* Returns true if running in low memory profile (kdump kernel) */
static inline bool mlx4_low_memory_profile(void)
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 587cdf943b52..deaa2217214d 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -291,16 +291,18 @@ enum {
	MLX4_WQE_CTRL_FORCE_LOOPBACK = 1 << 0,
};

+union mlx4_wqe_qpn_vlan {
+	struct {
+		__be16 vlan_tag;
+		u8 ins_vlan;
+		u8 fence_size;
+	};
+	__be32 bf_qpn;
+};
+
struct mlx4_wqe_ctrl_seg {
	__be32 owner_opcode;
-	union {
-		struct {
-			__be16 vlan_tag;
-			u8 ins_vlan;
-			u8 fence_size;
-		};
-		__be32 bf_qpn;
-	};
+	union mlx4_wqe_qpn_vlan qpn_vlan;
	/*
	 * High 24 bits are SRC remote buffer; low 8 bits are flags:
	 * [7] SO (strong ordering)
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index b2c9fada8eac..2566f6d6444f 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -53,6 +53,13 @@ struct mlx5_core_cq {
	unsigned arm_sn;
	struct mlx5_rsc_debug *dbg;
	int pid;
+	struct {
+		struct list_head list;
+		void (*comp)(struct mlx5_core_cq *);
+		void *priv;
+	} tasklet_ctx;
+	int
reset_notify_added; + struct list_head reset_notify; }; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index b3575f392492..0b6d15cddb2f 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -59,6 +59,7 @@ #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8) #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8) #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32) +#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64) #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8) #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32) #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) @@ -128,6 +129,13 @@ __mlx5_mask(typ, fld)) tmp; \ }) +enum mlx5_inline_modes { + MLX5_INLINE_MODE_NONE, + MLX5_INLINE_MODE_L2, + MLX5_INLINE_MODE_IP, + MLX5_INLINE_MODE_TCP_UDP, +}; + enum { MLX5_MAX_COMMANDS = 32, MLX5_CMD_DATA_BLOCK_SIZE = 512, @@ -655,7 +663,9 @@ struct mlx5_err_cqe { }; struct mlx5_cqe64 { - u8 rsvd0[4]; + u8 outer_l3_tunneled; + u8 rsvd0; + __be16 wqe_id; u8 lro_tcppsh_abort_dupack; u8 lro_min_ttl; __be16 lro_tcp_win; @@ -668,7 +678,7 @@ struct mlx5_cqe64 { __be16 slid; __be32 flags_rqpn; u8 hds_ip_ext; - u8 l4_hdr_type_etc; + u8 l4_l3_hdr_type; __be16 vlan_info; __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */ __be32 imm_inval_pkey; @@ -682,6 +692,40 @@ struct mlx5_cqe64 { u8 op_own; }; +struct mlx5_mini_cqe8 { + union { + __be32 rx_hash_result; + struct { + __be16 checksum; + __be16 rsvd; + }; + struct { + __be16 wqe_counter; + u8 s_wqe_opcode; + u8 reserved; + } s_wqe_info; + }; + __be32 byte_cnt; +}; + +enum { + MLX5_NO_INLINE_DATA, + MLX5_INLINE_DATA32_SEG, + MLX5_INLINE_DATA64_SEG, + MLX5_COMPRESSED, +}; + +enum { + MLX5_CQE_FORMAT_CSUM = 0x1, +}; + +#define MLX5_MINI_CQE_ARRAY_SIZE 8 + +static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe) +{ + return (cqe->op_own >> 2) & 0x3; +} + static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) { return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; @@ -689,12 +733,22 @@ static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe) { - return (cqe->l4_hdr_type_etc >> 4) & 0x7; + return (cqe->l4_l3_hdr_type >> 4) & 0x7; +} + +static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe) +{ + return (cqe->l4_l3_hdr_type >> 2) & 0x3; +} + +static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe) +{ + return cqe->outer_l3_tunneled & 0x1; } static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe) { - return !!(cqe->l4_hdr_type_etc & 0x1); + return !!(cqe->l4_l3_hdr_type & 0x1); } static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe) @@ -707,6 +761,42 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe) return (u64)lo | ((u64)hi << 32); } +struct mpwrq_cqe_bc { + __be16 filler_consumed_strides; + __be16 byte_cnt; +}; + +static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe) +{ + struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt; + + return be16_to_cpu(bc->byte_cnt); +} + +static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc) +{ + return 0x7fff & be16_to_cpu(bc->filler_consumed_strides); +} + +static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe) +{ + struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt; + + return mpwrq_get_cqe_bc_consumed_strides(bc); +} + +static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe) +{ + struct mpwrq_cqe_bc *bc = 
(struct mpwrq_cqe_bc *)&cqe->byte_cnt; + + return 0x8000 & be16_to_cpu(bc->filler_consumed_strides); +} + +static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe) +{ + return be16_to_cpu(cqe->wqe_counter); +} + enum { CQE_L4_HDR_TYPE_NONE = 0x0, CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1, @@ -1157,8 +1247,6 @@ struct mlx5_destroy_psv_out { u8 rsvd[8]; }; -#define MLX5_CMD_OP_MAX 0x920 - enum { VPORT_STATE_DOWN = 0x0, VPORT_STATE_UP = 0x1, @@ -1249,6 +1337,7 @@ enum mlx5_cap_type { MLX5_CAP_ESWITCH, MLX5_CAP_RESERVED, MLX5_CAP_VECTOR_CALC, + MLX5_CAP_QOS, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1286,6 +1375,12 @@ enum mlx5_cap_type { #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) +#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \ + MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap) + +#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \ + MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap) + #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) @@ -1300,6 +1395,18 @@ enum mlx5_cap_type { #define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap) +#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap) + +#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap) + +#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap) + +#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap) + #define MLX5_CAP_ESW(mdev, cap) \ MLX5_GET(e_switch_cap, \ mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap) @@ -1315,6 +1422,9 @@ enum mlx5_cap_type { MLX5_GET(vector_calc_cap, \ mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap) +#define MLX5_CAP_QOS(mdev, cap)\ + MLX5_GET(qos_cap, mdev->hca_caps_cur[MLX5_CAP_QOS], cap) + enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, @@ -1342,6 +1452,7 @@ enum { MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5, MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11, + MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12, MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, }; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 369c837d40f5..ccea6fb16482 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -41,9 +41,16 @@ #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/radix-tree.h> +#include <linux/workqueue.h> +#include <linux/interrupt.h> #include <linux/mlx5/device.h> #include <linux/mlx5/doorbell.h> +#include <linux/mlx5/srq.h> + +enum { + MLX5_RQ_BITMASK_VSD = 1 << 1, +}; enum { MLX5_BOARD_ID_LEN = 64, @@ -112,9 +119,12 @@ enum { MLX5_REG_PMPE = 0x5010, MLX5_REG_PELC = 0x500e, MLX5_REG_PVLC = 0x500f, - MLX5_REG_PMLP = 0, /* TBD */ + MLX5_REG_PCMR = 0x5041, + MLX5_REG_PMLP = 0x5002, MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, + MLX5_REG_MCIA = 0x9014, + MLX5_REG_MLCR = 0x902b, }; enum { @@ -304,6 +314,14 @@ struct mlx5_buf { u8 page_shift; }; +struct mlx5_eq_tasklet { + struct list_head list; + struct list_head process_list; + struct tasklet_struct task; + /* lock on completion tasklet list */ + spinlock_t lock; +}; + struct mlx5_eq { struct mlx5_core_dev *dev; __be32 __iomem 
*doorbell; @@ -317,6 +335,7 @@ struct mlx5_eq { struct list_head list; int index; struct mlx5_rsc_debug *dbg; + struct mlx5_eq_tasklet tasklet_ctx; }; struct mlx5_core_psv { @@ -450,8 +469,34 @@ struct mlx5_irq_info { char name[MLX5_MAX_IRQ_NAME]; }; +struct mlx5_fc_stats { + struct rb_root counters; + struct list_head addlist; + /* protect addlist add/splice operations */ + spinlock_t addlist_lock; + + struct workqueue_struct *wq; + struct delayed_work work; + unsigned long next_query; +}; + struct mlx5_eswitch; +struct mlx5_rl_entry { + u32 rate; + u16 index; + u16 refcount; +}; + +struct mlx5_rl_table { + /* protect rate limit table */ + struct mutex rl_lock; + u16 max_size; + u32 max_rate; + u32 min_rate; + struct mlx5_rl_entry *rl_entry; +}; + struct mlx5_priv { char name[MLX5_MAX_NAME_LEN]; struct mlx5_eq_table eq_table; @@ -506,11 +551,12 @@ struct mlx5_priv { struct list_head ctx_list; spinlock_t ctx_lock; + struct mlx5_flow_steering *steering; struct mlx5_eswitch *eswitch; struct mlx5_core_sriov sriov; unsigned long pci_dev_data; - struct mlx5_flow_root_namespace *root_ns; - struct mlx5_flow_root_namespace *fdb_root_ns; + struct mlx5_fc_stats fc_stats; + struct mlx5_rl_table rl_table; }; enum mlx5_device_state { @@ -529,6 +575,18 @@ enum mlx5_pci_status { MLX5_PCI_STATUS_ENABLED, }; +struct mlx5_td { + struct list_head tirs_list; + u32 tdn; +}; + +struct mlx5e_resources { + struct mlx5_uar cq_uar; + u32 pdn; + struct mlx5_td td; + struct mlx5_core_mkey mkey; +}; + struct mlx5_core_dev { struct pci_dev *pdev; /* sync pci state */ @@ -553,6 +611,10 @@ struct mlx5_core_dev { struct mlx5_profile *profile; atomic_t num_qps; u32 issi; + struct mlx5e_resources mlx5e_res; +#ifdef CONFIG_RFS_ACCEL + struct cpu_rmap *rmap; +#endif }; struct mlx5_db { @@ -593,6 +655,7 @@ struct mlx5_cmd_work_ent { void *uout; int uout_size; mlx5_cmd_cbk_t callback; + struct delayed_work cb_timeout_work; void *context; int idx; struct completion done; @@ -736,11 +799,10 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, struct mlx5_cmd_mailbox *head); int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_create_srq_mbox_in *in, int inlen, - int is_xrc); + struct mlx5_srq_attr *in); int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq); int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_query_srq_mbox_out *out); + struct mlx5_srq_attr *out); int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm, int is_srq); void mlx5_init_mkey_table(struct mlx5_core_dev *dev); @@ -825,6 +887,12 @@ int mlx5_query_odp_caps(struct mlx5_core_dev *dev, int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, u8 port_num, void *out, size_t sz); +int mlx5_init_rl_table(struct mlx5_core_dev *dev); +void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); +int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index); +void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate); +bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate); + static inline int fw_initializing(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->initializing) >> 31; @@ -902,6 +970,11 @@ static inline int mlx5_get_gid_table_len(u16 param) return 8 * (1 << param); } +static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev) +{ + return !!(dev->priv.rl_table.max_size); +} + enum { MLX5_TRIGGERED_CMD_COMP = 
(u64)1 << 32, }; diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 8dec5508d93d..e036d6030867 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -54,10 +54,14 @@ static inline void build_leftovers_ft_param(int *priority, enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_BYPASS, + MLX5_FLOW_NAMESPACE_OFFLOADS, + MLX5_FLOW_NAMESPACE_ETHTOOL, MLX5_FLOW_NAMESPACE_KERNEL, MLX5_FLOW_NAMESPACE_LEFTOVERS, MLX5_FLOW_NAMESPACE_ANCHOR, MLX5_FLOW_NAMESPACE_FDB, + MLX5_FLOW_NAMESPACE_ESW_EGRESS, + MLX5_FLOW_NAMESPACE_ESW_INGRESS, }; struct mlx5_flow_table; @@ -65,12 +69,19 @@ struct mlx5_flow_group; struct mlx5_flow_rule; struct mlx5_flow_namespace; +struct mlx5_flow_spec { + u8 match_criteria_enable; + u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)]; + u32 match_value[MLX5_ST_SZ_DW(fte_match_param)]; +}; + struct mlx5_flow_destination { enum mlx5_flow_destination_type type; union { u32 tir_num; struct mlx5_flow_table *ft; u32 vport_num; + struct mlx5_fc *counter; }; }; @@ -82,12 +93,19 @@ struct mlx5_flow_table * mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, int prio, int num_flow_table_entries, - int max_num_groups); + int max_num_groups, + u32 level); struct mlx5_flow_table * mlx5_create_flow_table(struct mlx5_flow_namespace *ns, int prio, - int num_flow_table_entries); + int num_flow_table_entries, + u32 level); +struct mlx5_flow_table * +mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, + int prio, + int num_flow_table_entries, + u32 level, u16 vport); int mlx5_destroy_flow_table(struct mlx5_flow_table *ft); /* inbox should be set with the following values: @@ -105,12 +123,19 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg); */ struct mlx5_flow_rule * mlx5_add_flow_rule(struct mlx5_flow_table *ft, - u8 match_criteria_enable, - u32 *match_criteria, - u32 *match_value, + struct mlx5_flow_spec *spec, u32 action, u32 flow_tag, struct mlx5_flow_destination *dest); void mlx5_del_flow_rule(struct mlx5_flow_rule *fr); +int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, + struct mlx5_flow_destination *dest); + +struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule); +struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); +void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); +void mlx5_fc_query_cached(struct mlx5_fc *counter, + u64 *bytes, u64 *packets, u64 *lastuse); + #endif diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index c15b8a864937..d1f9a581aca8 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -123,6 +123,10 @@ enum { MLX5_CMD_OP_DRAIN_DCT = 0x712, MLX5_CMD_OP_QUERY_DCT = 0x713, MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714, + MLX5_CMD_OP_CREATE_XRQ = 0x717, + MLX5_CMD_OP_DESTROY_XRQ = 0x718, + MLX5_CMD_OP_QUERY_XRQ = 0x719, + MLX5_CMD_OP_ARM_XRQ = 0x71a, MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752, @@ -139,6 +143,8 @@ enum { MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, + MLX5_CMD_OP_SET_RATE_LIMIT = 0x780, + MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781, MLX5_CMD_OP_ALLOC_PD = 0x800, MLX5_CMD_OP_DEALLOC_PD = 0x801, MLX5_CMD_OP_ALLOC_UAR = 0x802, @@ -202,7 +208,11 @@ enum { MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936, MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938, - MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c + 
MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939, + MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, + MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, + MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c, + MLX5_CMD_OP_MAX }; struct mlx5_ifc_flow_table_fields_supported_bits { @@ -265,7 +275,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits { struct mlx5_ifc_flow_table_prop_layout_bits { u8 ft_support[0x1]; - u8 reserved_at_1[0x2]; + u8 reserved_at_1[0x1]; + u8 flow_counter[0x1]; u8 flow_modify_en[0x1]; u8 modify_root[0x1]; u8 identified_miss_table_mode[0x1]; @@ -357,7 +368,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits { }; struct mlx5_ifc_fte_match_set_misc_bits { - u8 reserved_at_0[0x20]; + u8 reserved_at_0[0x8]; + u8 source_sqn[0x18]; u8 reserved_at_20[0x10]; u8 source_port[0x10]; @@ -496,11 +508,24 @@ struct mlx5_ifc_e_switch_cap_bits { u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert_if_not_exist[0x1]; u8 vport_cvlan_insert_overwrite[0x1]; - u8 reserved_at_5[0x1b]; + u8 reserved_at_5[0x19]; + u8 nic_vport_node_guid_modify[0x1]; + u8 nic_vport_port_guid_modify[0x1]; u8 reserved_at_20[0x7e0]; }; +struct mlx5_ifc_qos_cap_bits { + u8 packet_pacing[0x1]; + u8 reserved_0[0x1f]; + u8 reserved_1[0x20]; + u8 packet_pacing_max_rate[0x20]; + u8 packet_pacing_min_rate[0x20]; + u8 reserved_2[0x10]; + u8 packet_pacing_rate_table_size[0x10]; + u8 reserved_3[0x760]; +}; + struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 csum_cap[0x1]; u8 vlan_cap[0x1]; @@ -511,9 +536,12 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 self_lb_en_modifiable[0x1]; u8 reserved_at_9[0x2]; u8 max_lso_cap[0x5]; - u8 reserved_at_10[0x4]; + u8 reserved_at_10[0x2]; + u8 wqe_inline_mode[0x2]; u8 rss_ind_tbl_cap[0x4]; - u8 reserved_at_18[0x3]; + u8 reg_umr_sq[0x1]; + u8 scatter_fcs[0x1]; + u8 reserved_at_1a[0x1]; u8 tunnel_lso_const_out_ip_id[0x1]; u8 reserved_at_1c[0x2]; u8 tunnel_statless_gre[0x1]; @@ -648,7 +676,7 @@ struct mlx5_ifc_vector_calc_cap_bits { enum { MLX5_WQ_TYPE_LINKED_LIST = 0x0, MLX5_WQ_TYPE_CYCLIC = 0x1, - MLX5_WQ_TYPE_STRQ = 0x2, + MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2, }; enum { @@ -738,7 +766,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 out_of_seq_cnt[0x1]; u8 vport_counters[0x1]; - u8 reserved_at_182[0x4]; + u8 retransmission_q_counters[0x1]; + u8 reserved_at_183[0x3]; u8 max_qp_cnt[0xa]; u8 pkey_table_size[0x10]; @@ -750,21 +779,27 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 ets[0x1]; u8 nic_flow_table[0x1]; u8 eswitch_flow_table[0x1]; - u8 early_vf_enable; - u8 reserved_at_1a8[0x2]; + u8 early_vf_enable[0x1]; + u8 reserved_at_1a9[0x2]; u8 local_ca_ack_delay[0x5]; - u8 reserved_at_1af[0x6]; + u8 reserved_at_1af[0x2]; + u8 ports_check[0x1]; + u8 reserved_at_1b2[0x1]; + u8 disable_link_up[0x1]; + u8 beacon_led[0x1]; u8 port_type[0x2]; u8 num_ports[0x8]; - u8 reserved_at_1bf[0x3]; + u8 reserved_at_1c0[0x3]; u8 log_max_msg[0x5]; - u8 reserved_at_1c7[0x4]; + u8 reserved_at_1c8[0x4]; u8 max_tc[0x4]; - u8 reserved_at_1cf[0x6]; + u8 reserved_at_1d0[0x1]; + u8 dcbx[0x1]; + u8 reserved_at_1d2[0x4]; u8 rol_s[0x1]; u8 rol_g[0x1]; - u8 reserved_at_1d7[0x1]; + u8 reserved_at_1d8[0x1]; u8 wol_s[0x1]; u8 wol_g[0x1]; u8 wol_a[0x1]; @@ -774,47 +809,48 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 wol_p[0x1]; u8 stat_rate_support[0x10]; - u8 reserved_at_1ef[0xc]; + u8 reserved_at_1f0[0xc]; u8 cqe_version[0x4]; u8 compact_address_vector[0x1]; - u8 reserved_at_200[0x3]; + u8 striding_rq[0x1]; + u8 reserved_at_201[0x2]; u8 ipoib_basic_offloads[0x1]; - u8 reserved_at_204[0xa]; + u8 reserved_at_205[0xa]; u8 drain_sigerr[0x1]; u8 
cmdif_checksum[0x2]; u8 sigerr_cqe[0x1]; - u8 reserved_at_212[0x1]; + u8 reserved_at_213[0x1]; u8 wq_signature[0x1]; u8 sctr_data_cqe[0x1]; - u8 reserved_at_215[0x1]; + u8 reserved_at_216[0x1]; u8 sho[0x1]; u8 tph[0x1]; u8 rf[0x1]; u8 dct[0x1]; - u8 reserved_at_21a[0x1]; + u8 qos[0x1]; u8 eth_net_offloads[0x1]; u8 roce[0x1]; u8 atomic[0x1]; - u8 reserved_at_21e[0x1]; + u8 reserved_at_21f[0x1]; u8 cq_oi[0x1]; u8 cq_resize[0x1]; u8 cq_moderation[0x1]; - u8 reserved_at_222[0x3]; + u8 reserved_at_223[0x3]; u8 cq_eq_remap[0x1]; u8 pg[0x1]; u8 block_lb_mc[0x1]; - u8 reserved_at_228[0x1]; + u8 reserved_at_229[0x1]; u8 scqe_break_moderation[0x1]; - u8 reserved_at_22a[0x1]; + u8 cq_period_start_from_cqe[0x1]; u8 cd[0x1]; - u8 reserved_at_22c[0x1]; + u8 reserved_at_22d[0x1]; u8 apm[0x1]; u8 vector_calc[0x1]; - u8 reserved_at_22f[0x1]; + u8 umr_ptr_rlky[0x1]; u8 imaicl[0x1]; - u8 reserved_at_231[0x4]; + u8 reserved_at_232[0x4]; u8 qkv[0x1]; u8 pkv[0x1]; u8 set_deth_sqpn[0x1]; @@ -824,104 +860,120 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 uc[0x1]; u8 rc[0x1]; - u8 reserved_at_23f[0xa]; + u8 reserved_at_240[0xa]; u8 uar_sz[0x6]; - u8 reserved_at_24f[0x8]; + u8 reserved_at_250[0x8]; u8 log_pg_sz[0x8]; u8 bf[0x1]; - u8 reserved_at_260[0x1]; + u8 reserved_at_261[0x1]; u8 pad_tx_eth_packet[0x1]; - u8 reserved_at_262[0x8]; + u8 reserved_at_263[0x8]; u8 log_bf_reg_size[0x5]; - u8 reserved_at_26f[0x10]; + u8 reserved_at_270[0x10]; - u8 reserved_at_27f[0x10]; + u8 reserved_at_280[0x10]; u8 max_wqe_sz_sq[0x10]; - u8 reserved_at_29f[0x10]; + u8 reserved_at_2a0[0x10]; u8 max_wqe_sz_rq[0x10]; - u8 reserved_at_2bf[0x10]; + u8 reserved_at_2c0[0x10]; u8 max_wqe_sz_sq_dc[0x10]; - u8 reserved_at_2df[0x7]; + u8 reserved_at_2e0[0x7]; u8 max_qp_mcg[0x19]; - u8 reserved_at_2ff[0x18]; + u8 reserved_at_300[0x18]; u8 log_max_mcg[0x8]; - u8 reserved_at_31f[0x3]; + u8 reserved_at_320[0x3]; u8 log_max_transport_domain[0x5]; - u8 reserved_at_327[0x3]; + u8 reserved_at_328[0x3]; u8 log_max_pd[0x5]; - u8 reserved_at_32f[0xb]; + u8 reserved_at_330[0xb]; u8 log_max_xrcd[0x5]; - u8 reserved_at_33f[0x20]; + u8 reserved_at_340[0x8]; + u8 log_max_flow_counter_bulk[0x8]; + u8 max_flow_counter[0x10]; - u8 reserved_at_35f[0x3]; + + u8 reserved_at_360[0x3]; u8 log_max_rq[0x5]; - u8 reserved_at_367[0x3]; + u8 reserved_at_368[0x3]; u8 log_max_sq[0x5]; - u8 reserved_at_36f[0x3]; + u8 reserved_at_370[0x3]; u8 log_max_tir[0x5]; - u8 reserved_at_377[0x3]; + u8 reserved_at_378[0x3]; u8 log_max_tis[0x5]; u8 basic_cyclic_rcv_wqe[0x1]; - u8 reserved_at_380[0x2]; + u8 reserved_at_381[0x2]; u8 log_max_rmp[0x5]; - u8 reserved_at_387[0x3]; + u8 reserved_at_388[0x3]; u8 log_max_rqt[0x5]; - u8 reserved_at_38f[0x3]; + u8 reserved_at_390[0x3]; u8 log_max_rqt_size[0x5]; - u8 reserved_at_397[0x3]; + u8 reserved_at_398[0x3]; u8 log_max_tis_per_sq[0x5]; - u8 reserved_at_39f[0x3]; + u8 reserved_at_3a0[0x3]; u8 log_max_stride_sz_rq[0x5]; - u8 reserved_at_3a7[0x3]; + u8 reserved_at_3a8[0x3]; u8 log_min_stride_sz_rq[0x5]; - u8 reserved_at_3af[0x3]; + u8 reserved_at_3b0[0x3]; u8 log_max_stride_sz_sq[0x5]; - u8 reserved_at_3b7[0x3]; + u8 reserved_at_3b8[0x3]; u8 log_min_stride_sz_sq[0x5]; - u8 reserved_at_3bf[0x1b]; + u8 reserved_at_3c0[0x1b]; u8 log_max_wq_sz[0x5]; u8 nic_vport_change_event[0x1]; - u8 reserved_at_3e0[0xa]; + u8 reserved_at_3e1[0xa]; u8 log_max_vlan_list[0x5]; - u8 reserved_at_3ef[0x3]; + u8 reserved_at_3f0[0x3]; u8 log_max_current_mc_list[0x5]; - u8 reserved_at_3f7[0x3]; + u8 reserved_at_3f8[0x3]; u8 log_max_current_uc_list[0x5]; - u8 
reserved_at_3ff[0x80]; + u8 reserved_at_400[0x80]; - u8 reserved_at_47f[0x3]; + u8 reserved_at_480[0x3]; u8 log_max_l2_table[0x5]; - u8 reserved_at_487[0x8]; + u8 reserved_at_488[0x8]; u8 log_uar_page_sz[0x10]; - u8 reserved_at_49f[0x20]; + u8 reserved_at_4a0[0x20]; u8 device_frequency_mhz[0x20]; u8 device_frequency_khz[0x20]; - u8 reserved_at_4ff[0x5f]; - u8 cqe_zip[0x1]; - u8 cqe_zip_timeout[0x10]; - u8 cqe_zip_max_num[0x10]; + u8 reserved_at_500[0x80]; + + u8 reserved_at_580[0x3f]; + u8 cqe_compression[0x1]; + + u8 cqe_compression_timeout[0x10]; + u8 cqe_compression_max_num[0x10]; - u8 reserved_at_57f[0x220]; + u8 reserved_at_5e0[0x10]; + u8 tag_matching[0x1]; + u8 rndv_offload_rc[0x1]; + u8 rndv_offload_dc[0x1]; + u8 log_tag_matching_list_sz[0x5]; + u8 reserved_at_5e8[0x3]; + u8 log_max_xrq[0x5]; + + u8 reserved_at_5f0[0x200]; }; enum mlx5_flow_destination_type { MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0, MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2, + + MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100, }; struct mlx5_ifc_dest_format_struct_bits { @@ -931,6 +983,20 @@ struct mlx5_ifc_dest_format_struct_bits { u8 reserved_at_20[0x20]; }; +struct mlx5_ifc_flow_counter_list_bits { + u8 clear[0x1]; + u8 num_of_counters[0xf]; + u8 flow_counter_id[0x10]; + + u8 reserved_at_20[0x20]; +}; + +union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits { + struct mlx5_ifc_dest_format_struct_bits dest_format_struct; + struct mlx5_ifc_flow_counter_list_bits flow_counter_list; + u8 reserved_at_0[0x40]; +}; + struct mlx5_ifc_fte_match_param_bits { struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers; @@ -997,7 +1063,13 @@ struct mlx5_ifc_wq_bits { u8 reserved_at_118[0x3]; u8 log_wq_sz[0x5]; - u8 reserved_at_120[0x4e0]; + u8 reserved_at_120[0x15]; + u8 log_wqe_num_of_strides[0x3]; + u8 two_byte_shift_en[0x1]; + u8 reserved_at_139[0x4]; + u8 log_wqe_stride_size[0x3]; + + u8 reserved_at_140[0x4c0]; struct mlx5_ifc_cmd_pas_bits pas[0]; }; @@ -1932,7 +2004,7 @@ struct mlx5_ifc_qpc_bits { u8 reserved_at_560[0x5]; u8 rq_type[0x3]; - u8 srqn_rmpn[0x18]; + u8 srqn_rmpn_xrqn[0x18]; u8 reserved_at_580[0x8]; u8 rmsn[0x18]; @@ -1983,6 +2055,7 @@ union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; struct mlx5_ifc_e_switch_cap_bits e_switch_cap; struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap; + struct mlx5_ifc_qos_cap_bits qos_cap; u8 reserved_at_0[0x8000]; }; @@ -1990,6 +2063,7 @@ enum { MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1, MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4, + MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8, }; struct mlx5_ifc_flow_context_bits { @@ -2006,13 +2080,16 @@ struct mlx5_ifc_flow_context_bits { u8 reserved_at_80[0x8]; u8 destination_list_size[0x18]; - u8 reserved_at_a0[0x160]; + u8 reserved_at_a0[0x8]; + u8 flow_counter_list_size[0x18]; + + u8 reserved_at_c0[0x140]; struct mlx5_ifc_fte_match_param_bits match_value; u8 reserved_at_1200[0x600]; - struct mlx5_ifc_dest_format_struct_bits destination[0]; + union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0]; }; enum { @@ -2194,9 +2271,11 @@ struct mlx5_ifc_sqc_bits { u8 cd_master[0x1]; u8 fre[0x1]; u8 flush_in_error_en[0x1]; - u8 reserved_at_4[0x4]; + u8 reserved_at_4[0x1]; + u8 min_wqe_inline_mode[0x3]; u8 state[0x4]; - u8 reserved_at_c[0x14]; + u8 reg_umr[0x1]; + u8 reserved_at_d[0x13]; u8 reserved_at_20[0x8]; u8 user_index[0x18]; @@ -2204,8 +2283,9 @@ struct mlx5_ifc_sqc_bits { u8 reserved_at_40[0x8]; u8 
cqn[0x18]; - u8 reserved_at_60[0xa0]; + u8 reserved_at_60[0x90]; + u8 packet_pacing_rate_limit_index[0x10]; u8 tis_lst_sz[0x10]; u8 reserved_at_110[0x10]; @@ -2244,7 +2324,8 @@ enum { struct mlx5_ifc_rqc_bits { u8 rlky[0x1]; - u8 reserved_at_1[0x2]; + u8 reserved_at_1[0x1]; + u8 scatter_fcs[0x1]; u8 vsd[0x1]; u8 mem_rq_type[0x4]; u8 state[0x4]; @@ -2288,7 +2369,9 @@ struct mlx5_ifc_rmpc_bits { }; struct mlx5_ifc_nic_vport_context_bits { - u8 reserved_at_0[0x1f]; + u8 reserved_at_0[0x5]; + u8 min_wqe_inline_mode[0x3]; + u8 reserved_at_8[0x17]; u8 roce_en[0x1]; u8 arm_change_event[0x1]; @@ -2552,7 +2635,7 @@ struct mlx5_ifc_dctc_bits { u8 reserved_at_98[0x8]; u8 reserved_at_a0[0x8]; - u8 srqn[0x18]; + u8 srqn_xrqn[0x18]; u8 reserved_at_c0[0x8]; u8 pd[0x18]; @@ -2601,6 +2684,12 @@ enum { MLX5_CQC_ST_FIRED = 0xa, }; +enum { + MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, + MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, + MLX5_CQ_PERIOD_NUM_MODES +}; + struct mlx5_ifc_cqc_bits { u8 status[0x4]; u8 reserved_at_4[0x4]; @@ -2609,8 +2698,8 @@ struct mlx5_ifc_cqc_bits { u8 reserved_at_c[0x1]; u8 scqe_break_moderation_en[0x1]; u8 oi[0x1]; - u8 reserved_at_f[0x2]; - u8 cqe_zip_en[0x1]; + u8 cq_period_mode[0x2]; + u8 cqe_comp_en[0x1]; u8 mini_cqe_res_format[0x2]; u8 st[0x4]; u8 reserved_at_18[0x8]; @@ -2676,6 +2765,54 @@ struct mlx5_ifc_query_adapter_param_block_bits { u8 vsd_contd_psid[16][0x8]; }; +enum { + MLX5_XRQC_STATE_GOOD = 0x0, + MLX5_XRQC_STATE_ERROR = 0x1, +}; + +enum { + MLX5_XRQC_TOPOLOGY_NO_SPECIAL_TOPOLOGY = 0x0, + MLX5_XRQC_TOPOLOGY_TAG_MATCHING = 0x1, +}; + +enum { + MLX5_XRQC_OFFLOAD_RNDV = 0x1, +}; + +struct mlx5_ifc_tag_matching_topology_context_bits { + u8 log_matching_list_sz[0x4]; + u8 reserved_at_4[0xc]; + u8 append_next_index[0x10]; + + u8 sw_phase_cnt[0x10]; + u8 hw_phase_cnt[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_xrqc_bits { + u8 state[0x4]; + u8 rlkey[0x1]; + u8 reserved_at_5[0xf]; + u8 topology[0x4]; + u8 reserved_at_18[0x4]; + u8 offload[0x4]; + + u8 reserved_at_20[0x8]; + u8 user_index[0x18]; + + u8 reserved_at_40[0x8]; + u8 cqn[0x18]; + + u8 reserved_at_60[0xa0]; + + struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context; + + u8 reserved_at_180[0x180]; + + struct mlx5_ifc_wq_bits wq; +}; + union mlx5_ifc_modify_field_select_resize_field_select_auto_bits { struct mlx5_ifc_modify_field_select_bits modify_field_select; struct mlx5_ifc_resize_field_select_bits resize_field_select; @@ -2984,7 +3121,11 @@ struct mlx5_ifc_set_fte_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -3094,6 +3235,30 @@ struct mlx5_ifc_rst2init_qp_in_bits { u8 reserved_at_800[0x80]; }; +struct mlx5_ifc_query_xrq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_xrqc_bits xrq_context; +}; + +struct mlx5_ifc_query_xrq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 xrqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + struct mlx5_ifc_query_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -3497,7 +3662,27 @@ struct mlx5_ifc_query_q_counter_out_bits { u8 out_of_sequence[0x20]; - u8 reserved_at_1e0[0x620]; + u8 reserved_at_1e0[0x20]; + + u8 duplicate_request[0x20]; + + u8 reserved_at_220[0x20]; + + u8 
rnr_nak_retry_err[0x20]; + + u8 reserved_at_260[0x20]; + + u8 packet_seq_err[0x20]; + + u8 reserved_at_2a0[0x20]; + + u8 implied_nak_seq_err[0x20]; + + u8 reserved_at_2e0[0x20]; + + u8 local_ack_timeout_err[0x20]; + + u8 reserved_at_320[0x4e0]; }; struct mlx5_ifc_query_q_counter_in_bits { @@ -3910,6 +4095,34 @@ struct mlx5_ifc_query_flow_group_in_bits { u8 reserved_at_e0[0x120]; }; +struct mlx5_ifc_query_flow_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_traffic_counter_bits flow_statistics[0]; +}; + +struct mlx5_ifc_query_flow_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x80]; + + u8 clear[0x1]; + u8 reserved_at_c1[0xf]; + u8 num_of_counters[0x10]; + + u8 reserved_at_e0[0x10]; + u8 flow_counter_id[0x10]; +}; + struct mlx5_ifc_query_esw_vport_context_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -4505,7 +4718,10 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits { }; struct mlx5_ifc_modify_nic_vport_field_select_bits { - u8 reserved_at_0[0x19]; + u8 reserved_at_0[0x16]; + u8 node_guid[0x1]; + u8 port_guid[0x1]; + u8 reserved_at_18[0x1]; u8 mtu[0x1]; u8 change_event[0x1]; u8 promisc[0x1]; @@ -4920,6 +5136,28 @@ struct mlx5_ifc_detach_from_mcg_in_bits { u8 multicast_gid[16][0x8]; }; +struct mlx5_ifc_destroy_xrq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_xrq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 xrqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + struct mlx5_ifc_destroy_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -5178,7 +5416,11 @@ struct mlx5_ifc_destroy_flow_table_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -5205,7 +5447,11 @@ struct mlx5_ifc_destroy_flow_group_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -5346,7 +5592,11 @@ struct mlx5_ifc_delete_fte_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -5471,6 +5721,52 @@ struct mlx5_ifc_dealloc_pd_in_bits { u8 reserved_at_60[0x20]; }; +struct mlx5_ifc_dealloc_flow_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_dealloc_flow_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 flow_counter_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_create_xrq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 xrqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_create_xrq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_xrqc_bits xrq_context; +}; + struct 
mlx5_ifc_create_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -5792,7 +6088,11 @@ struct mlx5_ifc_create_flow_table_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -5836,7 +6136,11 @@ struct mlx5_ifc_create_flow_group_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -6004,6 +6308,29 @@ struct mlx5_ifc_attach_to_mcg_in_bits { u8 multicast_gid[16][0x8]; }; +struct mlx5_ifc_arm_xrq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_arm_xrq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 xrqn[0x18]; + + u8 reserved_at_60[0x10]; + u8 lwm[0x10]; +}; + struct mlx5_ifc_arm_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -6041,7 +6368,8 @@ struct mlx5_ifc_arm_rq_out_bits { }; enum { - MLX5_ARM_RQ_IN_OP_MOD_SRQ_ = 0x1, + MLX5_ARM_RQ_IN_OP_MOD_SRQ = 0x1, + MLX5_ARM_RQ_IN_OP_MOD_XRQ = 0x2, }; struct mlx5_ifc_arm_rq_in_bits { @@ -6190,6 +6518,28 @@ struct mlx5_ifc_alloc_pd_in_bits { u8 reserved_at_40[0x40]; }; +struct mlx5_ifc_alloc_flow_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x10]; + u8 flow_counter_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_alloc_flow_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_add_vxlan_udp_dport_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -6212,6 +6562,30 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits { u8 vxlan_udp_port[0x10]; }; +struct mlx5_ifc_set_rate_limit_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_set_rate_limit_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 rate_limit_index[0x10]; + + u8 reserved_at_60[0x20]; + + u8 rate_limit[0x20]; +}; + struct mlx5_ifc_access_register_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -6336,12 +6710,16 @@ struct mlx5_ifc_pude_reg_bits { }; struct mlx5_ifc_ptys_reg_bits { - u8 reserved_at_0[0x8]; + u8 reserved_at_0[0x1]; + u8 an_disable_admin[0x1]; + u8 an_disable_cap[0x1]; + u8 reserved_at_3[0x5]; u8 local_port[0x8]; u8 reserved_at_10[0xd]; u8 proto_mask[0x3]; - u8 reserved_at_20[0x40]; + u8 an_status[0x4]; + u8 reserved_at_24[0x3c]; u8 eth_proto_capability[0x20]; @@ -6369,6 +6747,17 @@ struct mlx5_ifc_ptys_reg_bits { u8 reserved_at_1a0[0x60]; }; +struct mlx5_ifc_mlcr_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x20]; + + u8 beacon_duration[0x10]; + u8 reserved_at_40[0x10]; + + u8 beacon_remain[0x10]; +}; + struct mlx5_ifc_ptas_reg_bits { u8 reserved_at_0[0x20]; @@ -6778,6 +7167,16 @@ struct mlx5_ifc_pamp_reg_bits { u8 index_data[18][0x10]; }; +struct mlx5_ifc_pcmr_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x2e]; + u8 fcs_cap[0x1]; + u8 reserved_at_3f[0x1f]; + u8 fcs_chk[0x1]; + u8 reserved_at_5f[0x1]; +}; + struct 
mlx5_ifc_lane_2_module_mapping_bits { u8 reserved_at_0[0x6]; u8 rx_lane[0x2]; @@ -7114,6 +7513,7 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_pspa_reg_bits pspa_reg; struct mlx5_ifc_ptas_reg_bits ptas_reg; struct mlx5_ifc_ptys_reg_bits ptys_reg; + struct mlx5_ifc_mlcr_reg_bits mlcr_reg; struct mlx5_ifc_pude_reg_bits pude_reg; struct mlx5_ifc_pvlc_reg_bits pvlc_reg; struct mlx5_ifc_slrg_reg_bits slrg_reg; @@ -7147,7 +7547,11 @@ struct mlx5_ifc_set_flow_table_root_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; @@ -7178,7 +7582,9 @@ struct mlx5_ifc_modify_flow_table_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x20]; + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; u8 reserved_at_60[0x10]; u8 modify_field_select[0x10]; @@ -7244,4 +7650,64 @@ struct mlx5_ifc_qtct_reg_bits { u8 tclass[0x3]; }; +struct mlx5_ifc_mcia_reg_bits { + u8 l[0x1]; + u8 reserved_at_1[0x7]; + u8 module[0x8]; + u8 reserved_at_10[0x8]; + u8 status[0x8]; + + u8 i2c_device_address[0x8]; + u8 page_number[0x8]; + u8 device_address[0x10]; + + u8 reserved_at_40[0x10]; + u8 size[0x10]; + + u8 reserved_at_60[0x20]; + + u8 dword_0[0x20]; + u8 dword_1[0x20]; + u8 dword_2[0x20]; + u8 dword_3[0x20]; + u8 dword_4[0x20]; + u8 dword_5[0x20]; + u8 dword_6[0x20]; + u8 dword_7[0x20]; + u8 dword_8[0x20]; + u8 dword_9[0x20]; + u8 dword_10[0x20]; + u8 dword_11[0x20]; +}; + +struct mlx5_ifc_dcbx_param_bits { + u8 dcbx_cee_cap[0x1]; + u8 dcbx_ieee_cap[0x1]; + u8 dcbx_standby_cap[0x1]; + u8 reserved_at_0[0x5]; + u8 port_number[0x8]; + u8 reserved_at_10[0xa]; + u8 max_application_table_size[6]; + u8 reserved_at_20[0x15]; + u8 version_oper[0x3]; + u8 reserved_at_38[5]; + u8 version_admin[0x3]; + u8 willing_admin[0x1]; + u8 reserved_at_41[0x3]; + u8 pfc_cap_oper[0x4]; + u8 reserved_at_48[0x4]; + u8 pfc_cap_admin[0x4]; + u8 reserved_at_50[0x4]; + u8 num_of_tc_oper[0x4]; + u8 reserved_at_58[0x4]; + u8 num_of_tc_admin[0x4]; + u8 remote_willing[0x1]; + u8 reserved_at_61[3]; + u8 remote_pfc_cap[4]; + u8 reserved_at_68[0x14]; + u8 remote_num_of_tc[0x4]; + u8 reserved_at_80[0x18]; + u8 error[0x8]; + u8 reserved_at_a0[0x160]; +}; #endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index b30250ab7604..e3012cc64b8a 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -35,6 +35,32 @@ #include <linux/mlx5/driver.h> +enum mlx5_beacon_duration { + MLX5_BEACON_DURATION_OFF = 0x0, + MLX5_BEACON_DURATION_INF = 0xffff, +}; + +enum mlx5_module_id { + MLX5_MODULE_ID_SFP = 0x3, + MLX5_MODULE_ID_QSFP = 0xC, + MLX5_MODULE_ID_QSFP_PLUS = 0xD, + MLX5_MODULE_ID_QSFP28 = 0x11, +}; + +enum mlx5_an_status { + MLX5_AN_UNAVAILABLE = 0, + MLX5_AN_COMPLETE = 1, + MLX5_AN_FAILED = 2, + MLX5_AN_LINK_UP = 3, + MLX5_AN_LINK_DOWN = 4, +}; + +#define MLX5_EEPROM_MAX_BYTES 32 +#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff +#define MLX5_I2C_ADDR_LOW 0x50 +#define MLX5_I2C_ADDR_HIGH 0x51 +#define MLX5_EEPROM_PAGE_LENGTH 256 + int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, int ptys_size, int proto_mask, u8 local_port); @@ -47,12 +73,17 @@ int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, u8 *proto_oper, int 
proto_mask, u8 local_port); -int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, - int proto_mask); +int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable, + u32 proto_admin, int proto_mask); +void mlx5_toggle_port_link(struct mlx5_core_dev *dev); int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status status); int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status *status); +int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration); +void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask, + u8 *an_status, + u8 *an_disable_cap, u8 *an_disable_admin); int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port); void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port); @@ -84,4 +115,10 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev, int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode); int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode); +int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable); +void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, + bool *enabled); +int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, + u16 offset, u16 size, u8 *data); + #endif /* __MLX5_PORT_H__ */ diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index cf031a3f16c5..7879bf411891 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -172,6 +172,7 @@ enum { enum { MLX5_FENCE_MODE_NONE = 0 << 5, MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5, + MLX5_FENCE_MODE_FENCE = 2 << 5, MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5, MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5, }; @@ -460,10 +461,9 @@ struct mlx5_core_qp { }; struct mlx5_qp_path { - u8 fl; + u8 fl_free_ar; u8 rsvd3; - u8 free_ar; - u8 pkey_index; + __be16 pkey_index; u8 rsvd0; u8 grh_mlid; __be16 rlid; @@ -556,10 +556,11 @@ struct mlx5_destroy_qp_mbox_out { struct mlx5_modify_qp_mbox_in { struct mlx5_inbox_hdr hdr; __be32 qpn; - u8 rsvd1[4]; - __be32 optparam; u8 rsvd0[4]; + __be32 optparam; + u8 rsvd1[4]; struct mlx5_qp_context ctx; + u8 rsvd2[16]; }; struct mlx5_modify_qp_mbox_out { @@ -668,6 +669,12 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, struct mlx5_core_qp *sq); void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev, struct mlx5_core_qp *sq); +int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id); +int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id); +int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, + int reset, void *out, int out_size); +int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id, + u32 *out_of_buffer); static inline const char *mlx5_qp_type_str(int type) { diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h index f43ed054a3e0..33c97dc900f8 100644 --- a/include/linux/mlx5/srq.h +++ b/include/linux/mlx5/srq.h @@ -35,6 +35,31 @@ #include <linux/mlx5/driver.h> +enum { + MLX5_SRQ_FLAG_ERR = (1 << 0), + MLX5_SRQ_FLAG_WQ_SIG = (1 << 1), +}; + +struct mlx5_srq_attr { + u32 type; + u32 flags; + u32 log_size; + u32 wqe_shift; + u32 log_page_size; + u32 wqe_cnt; + u32 srqn; + u32 xrcd; + u32 page_offset; + u32 cqn; + u32 pd; + u32 lwm; + u32 user_index; + u64 db_record; + u64 *pas; +}; + +struct mlx5_core_dev; + void mlx5_init_srq_table(struct mlx5_core_dev *dev); void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev); diff --git a/include/linux/mlx5/vport.h 
b/include/linux/mlx5/vport.h index 301da4a5e6bf..e087b7d047ac 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -43,6 +43,8 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport, u8 state); int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u16 vport, u8 *addr); +void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, + u8 *min_inline); int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, u16 vport, u8 *addr); int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); @@ -50,6 +52,8 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, u64 *system_image_guid); int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); +int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, + u32 vport, u64 node_guid); int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, u16 *qkey_viol_cntr); int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, diff --git a/include/linux/mm.h b/include/linux/mm.h index 8f468e0d2534..ef815b9cd426 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -72,6 +72,10 @@ extern int mmap_rnd_compat_bits __read_mostly; #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) #endif +#ifndef page_to_virt +#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x))) +#endif + /* * To prevent common memory management code establishing * a zero page mapping on a read fault. @@ -299,10 +303,40 @@ struct vm_fault { * is set (which is also implied by * VM_FAULT_ERROR). */ - /* for ->map_pages() only */ - pgoff_t max_pgoff; /* map pages for offset from pgoff till - * max_pgoff inclusive */ - pte_t *pte; /* pte entry associated with ->pgoff */ + void *entry; /* ->fault handler can alternatively + * return locked DAX entry. In that + * case handler should return + * VM_FAULT_DAX_LOCKED and fill in + * entry here. + */ +}; + +/* + * Page fault context: passes though page fault handler instead of endless list + * of function arguments. + */ +struct fault_env { + struct vm_area_struct *vma; /* Target VMA */ + unsigned long address; /* Faulting virtual address */ + unsigned int flags; /* FAULT_FLAG_xxx flags */ + pmd_t *pmd; /* Pointer to pmd entry matching + * the 'address' + */ + pte_t *pte; /* Pointer to pte entry matching + * the 'address'. NULL if the page + * table hasn't been allocated. + */ + spinlock_t *ptl; /* Page table lock. + * Protects pte page table if 'pte' + * is not NULL, otherwise pmd. + */ + pgtable_t prealloc_pte; /* Pre-allocated pte page table. + * vm_ops->map_pages() calls + * alloc_set_pte() from atomic context. + * do_fault_around() pre-allocates + * page table to avoid allocation from + * atomic context. + */ }; /* @@ -317,7 +351,8 @@ struct vm_operations_struct { int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); int (*pmd_fault)(struct vm_area_struct *, unsigned long address, pmd_t *, unsigned int flags); - void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf); + void (*map_pages)(struct fault_env *fe, + pgoff_t start_pgoff, pgoff_t end_pgoff); /* notification that a previously read-only page is about to become * writable, if an error is returned it will cause a SIGBUS */ @@ -443,14 +478,14 @@ unsigned long vmalloc_to_pfn(const void *addr); * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there * is no special casing required. 
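To make the fault_env conversion in the mm.h hunk above concrete, here is a minimal sketch of a ->map_pages() implementation under the new signature; the driver name and the empty body are hypothetical, and only the signature and the fe-> fields come from the hunk itself:

static void mydrv_map_pages(struct fault_env *fe,
			    pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	/* fe->vma, fe->address, fe->flags, fe->pmd and fe->pte replace
	 * the old (vma, vmf) argument pair; fe->ptl guards fe->pte. */
}

static const struct vm_operations_struct mydrv_vm_ops = {
	.map_pages	= mydrv_map_pages,
};
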
*/ -static inline int is_vmalloc_addr(const void *x) +static inline bool is_vmalloc_addr(const void *x) { #ifdef CONFIG_MMU unsigned long addr = (unsigned long)x; return addr >= VMALLOC_START && addr < VMALLOC_END; #else - return 0; + return false; #endif } #ifdef CONFIG_MMU @@ -471,8 +506,7 @@ static inline atomic_t *compound_mapcount_ptr(struct page *page) static inline int compound_mapcount(struct page *page) { - if (!PageCompound(page)) - return 0; + VM_BUG_ON_PAGE(!PageCompound(page), page); page = compound_head(page); return atomic_read(compound_mapcount_ptr(page)) + 1; } @@ -528,7 +562,6 @@ void __put_page(struct page *page); void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); -int split_free_page(struct page *page); /* * Compound pages have a destructor function. Provide a @@ -592,8 +625,8 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) return pte; } -void do_set_pte(struct vm_area_struct *vma, unsigned long address, - struct page *page, pte_t *pte, bool write, bool anon); +int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg, + struct page *page); #endif /* @@ -730,7 +763,7 @@ static inline void get_page(struct page *page) page = compound_head(page); /* * Getting a normal page or the head of a compound page - * requires to already have an elevated page->_count. + * requires to already have an elevated page->_refcount. */ VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page); page_ref_inc(page); @@ -846,10 +879,7 @@ extern int page_cpupid_xchg_last(struct page *page, int cpupid); static inline void page_cpupid_reset_last(struct page *page) { - int cpupid = (1 << LAST_CPUPID_SHIFT) - 1; - - page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT); - page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT; + page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT; } #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */ #else /* !CONFIG_NUMA_BALANCING */ @@ -903,6 +933,11 @@ static inline struct zone *page_zone(const struct page *page) return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; } +static inline pg_data_t *page_pgdat(const struct page *page) +{ + return NODE_DATA(page_to_nid(page)); +} + #ifdef SECTION_IN_PAGE_FLAGS static inline void set_page_section(struct page *page, unsigned long section) { @@ -943,11 +978,21 @@ static inline struct mem_cgroup *page_memcg(struct page *page) { return page->mem_cgroup; } +static inline struct mem_cgroup *page_memcg_rcu(struct page *page) +{ + WARN_ON_ONCE(!rcu_read_lock_held()); + return READ_ONCE(page->mem_cgroup); +} #else static inline struct mem_cgroup *page_memcg(struct page *page) { return NULL; } +static inline struct mem_cgroup *page_memcg_rcu(struct page *page) +{ + WARN_ON_ONCE(!rcu_read_lock_held()); + return NULL; +} #endif /* @@ -957,7 +1002,7 @@ static inline struct mem_cgroup *page_memcg(struct page *page) static __always_inline void *lowmem_page_address(const struct page *page) { - return __va(PFN_PHYS(page_to_pfn(page))); + return page_to_virt(page); } #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) @@ -1028,26 +1073,8 @@ static inline pgoff_t page_file_index(struct page *page) return page->index; } -/* - * Return true if this page is mapped into pagetables. - * For compound page it returns true if any subpage of compound page is mapped. 
- */ -static inline bool page_mapped(struct page *page) -{ - int i; - if (likely(!PageCompound(page))) - return atomic_read(&page->_mapcount) >= 0; - page = compound_head(page); - if (atomic_read(compound_mapcount_ptr(page)) >= 0) - return true; - if (PageHuge(page)) - return false; - for (i = 0; i < hpage_nr_pages(page); i++) { - if (atomic_read(&page[i]._mapcount) >= 0) - return true; - } - return false; -} +bool page_mapped(struct page *page); +struct address_space *page_mapping(struct page *page); /* * Return true only if the page has been allocated with @@ -1095,6 +1122,7 @@ static inline void clear_page_pfmemalloc(struct page *page) #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ #define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */ #define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */ +#define VM_FAULT_DAX_LOCKED 0x1000 /* ->fault has locked DAX entry */ #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ @@ -1227,15 +1255,14 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page); int invalidate_inode_page(struct page *page); #ifdef CONFIG_MMU -extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, unsigned int flags); +extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address, + unsigned int flags); extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked); #else -static inline int handle_mm_fault(struct mm_struct *mm, - struct vm_area_struct *vma, unsigned long address, - unsigned int flags) +static inline int handle_mm_fault(struct vm_area_struct *vma, + unsigned long address, unsigned int flags) { /* should never happen if there's no MMU */ BUG(); @@ -1782,7 +1809,7 @@ extern void free_highmem_page(struct page *page); extern void adjust_managed_page_count(struct page *page, long count); extern void mem_init_print_info(const char *str); -extern void reserve_bootmem_region(unsigned long start, unsigned long end); +extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); /* Free the reserved page into the buddy system, so it gets managed. 
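The handle_mm_fault() change above drops the redundant mm argument, so callers update mechanically; a before/after sketch, with the surrounding variables assumed:

	/* Before: the mm_struct was passed alongside the vma. */
	fault = handle_mm_fault(mm, vma, address, flags);

	/* After: handle_mm_fault() takes the mm from vma->vm_mm. */
	fault = handle_mm_fault(vma, address, flags);
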
*/ static inline void __free_reserved_page(struct page *page) @@ -1987,6 +2014,7 @@ extern void mm_drop_all_locks(struct mm_struct *mm); extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); extern struct file *get_mm_exe_file(struct mm_struct *mm); +extern struct file *get_task_exe_file(struct task_struct *task); extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages); extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages); @@ -2030,9 +2058,9 @@ static inline void mm_populate(unsigned long addr, unsigned long len) {} #endif /* These take the mm semaphore themselves */ -extern unsigned long vm_brk(unsigned long, unsigned long); +extern int __must_check vm_brk(unsigned long, unsigned long); extern int vm_munmap(unsigned long, size_t); -extern unsigned long vm_mmap(struct file *, unsigned long, +extern unsigned long __must_check vm_mmap(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); @@ -2075,7 +2103,8 @@ extern void truncate_inode_pages_final(struct address_space *); /* generic vm_area_ops exported for stackable file systems */ extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); -extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf); +extern void filemap_map_pages(struct fault_env *fe, + pgoff_t start_pgoff, pgoff_t end_pgoff); extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); /* mm/page-writeback.c */ @@ -2271,6 +2300,8 @@ static inline int in_gate_area(struct mm_struct *mm, unsigned long addr) } #endif /* __HAVE_ARCH_GATE_AREA */ +extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm); + #ifdef CONFIG_SYSCTL extern int sysctl_drop_caches; int drop_caches_sysctl_handler(struct ctl_table *, int, @@ -2405,6 +2436,9 @@ static inline bool page_is_guard(struct page *page) return false; page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + return false; + return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); } #else diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 712e8c37a200..71613e8a720f 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -22,22 +22,39 @@ static inline int page_is_file_cache(struct page *page) return !PageSwapBacked(page); } +static __always_inline void __update_lru_size(struct lruvec *lruvec, + enum lru_list lru, enum zone_type zid, + int nr_pages) +{ + struct pglist_data *pgdat = lruvec_pgdat(lruvec); + + __mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages); + __mod_zone_page_state(&pgdat->node_zones[zid], + NR_ZONE_LRU_BASE + lru, nr_pages); +} + +static __always_inline void update_lru_size(struct lruvec *lruvec, + enum lru_list lru, enum zone_type zid, + int nr_pages) +{ + __update_lru_size(lruvec, lru, zid, nr_pages); +#ifdef CONFIG_MEMCG + mem_cgroup_update_lru_size(lruvec, lru, nr_pages); +#endif +} + static __always_inline void add_page_to_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { - int nr_pages = hpage_nr_pages(page); - mem_cgroup_update_lru_size(lruvec, lru, nr_pages); + update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); list_add(&page->lru, &lruvec->lists[lru]); - __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages); } static __always_inline void del_page_from_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { - int nr_pages = hpage_nr_pages(page); - mem_cgroup_update_lru_size(lruvec, lru, -nr_pages); 
list_del(&page->lru); - __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages); + update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page)); } /** diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index c2d75b4fa86c..903200f4ec41 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -12,6 +12,7 @@ #include <linux/cpumask.h> #include <linux/uprobes.h> #include <linux/page-flags-layout.h> +#include <linux/workqueue.h> #include <asm/page.h> #include <asm/mmu.h> @@ -59,47 +60,52 @@ struct page { }; /* Second double word */ - struct { - union { - pgoff_t index; /* Our offset within mapping. */ - void *freelist; /* sl[aou]b first free object */ - /* page_deferred_list().prev -- second tail page */ - }; + union { + pgoff_t index; /* Our offset within mapping. */ + void *freelist; /* sl[aou]b first free object */ + /* page_deferred_list().prev -- second tail page */ + }; - union { + union { #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) - /* Used for cmpxchg_double in slub */ - unsigned long counters; + /* Used for cmpxchg_double in slub */ + unsigned long counters; #else - /* - * Keep _count separate from slub cmpxchg_double data. - * As the rest of the double word is protected by - * slab_lock but _count is not. - */ - unsigned counters; + /* + * Keep _refcount separate from slub cmpxchg_double data. + * As the rest of the double word is protected by slab_lock + * but _refcount is not. + */ + unsigned counters; #endif + struct { - struct { - - union { - /* - * Count of ptes mapped in mms, to show - * when page is mapped & limit reverse - * map searches. - */ - atomic_t _mapcount; - - struct { /* SLUB */ - unsigned inuse:16; - unsigned objects:15; - unsigned frozen:1; - }; - int units; /* SLOB */ + union { + /* + * Count of ptes mapped in mms, to show when + * page is mapped & limit reverse map searches. + * + * Extra information about page type may be + * stored here for pages that are never mapped, + * in which case the value MUST BE <= -2. + * See page-flags.h for more details. + */ + atomic_t _mapcount; + + unsigned int active; /* SLAB */ + struct { /* SLUB */ + unsigned inuse:16; + unsigned objects:15; + unsigned frozen:1; }; - atomic_t _count; /* Usage count, see below. */ + int units; /* SLOB */ }; - unsigned int active; /* SLAB */ + /* + * Usage count, *USE WRAPPER FUNCTION* when manual + * accounting. See page_ref.h + */ + atomic_t _refcount; }; }; @@ -112,7 +118,7 @@ struct page { */ union { struct list_head lru; /* Pageout list, eg. active_list - * protected by zone->lru_lock ! + * protected by zone_lru_lock ! * Can be used as a generic list * by the page owner. */ @@ -248,7 +254,7 @@ struct page_frag_cache { __u32 offset; #endif /* we maintain a pagecount bias, so that we dont dirty cache line - * containing page->_count every time we allocate a fragment. + * containing page->_refcount every time we allocate a fragment. 
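The renamed _refcount field is meant to be touched only through the page_ref.h wrappers, as the struct page comment above insists; the get_page() hunk earlier in mm.h already shows the intended pattern:

	/* Don't open-code atomic ops on page->_refcount ... */
	VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
	page_ref_inc(page);	/* ... go through the page_ref.h accessors. */
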
*/ unsigned int pagecnt_bias; bool pfmemalloc; @@ -509,6 +515,9 @@ struct mm_struct { #ifdef CONFIG_HUGETLB_PAGE atomic_long_t hugetlb_usage; #endif +#ifdef CONFIG_MMU + struct work_struct async_put_work; +#endif }; static inline void mm_init_cpumask(struct mm_struct *mm) @@ -586,6 +595,9 @@ struct vm_special_mapping { int (*fault)(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf); + + int (*mremap)(const struct vm_special_mapping *sm, + struct vm_area_struct *new_vma); }; enum tlb_flush_reason { diff --git a/include/linux/mman.h b/include/linux/mman.h index 33e17f6a327a..634c4c51fe3a 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -49,7 +49,7 @@ static inline void vm_unacct_memory(long pages) * * Returns true if the prot flags are valid */ -static inline int arch_validate_prot(unsigned long prot) +static inline bool arch_validate_prot(unsigned long prot) { return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0; } diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index eb0151bac50c..d8673ca968ba 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -95,6 +95,7 @@ struct mmc_ext_csd { u8 raw_partition_support; /* 160 */ u8 raw_rpmb_size_mult; /* 168 */ u8 raw_erased_mem_count; /* 181 */ + u8 strobe_support; /* 184 */ u8 raw_ext_csd_structure; /* 194 */ u8 raw_card_type; /* 196 */ u8 raw_driver_strength; /* 197 */ @@ -279,6 +280,7 @@ struct mmc_card { #define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */ #define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */ #define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */ +#define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */ unsigned int erase_size; /* erase size in sectors */ @@ -353,6 +355,9 @@ struct mmc_fixup { /* SDIO-specfic fields. 
You can use SDIO_ANY_ID here of course */ u16 cis_vendor, cis_device; + /* for MMC cards */ + unsigned int ext_csd_rev; + void (*vendor_fixup)(struct mmc_card *card, int data); int data; }; @@ -361,11 +366,20 @@ struct mmc_fixup { #define CID_OEMID_ANY ((unsigned short) -1) #define CID_NAME_ANY (NULL) +#define EXT_CSD_REV_ANY (-1u) + +#define CID_MANFID_SANDISK 0x2 +#define CID_MANFID_TOSHIBA 0x11 +#define CID_MANFID_MICRON 0x13 +#define CID_MANFID_SAMSUNG 0x15 +#define CID_MANFID_KINGSTON 0x70 +#define CID_MANFID_HYNIX 0x90 + #define END_FIXUP { NULL } #define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \ _cis_vendor, _cis_device, \ - _fixup, _data) \ + _fixup, _data, _ext_csd_rev) \ { \ .name = (_name), \ .manfid = (_manfid), \ @@ -376,23 +390,30 @@ struct mmc_fixup { .cis_device = (_cis_device), \ .vendor_fixup = (_fixup), \ .data = (_data), \ + .ext_csd_rev = (_ext_csd_rev), \ } #define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end, \ - _fixup, _data) \ + _fixup, _data, _ext_csd_rev) \ _FIXUP_EXT(_name, _manfid, \ _oemid, _rev_start, _rev_end, \ SDIO_ANY_ID, SDIO_ANY_ID, \ - _fixup, _data) \ + _fixup, _data, _ext_csd_rev) \ #define MMC_FIXUP(_name, _manfid, _oemid, _fixup, _data) \ - MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data) + MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data, \ + EXT_CSD_REV_ANY) + +#define MMC_FIXUP_EXT_CSD_REV(_name, _manfid, _oemid, _fixup, _data, \ + _ext_csd_rev) \ + MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data, \ + _ext_csd_rev) #define SDIO_FIXUP(_vendor, _device, _fixup, _data) \ _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, \ CID_OEMID_ANY, 0, -1ull, \ _vendor, _device, \ - _fixup, _data) \ + _fixup, _data, EXT_CSD_REV_ANY) \ #define cid_rev(hwrev, fwrev, year, month) \ (((u64) hwrev) << 40 | \ @@ -511,6 +532,11 @@ static inline int mmc_card_broken_irq_polling(const struct mmc_card *c) return c->quirks & MMC_QUIRK_BROKEN_IRQ_POLLING; } +static inline int mmc_card_broken_hpi(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_BROKEN_HPI; +} + #define mmc_card_name(c) ((c)->cid.prod_name) #define mmc_card_id(c) (dev_name(&(c)->dev)) diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index 7b41c6db1bb6..83b0edfce471 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h @@ -36,7 +36,6 @@ enum { EVENT_XFER_COMPLETE, EVENT_DATA_COMPLETE, EVENT_DATA_ERROR, - EVENT_XFER_ERROR }; struct mmc_data; @@ -55,6 +54,7 @@ struct dw_mci_dma_slave { /** * struct dw_mci - MMC controller state shared between all slots * @lock: Spinlock protecting the queue and associated data. + * @irq_lock: Spinlock protecting the INTMASK setting. * @regs: Pointer to MMIO registers. * @fifo_reg: Pointer to MMIO registers for data FIFO * @sg: Scatterlist entry currently being processed by PIO code, if any. @@ -65,6 +65,9 @@ struct dw_mci_dma_slave { * @cmd: The command currently being sent to the card, or NULL. * @data: The data currently being transferred, or NULL if no data * transfer is in progress. + * @stop_abort: The command currently prepared for stoping transfer. + * @prev_blksz: The former transfer blksz record. + * @timing: Record of current ios timing. * @use_dma: Whether DMA channel is initialized or not. * @using_dma: Whether DMA is in use for the current transfer. * @dma_64bit_address: Whether DMA supports 64-bit address mode or not. @@ -72,7 +75,10 @@ struct dw_mci_dma_slave { * @sg_cpu: Virtual address of DMA buffer. 
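The new ext_csd_rev matching above lets a card quirk be scoped to one EXT_CSD revision. A hypothetical fixup-table entry as a sketch: CID_MANFID_HYNIX, CID_OEMID_ANY, MMC_QUIRK_BROKEN_HPI and END_FIXUP come from the hunks above, while add_quirk (the core's usual fixup callback) and the revision value 6 are assumptions here:

static const struct mmc_fixup mmc_ext_csd_fixups[] = {
	/* Apply the broken-HPI quirk only to parts reporting EXT_CSD rev 6. */
	MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX, CID_OEMID_ANY,
			      add_quirk, MMC_QUIRK_BROKEN_HPI, 6),
	END_FIXUP
};
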
* @dma_ops: Pointer to platform-specific DMA callbacks. * @cmd_status: Snapshot of SR taken upon completion of the current + * @ring_size: Buffer size for idma descriptors. * command. Only valid when EVENT_CMD_COMPLETE is pending. + * @dms: structure of slave-dma private data. + * @phy_regs: physical address of controller's register map * @data_status: Snapshot of SR taken upon completion of the current * data transfer. Only valid when EVENT_DATA_COMPLETE or * EVENT_DATA_ERROR is pending. @@ -80,7 +86,6 @@ struct dw_mci_dma_slave { * to be sent. * @dir_status: Direction of current transfer. * @tasklet: Tasklet running the request state machine. - * @card_tasklet: Tasklet handling card detect. * @pending_events: Bitmask of events flagged by the interrupt handler * to be processed by the tasklet. * @completed_events: Bitmask of events which the state machine has @@ -91,6 +96,7 @@ struct dw_mci_dma_slave { * rate and timeout calculations. * @current_speed: Configured rate of the controller. * @num_slots: Number of slots available. + * @fifoth_val: The value of FIFOTH register. * @verid: Denote Version ID. * @dev: Device associated with the MMC controller. * @pdata: Platform data associated with the MMC controller. @@ -106,10 +112,11 @@ struct dw_mci_dma_slave { * @part_buf: Simple buffer for partial fifo reads/writes. * @push_data: Pointer to FIFO push function. * @pull_data: Pointer to FIFO pull function. - * @quirks: Set of quirks that apply to specific versions of the IP. + * @vqmmc_enabled: Status of vqmmc, should be true or false. * @irq_flags: The flags to be passed to request_irq. * @irq: The irq value to be passed to request_irq. * @sdio_id0: Number of slot0 in the SDIO interrupt registers. + * @cmd11_timer: Timer for SD3.0 voltage switch over scheme. * @dto_timer: Timer for broken data transfer over scheme. * * Locking @@ -210,9 +217,6 @@ struct dw_mci { void (*push_data)(struct dw_mci *host, void *buf, int cnt); void (*pull_data)(struct dw_mci *host, void *buf, int cnt); - /* Workaround flags */ - u32 quirks; - bool vqmmc_enabled; unsigned long irq_flags; /* IRQ flags */ int irq; @@ -234,17 +238,12 @@ struct dw_mci_dma_ops { void (*exit)(struct dw_mci *host); }; -/* IP Quirks/flags. */ -/* Timer for broken data transfer over scheme */ -#define DW_MCI_QUIRK_BROKEN_DTO BIT(0) - struct dma_pdata; /* Board platform data */ struct dw_mci_board { u32 num_slots; - u32 quirks; /* Workaround / Quirk flags */ unsigned int bus_hz; /* Clock speed at the cclk_in pad */ u32 caps; /* Capabilities */ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 8dd4d290ab0d..aa4bfbf129e4 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -19,6 +19,7 @@ #include <linux/mmc/core.h> #include <linux/mmc/card.h> +#include <linux/mmc/mmc.h> #include <linux/mmc/pm.h> struct mmc_ios { @@ -77,6 +78,8 @@ struct mmc_ios { #define MMC_SET_DRIVER_TYPE_A 1 #define MMC_SET_DRIVER_TYPE_C 2 #define MMC_SET_DRIVER_TYPE_D 3 + + bool enhanced_strobe; /* hs400es selection */ }; struct mmc_host_ops { @@ -93,28 +96,39 @@ struct mmc_host_ops { void (*pre_req)(struct mmc_host *host, struct mmc_request *req, bool is_first_req); void (*request)(struct mmc_host *host, struct mmc_request *req); + + /* + * Avoid calling the next three functions too often or in a "fast + * path", since underlaying controller might implement them in an + * expensive and/or slow way. Also note that these functions might + * sleep, so don't call them in the atomic contexts! 
+ */ + + /* + * Notes to the set_ios callback: + * ios->clock might be 0. For some controllers, setting 0Hz + * as any other frequency works. However, some controllers + * explicitly need to disable the clock. Otherwise e.g. voltage + * switching might fail because the SDCLK is not really quiet. + */ + void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios); + /* - * Avoid calling these three functions too often or in a "fast path", - * since underlaying controller might implement them in an expensive - * and/or slow way. - * - * Also note that these functions might sleep, so don't call them - * in the atomic contexts! - * * Return values for the get_ro callback should be: * 0 for a read/write card * 1 for a read-only card * -ENOSYS when not supported (equal to NULL callback) * or a negative errno value when something bad happened - * + */ + int (*get_ro)(struct mmc_host *host); + + /* * Return values for the get_cd callback should be: * 0 for a absent card * 1 for a present card * -ENOSYS when not supported (equal to NULL callback) * or a negative errno value when something bad happened */ - void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios); - int (*get_ro)(struct mmc_host *host); int (*get_cd)(struct mmc_host *host); void (*enable_sdio_irq)(struct mmc_host *host, int enable); @@ -132,6 +146,9 @@ struct mmc_host_ops { /* Prepare HS400 target operating frequency depending host driver */ int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios); + /* Prepare enhanced strobe depending host driver */ + void (*hs400_enhanced_strobe)(struct mmc_host *host, + struct mmc_ios *ios); int (*select_drive_strength)(struct mmc_card *card, unsigned int max_dtr, int host_drv, int card_drv, int *drv_type); @@ -291,6 +308,9 @@ struct mmc_host { #define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17) #define MMC_CAP2_NO_WRITE_PROTECT (1 << 18) /* No physical write protect pin, assume that card is always read-write */ #define MMC_CAP2_NO_SDIO (1 << 19) /* Do not send SDIO commands during initialization */ +#define MMC_CAP2_HS400_ES (1 << 20) /* Host supports enhanced strobe */ +#define MMC_CAP2_NO_SD (1 << 21) /* Do not send SD commands during initialization */ +#define MMC_CAP2_NO_MMC (1 << 22) /* Do not send (e)MMC commands during initialization */ mmc_pm_flag_t pm_caps; /* supported pm features */ @@ -318,6 +338,7 @@ struct mmc_host { unsigned int can_retune:1; /* re-tuning can be used */ unsigned int doing_retune:1; /* re-tuning in progress */ unsigned int retune_now:1; /* do re-tuning at next req */ + unsigned int retune_paused:1; /* re-tuning is temporarily disabled */ int rescan_disable; /* disable card detection */ int rescan_entered; /* used with nonremovable devices */ @@ -501,6 +522,11 @@ static inline bool mmc_card_hs400(struct mmc_card *card) return card->host->ios.timing == MMC_TIMING_MMC_HS400; } +static inline bool mmc_card_hs400es(struct mmc_card *card) +{ + return card->host->ios.enhanced_strobe; +} + void mmc_retune_timer_stop(struct mmc_host *host); static inline void mmc_retune_needed(struct mmc_host *host) @@ -515,4 +541,7 @@ static inline void mmc_retune_recheck(struct mmc_host *host) host->retune_now = 1; } +void mmc_retune_pause(struct mmc_host *host); +void mmc_retune_unpause(struct mmc_host *host); + #endif /* LINUX_MMC_HOST_H */ diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index 15f2c4a0a62c..c376209c70ef 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -297,6 +297,7 @@ struct _mmc_csd { #define EXT_CSD_PART_CONFIG 179 /* R/W 
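Following the get_ro/get_cd return conventions spelled out in the mmc_host_ops comments above, a host driver backed by slot GPIOs might wire the callbacks like this sketch; mmc_gpio_get_cd()/mmc_gpio_get_ro() are the slot-gpio.h helpers, which already follow the same convention:

static int myhost_get_cd(struct mmc_host *mmc)
{
	/* 1: card present, 0: absent, negative errno on failure. */
	return mmc_gpio_get_cd(mmc);
}

static int myhost_get_ro(struct mmc_host *mmc)
{
	/* 1: read-only, 0: read/write, -ENOSYS when there is no WP pin. */
	return mmc_gpio_get_ro(mmc);
}
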
*/ #define EXT_CSD_ERASED_MEM_CONT 181 /* RO */ #define EXT_CSD_BUS_WIDTH 183 /* R/W */ +#define EXT_CSD_STROBE_SUPPORT 184 /* RO */ #define EXT_CSD_HS_TIMING 185 /* R/W */ #define EXT_CSD_POWER_CLASS 187 /* R/W */ #define EXT_CSD_REV 192 /* RO */ @@ -380,12 +381,14 @@ struct _mmc_csd { #define EXT_CSD_CARD_TYPE_HS400_1_2V (1<<7) /* Card can run at 200MHz DDR, 1.2V */ #define EXT_CSD_CARD_TYPE_HS400 (EXT_CSD_CARD_TYPE_HS400_1_8V | \ EXT_CSD_CARD_TYPE_HS400_1_2V) +#define EXT_CSD_CARD_TYPE_HS400ES (1<<8) /* Card can run at HS400ES */ #define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */ #define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */ #define EXT_CSD_BUS_WIDTH_8 2 /* Card is in 8 bit mode */ #define EXT_CSD_DDR_BUS_WIDTH_4 5 /* Card is in 4 bit DDR mode */ #define EXT_CSD_DDR_BUS_WIDTH_8 6 /* Card is in 8 bit DDR mode */ +#define EXT_CSD_BUS_WIDTH_STROBE BIT(7) /* Enhanced strobe mode */ #define EXT_CSD_TIMING_BC 0 /* Backwards compatility */ #define EXT_CSD_TIMING_HS 1 /* High speed */ diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 83430f2ea757..0d126aeb3ec0 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -36,6 +36,7 @@ #define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 #define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 #define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 +#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356 #define SDIO_VENDOR_ID_INTEL 0x0089 #define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402 diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h deleted file mode 100644 index 95d6f0314a7d..000000000000 --- a/include/linux/mmc/sh_mobile_sdhi.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef LINUX_MMC_SH_MOBILE_SDHI_H -#define LINUX_MMC_SH_MOBILE_SDHI_H - -#include <linux/types.h> - -#define SH_MOBILE_SDHI_IRQ_CARD_DETECT "card_detect" -#define SH_MOBILE_SDHI_IRQ_SDCARD "sdcard" -#define SH_MOBILE_SDHI_IRQ_SDIO "sdio" - -#endif /* LINUX_MMC_SH_MOBILE_SDHI_H */ diff --git a/include/linux/mmc/tmio.h b/include/linux/mmc/tmio.h deleted file mode 100644 index 5f5cd80e9765..000000000000 --- a/include/linux/mmc/tmio.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * include/linux/mmc/tmio.h - * - * Copyright (C) 2016 Sang Engineering, Wolfram Sang - * Copyright (C) 2015-16 Renesas Electronics Corporation - * Copyright (C) 2007 Ian Molton - * Copyright (C) 2004 Ian Molton - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Driver for the MMC / SD / SDIO cell found in: - * - * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 - */ -#ifndef LINUX_MMC_TMIO_H -#define LINUX_MMC_TMIO_H - -#define CTL_SD_CMD 0x00 -#define CTL_ARG_REG 0x04 -#define CTL_STOP_INTERNAL_ACTION 0x08 -#define CTL_XFER_BLK_COUNT 0xa -#define CTL_RESPONSE 0x0c -#define CTL_STATUS 0x1c -#define CTL_STATUS2 0x1e -#define CTL_IRQ_MASK 0x20 -#define CTL_SD_CARD_CLK_CTL 0x24 -#define CTL_SD_XFER_LEN 0x26 -#define CTL_SD_MEM_CARD_OPT 0x28 -#define CTL_SD_ERROR_DETAIL_STATUS 0x2c -#define CTL_SD_DATA_PORT 0x30 -#define CTL_TRANSACTION_CTL 0x34 -#define CTL_SDIO_STATUS 0x36 -#define CTL_SDIO_IRQ_MASK 0x38 -#define CTL_DMA_ENABLE 0xd8 -#define CTL_RESET_SD 0xe0 -#define CTL_VERSION 0xe2 -#define CTL_SDIO_REGS 0x100 -#define CTL_CLK_AND_WAIT_CTL 0x138 -#define CTL_RESET_SDIO 0x1e0 - -/* Definitions for values the CTRL_STATUS register can take. 
*/ -#define TMIO_STAT_CMDRESPEND 0x00000001 -#define TMIO_STAT_DATAEND 0x00000004 -#define TMIO_STAT_CARD_REMOVE 0x00000008 -#define TMIO_STAT_CARD_INSERT 0x00000010 -#define TMIO_STAT_SIGSTATE 0x00000020 -#define TMIO_STAT_WRPROTECT 0x00000080 -#define TMIO_STAT_CARD_REMOVE_A 0x00000100 -#define TMIO_STAT_CARD_INSERT_A 0x00000200 -#define TMIO_STAT_SIGSTATE_A 0x00000400 -#define TMIO_STAT_CMD_IDX_ERR 0x00010000 -#define TMIO_STAT_CRCFAIL 0x00020000 -#define TMIO_STAT_STOPBIT_ERR 0x00040000 -#define TMIO_STAT_DATATIMEOUT 0x00080000 -#define TMIO_STAT_RXOVERFLOW 0x00100000 -#define TMIO_STAT_TXUNDERRUN 0x00200000 -#define TMIO_STAT_CMDTIMEOUT 0x00400000 -#define TMIO_STAT_RXRDY 0x01000000 -#define TMIO_STAT_TXRQ 0x02000000 -#define TMIO_STAT_ILL_FUNC 0x20000000 -#define TMIO_STAT_CMD_BUSY 0x40000000 -#define TMIO_STAT_ILL_ACCESS 0x80000000 - -#define CLK_CTL_DIV_MASK 0xff -#define CLK_CTL_SCLKEN BIT(8) - -#define TMIO_BBS 512 /* Boot block size */ - -#endif /* LINUX_MMC_TMIO_H */ diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index de7be78c6f0e..451a811f48f2 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -39,6 +39,7 @@ void dump_mm(const struct mm_struct *mm); #define VM_WARN_ON(cond) WARN_ON(cond) #define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) #define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) +#define VM_WARN(cond, format...) WARN(cond, format) #else #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) @@ -47,6 +48,7 @@ void dump_mm(const struct mm_struct *mm); #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) +#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond) #endif #ifdef CONFIG_DEBUG_VIRTUAL diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h index 70fffeba7495..a4441784503b 100644 --- a/include/linux/mmu_context.h +++ b/include/linux/mmu_context.h @@ -1,9 +1,16 @@ #ifndef _LINUX_MMU_CONTEXT_H #define _LINUX_MMU_CONTEXT_H +#include <asm/mmu_context.h> + struct mm_struct; void use_mm(struct mm_struct *mm); void unuse_mm(struct mm_struct *mm); +/* Architectures that care about IRQ state in switch_mm can override this. */ +#ifndef switch_mm_irqs_off +# define switch_mm_irqs_off switch_mm +#endif + #endif diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index c60df9257cc7..7f2ae99e5daf 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -68,8 +68,10 @@ extern char * const migratetype_names[MIGRATE_TYPES]; #ifdef CONFIG_CMA # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) +# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA) #else # define is_migrate_cma(migratetype) false +# define is_migrate_cma_page(_page) false #endif #define for_each_migratetype_order(order, type) \ @@ -85,13 +87,6 @@ extern int page_group_by_mobility_disabled; get_pfnblock_flags_mask(page, page_to_pfn(page), \ PB_migrate_end, MIGRATETYPE_MASK) -static inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn) -{ - BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2); - return get_pfnblock_flags_mask(page, pfn, PB_migrate_end, - MIGRATETYPE_MASK); -} - struct free_area { struct list_head free_list[MIGRATE_TYPES]; unsigned long nr_free; @@ -100,7 +95,7 @@ struct free_area { struct pglist_data; /* - * zone->lock and zone->lru_lock are two of the hottest locks in the kernel. 
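The mmu_context.h fallback above means an architecture opts in simply by defining the symbol before the generic header is seen; the pattern (as used on x86, with the signature matching switch_mm()) looks like:

/* In the arch's <asm/mmu_context.h>: */
extern void switch_mm_irqs_off(struct mm_struct *prev,
			       struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off	/* suppress the fallback */
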
+ * zone->lock and the zone lru_lock are two of the hottest locks in the kernel. * So add a wild amount of padding here to ensure that they fall into separate * cachelines. There are very few zone structures in the machine, so space * consumption is not a concern here. @@ -117,36 +112,23 @@ struct zone_padding { enum zone_stat_item { /* First 128 byte cacheline (assuming 64 bit words) */ NR_FREE_PAGES, - NR_ALLOC_BATCH, - NR_LRU_BASE, - NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ - NR_ACTIVE_ANON, /* " " " " " */ - NR_INACTIVE_FILE, /* " " " " " */ - NR_ACTIVE_FILE, /* " " " " " */ - NR_UNEVICTABLE, /* " " " " " */ + NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */ + NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE, + NR_ZONE_ACTIVE_ANON, + NR_ZONE_INACTIVE_FILE, + NR_ZONE_ACTIVE_FILE, + NR_ZONE_UNEVICTABLE, + NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */ NR_MLOCK, /* mlock()ed pages found and moved off LRU */ - NR_ANON_PAGES, /* Mapped anonymous pages */ - NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. - only modified from process context */ - NR_FILE_PAGES, - NR_FILE_DIRTY, - NR_WRITEBACK, NR_SLAB_RECLAIMABLE, NR_SLAB_UNRECLAIMABLE, NR_PAGETABLE, /* used for pagetables */ - NR_KERNEL_STACK, + NR_KERNEL_STACK_KB, /* measured in KiB */ /* Second 128 byte cacheline */ - NR_UNSTABLE_NFS, /* NFS unstable pages */ NR_BOUNCE, - NR_VMSCAN_WRITE, - NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ - NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ - NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ - NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ - NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ - NR_DIRTIED, /* page dirtyings since bootup */ - NR_WRITTEN, /* page writings since bootup */ - NR_PAGES_SCANNED, /* pages scanned since last reclaim */ +#if IS_ENABLED(CONFIG_ZSMALLOC) + NR_ZSPAGES, /* allocated in zsmalloc */ +#endif #ifdef CONFIG_NUMA NUMA_HIT, /* allocated in intended node */ NUMA_MISS, /* allocated in non intended node */ @@ -155,12 +137,40 @@ enum zone_stat_item { NUMA_LOCAL, /* allocation from local node */ NUMA_OTHER, /* allocation from other node */ #endif + NR_FREE_CMA_PAGES, + NR_VM_ZONE_STAT_ITEMS }; + +enum node_stat_item { + NR_LRU_BASE, + NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ + NR_ACTIVE_ANON, /* " " " " " */ + NR_INACTIVE_FILE, /* " " " " " */ + NR_ACTIVE_FILE, /* " " " " " */ + NR_UNEVICTABLE, /* " " " " " */ + NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ + NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ + NR_PAGES_SCANNED, /* pages scanned since last reclaim */ WORKINGSET_REFAULT, WORKINGSET_ACTIVATE, WORKINGSET_NODERECLAIM, - NR_ANON_TRANSPARENT_HUGEPAGES, - NR_FREE_CMA_PAGES, - NR_VM_ZONE_STAT_ITEMS }; + NR_ANON_MAPPED, /* Mapped anonymous pages */ + NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. 
+ only modified from process context */ + NR_FILE_PAGES, + NR_FILE_DIRTY, + NR_WRITEBACK, + NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ + NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ + NR_SHMEM_THPS, + NR_SHMEM_PMDMAPPED, + NR_ANON_THPS, + NR_UNSTABLE_NFS, /* NFS unstable pages */ + NR_VMSCAN_WRITE, + NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ + NR_DIRTIED, /* page dirtyings since bootup */ + NR_WRITTEN, /* page writings since bootup */ + NR_VM_NODE_STAT_ITEMS +}; /* * We do arithmetic on the LRU lists in various places in the code, @@ -217,7 +227,7 @@ struct lruvec { /* Evictions & activations on the inactive file list */ atomic_long_t inactive_age; #ifdef CONFIG_MEMCG - struct zone *zone; + struct pglist_data *pgdat; #endif }; @@ -269,6 +279,11 @@ struct per_cpu_pageset { #endif }; +struct per_cpu_nodestat { + s8 stat_threshold; + s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS]; +}; + #endif /* !__GENERATING_BOUNDS.H */ enum zone_type { @@ -350,22 +365,9 @@ struct zone { #ifdef CONFIG_NUMA int node; #endif - - /* - * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on - * this zone's LRU. Maintained by the pageout code. - */ - unsigned int inactive_ratio; - struct pglist_data *zone_pgdat; struct per_cpu_pageset __percpu *pageset; - /* - * This is a per-zone reserve of pages that are not available - * to userspace allocations. - */ - unsigned long totalreserve_pages; - #ifndef CONFIG_SPARSEMEM /* * Flags for a pageblock_nr_pages block. See pageblock-flags.h. @@ -374,14 +376,6 @@ struct zone { unsigned long *pageblock_flags; #endif /* CONFIG_SPARSEMEM */ -#ifdef CONFIG_NUMA - /* - * zone reclaim becomes active if more unmapped pages exist. - */ - unsigned long min_unmapped_pages; - unsigned long min_slab_pages; -#endif /* CONFIG_NUMA */ - /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ unsigned long zone_start_pfn; @@ -474,24 +468,21 @@ struct zone { unsigned long wait_table_hash_nr_entries; unsigned long wait_table_bits; + /* Write-intensive fields used from the page allocator */ ZONE_PADDING(_pad1_) + /* free areas of different sizes */ struct free_area free_area[MAX_ORDER]; /* zone flags, see below */ unsigned long flags; - /* Write-intensive fields used from the page allocator */ + /* Primarily protects free_area */ spinlock_t lock; + /* Write-intensive fields used by compaction and vmstats. */ ZONE_PADDING(_pad2_) - /* Write-intensive fields used by page reclaim */ - - /* Fields commonly accessed by the page reclaim scanner */ - spinlock_t lru_lock; - struct lruvec lruvec; - /* * When free pages are below this point, additional steps are taken * when reading the number of free pages to avoid per-cpu counter @@ -529,20 +520,18 @@ struct zone { atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; } ____cacheline_internodealigned_in_smp; -enum zone_flags { - ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */ - ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */ - ZONE_CONGESTED, /* zone has many dirty pages backed by +enum pgdat_flags { + PGDAT_CONGESTED, /* pgdat has many dirty pages backed by * a congested BDI */ - ZONE_DIRTY, /* reclaim scanning has recently found + PGDAT_DIRTY, /* reclaim scanning has recently found * many dirty file pages at the tail * of the LRU. 
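With the stat split above, LRU, dirty and writeback counters now live on the pgdat while the buddy allocator's counters stay on the zone; a reader sketch, where zone_page_state() is the long-standing zone accessor and a matching node_page_state(pgdat, item) reader is assumed:

	/* Zone level: what the page allocator still tracks per zone. */
	unsigned long free = zone_page_state(zone, NR_FREE_PAGES);

	/* Node level: LRU/writeback accounting moved to the pgdat. */
	unsigned long dirty = node_page_state(zone->zone_pgdat, NR_FILE_DIRTY);
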
*/ - ZONE_WRITEBACK, /* reclaim scanning has recently found + PGDAT_WRITEBACK, /* reclaim scanning has recently found * many pages under writeback */ - ZONE_FAIR_DEPLETED, /* fair zone policy batch depleted */ + PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */ }; static inline unsigned long zone_end_pfn(const struct zone *zone) @@ -666,8 +655,9 @@ typedef struct pglist_data { wait_queue_head_t pfmemalloc_wait; struct task_struct *kswapd; /* Protected by mem_hotplug_begin/end() */ - int kswapd_max_order; - enum zone_type classzone_idx; + int kswapd_order; + enum zone_type kswapd_classzone_idx; + #ifdef CONFIG_COMPACTION int kcompactd_max_order; enum zone_type kcompactd_classzone_idx; @@ -684,6 +674,23 @@ typedef struct pglist_data { /* Number of pages migrated during the rate limiting time interval */ unsigned long numabalancing_migrate_nr_pages; #endif + /* + * This is a per-node reserve of pages that are not available + * to userspace allocations. + */ + unsigned long totalreserve_pages; + +#ifdef CONFIG_NUMA + /* + * zone reclaim becomes active if more unmapped pages exist. + */ + unsigned long min_unmapped_pages; + unsigned long min_slab_pages; +#endif /* CONFIG_NUMA */ + + /* Write-intensive fields used by page reclaim */ + ZONE_PADDING(_pad1_) + spinlock_t lru_lock; #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT /* @@ -698,6 +705,23 @@ typedef struct pglist_data { struct list_head split_queue; unsigned long split_queue_len; #endif + + /* Fields commonly accessed by the page reclaim scanner */ + struct lruvec lruvec; + + /* + * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on + * this node's LRU. Maintained by the pageout code. + */ + unsigned int inactive_ratio; + + unsigned long flags; + + ZONE_PADDING(_pad2_) + + /* Per-node vmstats */ + struct per_cpu_nodestat __percpu *per_cpu_nodestats; + atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS]; } pg_data_t; #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) @@ -711,6 +735,15 @@ typedef struct pglist_data { #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) #define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid)) +static inline spinlock_t *zone_lru_lock(struct zone *zone) +{ + return &zone->zone_pgdat->lru_lock; +} + +static inline struct lruvec *node_lruvec(struct pglist_data *pgdat) +{ + return &pgdat->lruvec; +} static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat) { @@ -746,8 +779,12 @@ static inline bool is_dev_zone(const struct zone *zone) extern struct mutex zonelists_mutex; void build_all_zonelists(pg_data_t *pgdat, struct zone *zone); void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); +bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, + int classzone_idx, unsigned int alloc_flags, + long free_pages); bool zone_watermark_ok(struct zone *z, unsigned int order, - unsigned long mark, int classzone_idx, int alloc_flags); + unsigned long mark, int classzone_idx, + unsigned int alloc_flags); bool zone_watermark_ok_safe(struct zone *z, unsigned int order, unsigned long mark, int classzone_idx); enum memmap_context { @@ -759,12 +796,12 @@ extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, extern void lruvec_init(struct lruvec *lruvec); -static inline struct zone *lruvec_zone(struct lruvec *lruvec) +static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) { #ifdef CONFIG_MEMCG - return lruvec->zone; + return lruvec->pgdat; #else - return container_of(lruvec, struct zone, lruvec); + return 
container_of(lruvec, struct pglist_data, lruvec); #endif } @@ -791,9 +828,21 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); */ #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) -static inline int populated_zone(struct zone *zone) +/* + * Returns true if a zone has pages managed by the buddy allocator. + * All the reclaim decisions have to use this function rather than + * populated_zone(). If the whole zone is reserved then we can easily + * end up with populated_zone() && !managed_zone(). + */ +static inline bool managed_zone(struct zone *zone) +{ + return zone->managed_pages; +} + +/* Returns true if a zone has memory */ +static inline bool populated_zone(struct zone *zone) { - return (!!zone->present_pages); + return zone->present_pages; } extern int movable_zone; @@ -828,10 +877,7 @@ static inline int is_highmem_idx(enum zone_type idx) static inline int is_highmem(struct zone *zone) { #ifdef CONFIG_HIGHMEM - int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones; - return zone_off == ZONE_HIGHMEM * sizeof(*zone) || - (zone_off == ZONE_MOVABLE * sizeof(*zone) && - zone_movable_is_highmem()); + return is_highmem_idx(zone_idx(zone)); #else return 0; #endif @@ -922,6 +968,10 @@ static inline int zonelist_node_idx(struct zoneref *zoneref) #endif /* CONFIG_NUMA */ } +struct zoneref *__next_zones_zonelist(struct zoneref *z, + enum zone_type highest_zoneidx, + nodemask_t *nodes); + /** * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point * @z - The cursor used as a starting point for the search @@ -934,9 +984,14 @@ static inline int zonelist_node_idx(struct zoneref *zoneref) * being examined. It should be advanced by one before calling * next_zones_zonelist again. 
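Tying the cursor contract just described to the new managed_zone() helper, a reclaim-style walk looks like this sketch (zonelist and highest_zoneidx are assumed; the advance-by-one step is exactly what the kernel-doc above requires, and it mirrors the iteration macros in this hunk):

	struct zoneref *z;
	struct zone *zone;

	z = first_zones_zonelist(zonelist, highest_zoneidx, NULL);
	for (zone = zonelist_zone(z); zone;
	     z = next_zones_zonelist(++z, highest_zoneidx, NULL),
	     zone = zonelist_zone(z)) {
		if (!managed_zone(zone))
			continue;	/* reserved-only zone: skip it */
		/* ... scan this zone ... */
	}
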
*/ -struct zoneref *next_zones_zonelist(struct zoneref *z, +static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z, enum zone_type highest_zoneidx, - nodemask_t *nodes); + nodemask_t *nodes) +{ + if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx)) + return z; + return __next_zones_zonelist(z, highest_zoneidx, nodes); +} /** * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist @@ -952,13 +1007,10 @@ struct zoneref *next_zones_zonelist(struct zoneref *z, */ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, enum zone_type highest_zoneidx, - nodemask_t *nodes, - struct zone **zone) + nodemask_t *nodes) { - struct zoneref *z = next_zones_zonelist(zonelist->_zonerefs, + return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes); - *zone = zonelist_zone(z); - return z; } /** @@ -973,10 +1025,17 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, * within a given nodemask */ #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ - for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \ + for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \ zone; \ z = next_zones_zonelist(++z, highidx, nodemask), \ - zone = zonelist_zone(z)) \ + zone = zonelist_zone(z)) + +#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ + for (zone = z->zone; \ + zone; \ + z = next_zones_zonelist(++z, highidx, nodemask), \ + zone = zonelist_zone(z)) + /** * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index @@ -1056,7 +1115,7 @@ struct mem_section { unsigned long *pageblock_flags; #ifdef CONFIG_PAGE_EXTENSION /* - * If !SPARSEMEM, pgdat doesn't have page_ext pointer. We use + * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use * section. (see page_ext.h about this.) */ struct page_ext *page_ext; diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 6e4c645e1c0d..ed84c07f6a51 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -657,4 +657,20 @@ struct ulpi_device_id { kernel_ulong_t driver_data; }; +/** + * struct fsl_mc_device_id - MC object device identifier + * @vendor: vendor ID + * @obj_type: MC object type + * @ver_major: MC object version major number + * @ver_minor: MC object version minor number + * + * Type of entries in the "device Id" table for MC object devices supported by + * a MC object device driver. The last entry of the table has vendor set to 0x0 + */ +struct fsl_mc_device_id { + __u16 vendor; + const char obj_type[16]; +}; + + #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/module.h b/include/linux/module.h index 2bb0c3085706..0c3207d26ac0 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -18,6 +18,7 @@ #include <linux/moduleparam.h> #include <linux/jump_label.h> #include <linux/export.h> +#include <linux/extable.h> /* only as arch move module.h -> extable.h */ #include <linux/rbtree_latch.h> #include <linux/percpu.h> @@ -37,6 +38,7 @@ struct modversion_info { }; struct module; +struct exception_table_entry; struct module_kobject { struct kobject kobj; @@ -155,18 +157,6 @@ extern void cleanup_module(void); #define __INITRODATA_OR_MODULE __INITRODATA #endif /*CONFIG_MODULES*/ -/* Archs provide a method of finding the correct exception table. 
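A driver match table using the new fsl_mc_device_id entry type might look like this sketch; the object-type string is illustrative, FSL_MC_VENDOR_FREESCALE is assumed to exist in the bus driver's headers, and the zero-vendor terminator follows the kernel-doc above:

static const struct fsl_mc_device_id mydrv_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",
	},
	{ .vendor = 0x0 }	/* last entry: vendor set to 0x0 */
};
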
*/ -struct exception_table_entry; - -const struct exception_table_entry * -search_extable(const struct exception_table_entry *first, - const struct exception_table_entry *last, - unsigned long value); -void sort_extable(struct exception_table_entry *start, - struct exception_table_entry *finish); -void sort_main_extable(void); -void trim_init_extable(struct module *m); - /* Generic info of form tag = "info" */ #define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info) @@ -268,9 +258,6 @@ extern const typeof(name) __mod_##type##__##name##_device_table \ * files require multiple MODULE_FIRMWARE() specifiers */ #define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware) -/* Given an address, look for it in the exception tables */ -const struct exception_table_entry *search_exception_tables(unsigned long add); - struct notifier_block; #ifdef CONFIG_MODULES @@ -311,6 +298,8 @@ struct module_layout { unsigned int text_size; /* Size of RO section of the module (text+rodata) */ unsigned int ro_size; + /* Size of RO after init section */ + unsigned int ro_after_init_size; #ifdef CONFIG_MODULES_TREE_LOOKUP struct mod_tree_node mtn; @@ -330,6 +319,15 @@ struct mod_kallsyms { char *strtab; }; +#ifdef CONFIG_LIVEPATCH +struct klp_modinfo { + Elf_Ehdr hdr; + Elf_Shdr *sechdrs; + char *secstrings; + unsigned int symndx; +}; +#endif + struct module { enum module_state state; @@ -456,7 +454,11 @@ struct module { #endif #ifdef CONFIG_LIVEPATCH + bool klp; /* Is this a livepatch module? */ bool klp_alive; + + /* Elf information */ + struct klp_modinfo *klp_info; #endif #ifdef CONFIG_MODULE_UNLOAD @@ -562,8 +564,8 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, unsigned long), void *data); -extern void __module_put_and_exit(struct module *mod, long code) - __attribute__((noreturn)); +extern void __noreturn __module_put_and_exit(struct module *mod, + long code); #define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code) #ifdef CONFIG_MODULE_UNLOAD @@ -617,9 +619,6 @@ const char *module_address_lookup(unsigned long addr, int lookup_module_symbol_name(unsigned long addr, char *symname); int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); -/* For extable.c to search modules' exception tables. */ -const struct exception_table_entry *search_module_extables(unsigned long addr); - int register_module_notifier(struct notifier_block *nb); int unregister_module_notifier(struct notifier_block *nb); @@ -630,14 +629,19 @@ static inline bool module_requested_async_probing(struct module *module) return module && module->async_probe_requested; } -#else /* !CONFIG_MODULES... */ - -/* Given an address, look for it in the exception tables. */ -static inline const struct exception_table_entry * -search_module_extables(unsigned long addr) +#ifdef CONFIG_LIVEPATCH +static inline bool is_livepatch_module(struct module *mod) { - return NULL; + return mod->klp; } +#else /* !CONFIG_LIVEPATCH */ +static inline bool is_livepatch_module(struct module *mod) +{ + return false; +} +#endif /* CONFIG_LIVEPATCH */ + +#else /* !CONFIG_MODULES... 
*/ static inline struct module *__module_address(unsigned long addr) { @@ -763,12 +767,12 @@ extern int module_sysfs_initialized; #ifdef CONFIG_DEBUG_SET_MODULE_RONX extern void set_all_modules_text_rw(void); extern void set_all_modules_text_ro(void); -extern void module_enable_ro(const struct module *mod); +extern void module_enable_ro(const struct module *mod, bool after_init); extern void module_disable_ro(const struct module *mod); #else static inline void set_all_modules_text_rw(void) { } static inline void set_all_modules_text_ro(void) { } -static inline void module_enable_ro(const struct module *mod) { } +static inline void module_enable_ro(const struct module *mod, bool after_init) { } static inline void module_disable_ro(const struct module *mod) { } #endif diff --git a/include/linux/mount.h b/include/linux/mount.h index f822c3c11377..54a594d49733 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -81,6 +81,7 @@ extern void mntput(struct vfsmount *mnt); extern struct vfsmount *mntget(struct vfsmount *mnt); extern struct vfsmount *mnt_clone_internal(struct path *path); extern int __mnt_is_readonly(struct vfsmount *mnt); +extern bool mnt_may_suid(struct vfsmount *mnt); struct path; extern struct vfsmount *clone_private_mount(struct path *path); diff --git a/include/linux/mpi.h b/include/linux/mpi.h index 3a5abe95affd..1cc5ffb769af 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h @@ -80,8 +80,7 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign); int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, int *sign); void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign); -int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign); -int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned *nbytes, +int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes, int *sign); #define log_mpidump g10_log_mpidump diff --git a/include/linux/mroute.h b/include/linux/mroute.h index bf9b322cb0b0..e5fb81376e92 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -104,6 +104,7 @@ struct mfc_cache { unsigned long bytes; unsigned long pkt; unsigned long wrong_if; + unsigned long lastuse; unsigned char ttls[MAXVIFS]; /* TTL thresholds */ } res; } mfc_un; @@ -119,5 +120,5 @@ struct mfc_cache { struct rtmsg; int ipmr_get_route(struct net *net, struct sk_buff *skb, __be32 saddr, __be32 daddr, - struct rtmsg *rtm, int nowait); + struct rtmsg *rtm, int nowait, u32 portid); #endif diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h index 66982e764051..19a1c0c2993b 100644 --- a/include/linux/mroute6.h +++ b/include/linux/mroute6.h @@ -92,6 +92,7 @@ struct mfc6_cache { unsigned long bytes; unsigned long pkt; unsigned long wrong_if; + unsigned long lastuse; unsigned char ttls[MAXMIFS]; /* TTL thresholds */ } res; } mfc_un; @@ -115,7 +116,7 @@ struct mfc6_cache { struct rtmsg; extern int ip6mr_get_route(struct net *net, struct sk_buff *skb, - struct rtmsg *rtm, int nowait); + struct rtmsg *rtm, int nowait, u32 portid); #ifdef CONFIG_IPV6_MROUTE extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb); diff --git a/include/linux/msi.h b/include/linux/msi.h index 8b425c66305a..e8c81fbd5f9c 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -47,6 +47,7 @@ struct fsl_mc_msi_desc { * @nvec_used: The number of vectors used * @dev: Pointer to the device which uses this descriptor * @msg: The last set MSI message cached for reuse + * @affinity: Optional pointer to a cpu affinity 
mask for this descriptor * * @masked: [PCI MSI/X] Mask bits * @is_msix: [PCI MSI/X] True if MSI-X @@ -67,6 +68,7 @@ struct msi_desc { unsigned int nvec_used; struct device *dev; struct msi_msg msg; + const struct cpumask *affinity; union { /* PCI MSI/X specific data */ @@ -264,12 +266,12 @@ enum { * callbacks. */ MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1), - /* Build identity map between hwirq and irq */ - MSI_FLAG_IDENTITY_MAP = (1 << 2), /* Support multiple PCI MSI interrupts */ - MSI_FLAG_MULTI_PCI_MSI = (1 << 3), + MSI_FLAG_MULTI_PCI_MSI = (1 << 2), /* Support PCI MSIX interrupts */ - MSI_FLAG_PCI_MSIX = (1 << 4), + MSI_FLAG_PCI_MSIX = (1 << 3), + /* Needs early activate, required for PCI */ + MSI_FLAG_ACTIVATE_EARLY = (1 << 4), }; int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h index c8be32e9fc49..ad3c3488073c 100644 --- a/include/linux/mtd/fsmc.h +++ b/include/linux/mtd/fsmc.h @@ -103,24 +103,6 @@ #define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ) -/* - * There are 13 bytes of ecc for every 512 byte block in FSMC version 8 - * and it has to be read consecutively and immediately after the 512 - * byte data block for hardware to generate the error bit offsets - * Managing the ecc bytes in the following way is easier. This way is - * similar to oobfree structure maintained already in u-boot nand driver - */ -#define MAX_ECCPLACE_ENTRIES 32 - -struct fsmc_nand_eccplace { - uint8_t offset; - uint8_t length; -}; - -struct fsmc_eccplace { - struct fsmc_nand_eccplace eccplace[MAX_ECCPLACE_ENTRIES]; -}; - struct fsmc_nand_timings { uint8_t tclr; uint8_t tar; diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index 5e0eb7ccabd4..3aa56e3104bb 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h @@ -122,18 +122,13 @@ #endif #ifdef CONFIG_MTD_MAP_BANK_WIDTH_32 -# ifdef map_bankwidth -# undef map_bankwidth -# define map_bankwidth(map) ((map)->bankwidth) -# undef map_bankwidth_is_large -# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) -# undef map_words -# define map_words(map) map_calc_words(map) -# else -# define map_bankwidth(map) 32 -# define map_bankwidth_is_large(map) (1) -# define map_words(map) map_calc_words(map) -# endif +/* always use indirect access for 256-bit to preserve kernel stack */ +# undef map_bankwidth +# define map_bankwidth(map) ((map)->bankwidth) +# undef map_bankwidth_is_large +# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) +# undef map_words +# define map_words(map) map_calc_words(map) #define map_bankwidth_is_32(map) (map_bankwidth(map) == 32) #undef MAX_MAP_BANKWIDTH #define MAX_MAP_BANKWIDTH 32 diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 771272187316..29a170612203 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -96,16 +96,35 @@ struct mtd_oob_ops { #define MTD_MAX_OOBFREE_ENTRIES_LARGE 32 #define MTD_MAX_ECCPOS_ENTRIES_LARGE 640 +/** + * struct mtd_oob_region - oob region definition + * @offset: region offset + * @length: region length + * + * This structure describes a region of the OOB area, and is used + * to retrieve ECC or free bytes sections. + * Each section is defined by an offset within the OOB area and a + * length. + */ +struct mtd_oob_region { + u32 offset; + u32 length; +}; + /* - * Internal ECC layout control structure. 
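
The struct mtd_oob_region just introduced is the building block for the mtd_ooblayout_ops callback interface that this hunk adds in place of the fixed-size nand_ecclayout tables. A hedged sketch of a driver-side layout, with an invented geometry (four 16-byte ECC sections behind a 2-byte bad-block marker; none of the demo_* names exist in the tree):

#include <linux/mtd/mtd.h>

/* Sketch: describe OOB placement through callbacks instead of arrays. */
static int demo_ooblayout_ecc(struct mtd_info *mtd, int section,
			      struct mtd_oob_region *oobecc)
{
	if (section >= 4)
		return -ERANGE;		/* past the last ECC section */
	oobecc->offset = 2 + section * 16;
	oobecc->length = 16;
	return 0;
}

static int demo_ooblayout_free(struct mtd_info *mtd, int section,
			       struct mtd_oob_region *oobfree)
{
	if (section)
		return -ERANGE;		/* single free region after the ECC bytes */
	oobfree->offset = 2 + 4 * 16;
	oobfree->length = mtd->oobsize - oobfree->offset;
	return 0;
}

static const struct mtd_ooblayout_ops demo_ooblayout_ops = {
	.ecc	= demo_ooblayout_ecc,
	.free	= demo_ooblayout_free,
};

A driver would install this with mtd_set_ooblayout(mtd, &demo_ooblayout_ops), the helper added further down in this hunk.
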
For historical reasons, there is a - * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained - * for export to user-space via the ECCGETLAYOUT ioctl. - * nand_ecclayout should be expandable in the future simply by the above macros. + * struct mtd_ooblayout_ops - NAND OOB layout operations + * @ecc: function returning an ECC region in the OOB area. + * Should return -ERANGE if %section exceeds the total number of + * ECC sections. + * @free: function returning a free region in the OOB area. + * Should return -ERANGE if %section exceeds the total number of + * free sections. */ -struct nand_ecclayout { - __u32 eccbytes; - __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE]; - struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE]; +struct mtd_ooblayout_ops { + int (*ecc)(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobecc); + int (*free)(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobfree); }; struct module; /* only needed for owner field in mtd_info */ @@ -166,8 +185,8 @@ struct mtd_info { const char *name; int index; - /* ECC layout structure pointer - read only! */ - struct nand_ecclayout *ecclayout; + /* OOB layout description */ + const struct mtd_ooblayout_ops *ooblayout; /* the ecc step size. */ unsigned int ecc_step_size; @@ -253,6 +272,30 @@ struct mtd_info { int usecount; }; +int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobecc); +int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte, + int *section, + struct mtd_oob_region *oobregion); +int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf, + const u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf, + u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobfree); +int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf, + const u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf, + u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_count_freebytes(struct mtd_info *mtd); +int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd); + +static inline void mtd_set_ooblayout(struct mtd_info *mtd, + const struct mtd_ooblayout_ops *ooblayout) +{ + mtd->ooblayout = ooblayout; +} + static inline void mtd_set_of_node(struct mtd_info *mtd, struct device_node *np) { @@ -283,17 +326,7 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops); - -static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to, - struct mtd_oob_ops *ops) -{ - ops->retlen = ops->oobretlen = 0; - if (!mtd->_write_oob) - return -EOPNOTSUPP; - if (!(mtd->flags & MTD_WRITEABLE)) - return -EROFS; - return mtd->_write_oob(mtd, to, ops); -} +int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops); int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, struct otp_info *buf); diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 56574ba36555..8dd6e01f45c0 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -116,9 +116,14 @@ typedef enum { NAND_ECC_HW, NAND_ECC_HW_SYNDROME, NAND_ECC_HW_OOB_FIRST, - NAND_ECC_SOFT_BCH, } nand_ecc_modes_t; +enum nand_ecc_algo { + NAND_ECC_UNKNOWN, + NAND_ECC_HAMMING, + NAND_ECC_BCH, +}; + /* * Constants for Hardware ECC */ @@ -458,6 +463,7 @@ struct 
nand_hw_control { /** * struct nand_ecc_ctrl - Control structure for ECC * @mode: ECC mode + * @algo: ECC algorithm * @steps: number of ECC steps per page * @size: data bytes per ECC step * @bytes: ECC bytes per step @@ -466,7 +472,6 @@ struct nand_hw_control { * @prepad: padding information for syndrome based ECC generators * @postpad: padding information for syndrome based ECC generators * @options: ECC specific options (see NAND_ECC_XXX flags defined above) - * @layout: ECC layout control struct pointer * @priv: pointer to private ECC control data * @hwctl: function to control hardware ECC generator. Must only * be provided if an hardware ECC is available @@ -508,6 +513,7 @@ struct nand_hw_control { */ struct nand_ecc_ctrl { nand_ecc_modes_t mode; + enum nand_ecc_algo algo; int steps; int size; int bytes; @@ -516,7 +522,6 @@ struct nand_ecc_ctrl { int prepad; int postpad; unsigned int options; - struct nand_ecclayout *layout; void *priv; void (*hwctl)(struct mtd_info *mtd, int mode); int (*calculate)(struct mtd_info *mtd, const uint8_t *dat, @@ -740,6 +745,9 @@ struct nand_chip { void *priv; }; +extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; +extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops; + static inline void nand_set_flash_node(struct nand_chip *chip, struct device_node *np) { @@ -775,6 +783,7 @@ static inline void nand_set_controller_data(struct nand_chip *chip, void *priv) * NAND Flash Manufacturer ID Codes */ #define NAND_MFR_TOSHIBA 0x98 +#define NAND_MFR_ESMT 0xc8 #define NAND_MFR_SAMSUNG 0xec #define NAND_MFR_FUJITSU 0x04 #define NAND_MFR_NATIONAL 0x8f @@ -1070,4 +1079,18 @@ int nand_check_erased_ecc_chunk(void *data, int datalen, void *ecc, int ecclen, void *extraoob, int extraooblen, int threshold); + +/* Default write_oob implementation */ +int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page); + +/* Default write_oob syndrome implementation */ +int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, + int page); + +/* Default read_oob implementation */ +int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page); + +/* Default read_oob syndrome implementation */ +int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, + int page); #endif /* __LINUX_MTD_NAND_H */ diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h index 4596503c9da9..0aaa98b219a4 100644 --- a/include/linux/mtd/onenand.h +++ b/include/linux/mtd/onenand.h @@ -80,7 +80,6 @@ struct onenand_bufferram { * @page_buf: [INTERN] page main data buffer * @oob_buf: [INTERN] page oob data buffer * @subpagesize: [INTERN] holds the subpagesize - * @ecclayout: [REPLACEABLE] the default ecc placement scheme * @bbm: [REPLACEABLE] pointer to Bad Block Management * @priv: [OPTIONAL] pointer to private chip date */ @@ -134,7 +133,6 @@ struct onenand_chip { #endif int subpagesize; - struct nand_ecclayout *ecclayout; void *bbm; diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h index 25f4d2a845c1..65e91d0fa981 100644 --- a/include/linux/mtd/sharpsl.h +++ b/include/linux/mtd/sharpsl.h @@ -14,7 +14,7 @@ struct sharpsl_nand_platform_data { struct nand_bbt_descr *badblock_pattern; - struct nand_ecclayout *ecc_layout; + const struct mtd_ooblayout_ops *ecc_layout; struct mtd_partition *partitions; unsigned int nr_partitions; }; diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 3c36113a88e1..c425c7b4c2a0 100644 --- a/include/linux/mtd/spi-nor.h +++ 
b/include/linux/mtd/spi-nor.h @@ -21,6 +21,7 @@ * Sometimes these are the same as CFI IDs, but sometimes they aren't. */ #define SNOR_MFR_ATMEL CFI_MFR_ATMEL +#define SNOR_MFR_GIGADEVICE 0xc8 #define SNOR_MFR_INTEL CFI_MFR_INTEL #define SNOR_MFR_MICRON CFI_MFR_ST /* ST Micro <--> Micron */ #define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX @@ -172,10 +173,10 @@ struct spi_nor { int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); - int (*read)(struct spi_nor *nor, loff_t from, - size_t len, size_t *retlen, u_char *read_buf); - void (*write)(struct spi_nor *nor, loff_t to, - size_t len, size_t *retlen, const u_char *write_buf); + ssize_t (*read)(struct spi_nor *nor, loff_t from, + size_t len, u_char *read_buf); + ssize_t (*write)(struct spi_nor *nor, loff_t to, + size_t len, const u_char *write_buf); int (*erase)(struct spi_nor *nor, loff_t offs); int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); diff --git a/include/linux/namei.h b/include/linux/namei.h index ec5ec2818a28..f29abda31e6d 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -45,6 +45,8 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; #define LOOKUP_ROOT 0x2000 #define LOOKUP_EMPTY 0x4000 +extern int path_pts(struct path *path); + extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty); static inline int user_path_at(int dfd, const char __user *name, unsigned flags, @@ -79,8 +81,6 @@ extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int); extern struct dentry *lookup_one_len(const char *, struct dentry *, int); extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int); -struct qstr; -extern struct dentry *lookup_hash(const struct qstr *, struct dentry *); extern int follow_down_one(struct path *); extern int follow_down(struct path *); diff --git a/include/linux/nd.h b/include/linux/nd.h index 5489ab756d1a..f1ea426d6a5e 100644 --- a/include/linux/nd.h +++ b/include/linux/nd.h @@ -15,6 +15,7 @@ #include <linux/fs.h> #include <linux/ndctl.h> #include <linux/device.h> +#include <linux/badblocks.h> enum nvdimm_event { NVDIMM_REVALIDATE_POISON, @@ -25,6 +26,7 @@ struct nd_device_driver { unsigned long type; int (*probe)(struct device *dev); int (*remove)(struct device *dev); + void (*shutdown)(struct device *dev); void (*notify)(struct device *dev, enum nvdimm_event event); }; @@ -55,13 +57,19 @@ static inline struct nd_namespace_common *to_ndns(struct device *dev) } /** - * struct nd_namespace_io - infrastructure for loading an nd_pmem instance + * struct nd_namespace_io - device representation of a persistent memory range * @dev: namespace device created by the nd region driver * @res: struct resource conversion of a NFIT SPA table + * @size: cached resource_size(@res) for fast path size checks + * @addr: virtual address to access the namespace range + * @bb: badblocks list for the namespace range */ struct nd_namespace_io { struct nd_namespace_common common; struct resource res; + resource_size_t size; + void *addr; + struct badblocks bb; }; /** @@ -82,6 +90,7 @@ struct nd_namespace_pmem { * @uuid: namespace name supplied in the dimm label * @id: ida allocated id * @lbasize: blk namespaces have a native sector size when btt not present + * @size: sum of all the resource ranges allocated to this namespace * @num_resources: number of dpa extents to claim * @res: discontiguous dpa extents for given dimm */ @@ -91,6 +100,7 @@ 
struct nd_namespace_blk { u8 *uuid; int id; unsigned long lbasize; + resource_size_t size; int num_resources; struct resource **res; }; diff --git a/include/linux/net.h b/include/linux/net.h index f840d77c6c31..b9f0ff4d489c 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -185,6 +185,7 @@ struct proto_ops { ssize_t (*splice_read)(struct socket *sock, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); int (*set_peek_off)(struct sock *sk, int val); + int (*peek_len)(struct socket *sock); }; #define DECLARE_SOCKADDR(type, dst, src) \ @@ -218,8 +219,7 @@ int sock_create_lite(int family, int type, int proto, struct socket **res); struct socket *sock_alloc(void); void sock_release(struct socket *sock); int sock_sendmsg(struct socket *sock, struct msghdr *msg); -int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, - int flags); +int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags); struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname); struct socket *sockfd_lookup(int fd, int *err); struct socket *sock_from_file(struct file *file, int *err); @@ -252,7 +252,8 @@ do { \ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ net_ratelimit()) \ - __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \ + __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \ + ##__VA_ARGS__); \ } while (0) #elif defined(DEBUG) #define net_dbg_ratelimited(fmt, ...) \ diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index a734bf43d190..9c6c8ef2e9e7 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -39,17 +39,23 @@ enum { NETIF_F_UFO_BIT, /* ... UDPv4 fragmentation */ NETIF_F_GSO_ROBUST_BIT, /* ... ->SKB_GSO_DODGY */ NETIF_F_TSO_ECN_BIT, /* ... TCP ECN support */ + NETIF_F_TSO_MANGLEID_BIT, /* ... IPV4 ID mangling allowed */ NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */ NETIF_F_FSO_BIT, /* ... FCoE segmentation */ NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */ NETIF_F_GSO_GRE_CSUM_BIT, /* ... GRE with csum with TSO */ - NETIF_F_GSO_IPIP_BIT, /* ... IPIP tunnel with TSO */ - NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */ + NETIF_F_GSO_IPXIP4_BIT, /* ... IP4 or IP6 over IP4 with TSO */ + NETIF_F_GSO_IPXIP6_BIT, /* ... IP4 or IP6 over IP6 with TSO */ NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */ + NETIF_F_GSO_PARTIAL_BIT, /* ... Only segment inner-most L4 + * in hardware and all other + * headers in software. + */ NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */ + NETIF_F_GSO_SCTP_BIT, /* ... 
SCTP fragmentation */ /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ - NETIF_F_GSO_TUNNEL_REMCSUM_BIT, + NETIF_F_GSO_SCTP_BIT, NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */ @@ -116,11 +122,14 @@ enum { #define NETIF_F_RXALL __NETIF_F(RXALL) #define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE) #define NETIF_F_GSO_GRE_CSUM __NETIF_F(GSO_GRE_CSUM) -#define NETIF_F_GSO_IPIP __NETIF_F(GSO_IPIP) -#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) +#define NETIF_F_GSO_IPXIP4 __NETIF_F(GSO_IPXIP4) +#define NETIF_F_GSO_IPXIP6 __NETIF_F(GSO_IPXIP6) #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM) +#define NETIF_F_TSO_MANGLEID __NETIF_F(TSO_MANGLEID) +#define NETIF_F_GSO_PARTIAL __NETIF_F(GSO_PARTIAL) #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) +#define NETIF_F_GSO_SCTP __NETIF_F(GSO_SCTP) #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) @@ -145,10 +154,6 @@ enum { #define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \ __NETIF_F_BIT(NETIF_F_GSO_SHIFT)) -/* List of features with software fallbacks. */ -#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \ - NETIF_F_TSO6 | NETIF_F_UFO) - /* List of IP checksum features. Note that NETIF_F_ HW_CSUM should not be * set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM are set-- * this would be contradictory @@ -156,11 +161,16 @@ enum { #define NETIF_F_CSUM_MASK (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ NETIF_F_HW_CSUM) -#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) +#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | \ + NETIF_F_TSO_ECN | NETIF_F_TSO_MANGLEID) #define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ NETIF_F_FSO) +/* List of features with software fallbacks. */ +#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO | \ + NETIF_F_GSO_SCTP) + /* * If one device supports one of these features, then enable them * for all in netdev_increment_features. @@ -193,8 +203,8 @@ enum { #define NETIF_F_GSO_ENCAP_ALL (NETIF_F_GSO_GRE | \ NETIF_F_GSO_GRE_CSUM | \ - NETIF_F_GSO_IPIP | \ - NETIF_F_GSO_SIT | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ NETIF_F_GSO_UDP_TUNNEL | \ NETIF_F_GSO_UDP_TUNNEL_CSUM) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 78181a88903b..e8d79d4ebcfe 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -61,6 +61,9 @@ struct wireless_dev; /* 802.15.4 specific */ struct wpan_dev; struct mpls_dev; +/* UDP Tunnel offloads */ +struct udp_tunnel_info; +struct bpf_prog; void netdev_set_default_ethtool_ops(struct net_device *dev, const struct ethtool_ops *ops); @@ -90,7 +93,6 @@ void netdev_set_default_ethtool_ops(struct net_device *dev, #define NET_XMIT_SUCCESS 0x00 #define NET_XMIT_DROP 0x01 /* skb dropped */ #define NET_XMIT_CN 0x02 /* congestion notification */ -#define NET_XMIT_POLICED 0x03 /* skb is shot by police */ #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */ /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. 
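
The netdev_features.h changes above fold the new NETIF_F_TSO_MANGLEID bit into NETIF_F_ALL_TSO and rebuild NETIF_F_GSO_SOFTWARE from it, so the software-fallback set now tracks every TSO variant plus UFO and the new SCTP segmentation bit. A sketch of how a driver might advertise the regrouped masks at probe time (demo_set_features() is invented; the feature composition is only illustrative):

#include <linux/netdevice.h>

/* Sketch: compose hw_features from the regrouped GSO masks. */
static void demo_set_features(struct net_device *dev)
{
	/* NETIF_F_ALL_TSO now covers TSO, TSO6, TSO_ECN and TSO_MANGLEID */
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_ALL_TSO;

	/* GSO_PARTIAL: hardware segments only the inner-most L4 header;
	 * the stack fixes up the outer headers in software */
	dev->hw_features |= NETIF_F_GSO_PARTIAL;
	dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM;

	dev->features |= dev->hw_features;
}

(gso_partial_features is the net_device field added later in this diff.)
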
It @@ -106,7 +108,6 @@ enum netdev_tx { __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ NETDEV_TX_OK = 0x00, /* driver took care of packet */ NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/ - NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */ }; typedef enum netdev_tx netdev_tx_t; @@ -570,28 +571,27 @@ struct netdev_queue { #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) int numa_node; #endif + unsigned long tx_maxrate; + /* + * Number of TX timeouts for this queue + * (/sys/class/net/DEV/Q/trans_timeout) + */ + unsigned long trans_timeout; /* * write-mostly part */ spinlock_t _xmit_lock ____cacheline_aligned_in_smp; int xmit_lock_owner; /* - * please use this field instead of dev->trans_start + * Time (in jiffies) of last Tx */ unsigned long trans_start; - /* - * Number of TX timeouts for this queue - * (/sys/class/net/DEV/Q/trans_timeout) - */ - unsigned long trans_timeout; - unsigned long state; #ifdef CONFIG_BQL struct dql dql; #endif - unsigned long tx_maxrate; } ____cacheline_aligned_in_smp; static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) @@ -787,6 +787,7 @@ enum { TC_SETUP_MQPRIO, TC_SETUP_CLSU32, TC_SETUP_CLSFLOWER, + TC_SETUP_MATCHALL, }; struct tc_cls_u32_offload; @@ -797,9 +798,37 @@ struct tc_to_netdev { u8 tc; struct tc_cls_u32_offload *cls_u32; struct tc_cls_flower_offload *cls_flower; + struct tc_cls_matchall_offload *cls_mall; }; }; +/* These structures hold the attributes of xdp state that are being passed + * to the netdevice through the xdp op. + */ +enum xdp_netdev_command { + /* Set or clear a bpf program used in the earliest stages of packet + * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee + * is responsible for calling bpf_prog_put on any old progs that are + * stored. In case of error, the callee need not release the new prog + * reference, but on success it takes ownership and must bpf_prog_put + * when it is no longer used. + */ + XDP_SETUP_PROG, + /* Check if a bpf program is set on the device. The callee should + * return true if a program is currently attached and running. + */ + XDP_QUERY_PROG, +}; + +struct netdev_xdp { + enum xdp_netdev_command command; + union { + /* XDP_SETUP_PROG */ + struct bpf_prog *prog; + /* XDP_QUERY_PROG */ + bool prog_attached; + }; +}; /* * This structure defines the management hooks for network devices. @@ -831,7 +860,6 @@ struct tc_to_netdev { * the queue before that can happen; it's for obsolete devices and weird * corner cases, but the stack really does a non-trivial amount * of useless work if you return NETDEV_TX_BUSY. - * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) * Required; cannot be NULL. * * netdev_features_t (*ndo_fix_features)(struct net_device *dev, @@ -1028,31 +1056,18 @@ struct tc_to_netdev { * not implement this, it is assumed that the hw is not able to have * multiple net devices on single physical port. * - * void (*ndo_add_vxlan_port)(struct net_device *dev, - * sa_family_t sa_family, __be16 port); - * Called by vxlan to notify a driver about the UDP port and socket - * address family that vxlan is listening to. It is called only when - * a new port starts listening. The operation is protected by the - * vxlan_net->sock_lock. - * - * void (*ndo_add_geneve_port)(struct net_device *dev, - * sa_family_t sa_family, __be16 port); - * Called by geneve to notify a driver about the UDP port and socket - * address family that geneve is listnening to. It is called only when - * a new port starts listening. 
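
The ndo_xdp hook documented above funnels both XDP commands through a single entry point. A hedged sketch of a driver-side dispatcher, assuming the driver tracks its attached program in private state (struct demo_priv and demo_xdp() are invented):

#include <linux/netdevice.h>
#include <linux/bpf.h>

struct demo_priv {
	struct bpf_prog *xdp_prog;	/* currently attached XDP program */
};

/* Sketch: minimal ndo_xdp implementation for the two commands. */
static int demo_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	struct demo_priv *priv = netdev_priv(dev);
	struct bpf_prog *old;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		old = priv->xdp_prog;
		priv->xdp_prog = xdp->prog;	/* take ownership, per the enum doc */
		if (old)
			bpf_prog_put(old);	/* callee must drop the old prog */
		return 0;
	case XDP_QUERY_PROG:
		xdp->prog_attached = !!priv->xdp_prog;
		return 0;
	default:
		return -EINVAL;
	}
}
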
The operation is protected by the - * geneve_net->sock_lock. - * - * void (*ndo_del_geneve_port)(struct net_device *dev, - * sa_family_t sa_family, __be16 port); - * Called by geneve to notify the driver about a UDP port and socket - * address family that geneve is not listening to anymore. The operation - * is protected by the geneve_net->sock_lock. - * - * void (*ndo_del_vxlan_port)(struct net_device *dev, - * sa_family_t sa_family, __be16 port); - * Called by vxlan to notify the driver about a UDP port and socket - * address family that vxlan is not listening to anymore. The operation - * is protected by the vxlan_net->sock_lock. + * void (*ndo_udp_tunnel_add)(struct net_device *dev, + * struct udp_tunnel_info *ti); + * Called by UDP tunnel to notify a driver about the UDP port and socket + * address family that a UDP tunnel is listening to. It is called only + * when a new port starts listening. The operation is protected by the + * RTNL. + * + * void (*ndo_udp_tunnel_del)(struct net_device *dev, + * struct udp_tunnel_info *ti); + * Called by UDP tunnel to notify the driver about a UDP port and socket + * address family that the UDP tunnel is not listening to anymore. The + * operation is protected by the RTNL. * * void* (*ndo_dfwd_add_station)(struct net_device *pdev, * struct net_device *dev) @@ -1102,6 +1117,9 @@ struct tc_to_netdev { * appropriate rx headroom value allows avoiding skb head copy on * forward. Setting a negative value resets the rx headroom to the * default value. + * int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp); + * This function is used to set or query state related to XDP on the + * netdevice. See definition of enum xdp_netdev_command for details. + * */ struct net_device_ops { @@ -1224,8 +1242,10 @@ struct net_device_ops { netdev_features_t features); int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); - int (*ndo_neigh_construct)(struct neighbour *n); - void (*ndo_neigh_destroy)(struct neighbour *n); + int (*ndo_neigh_construct)(struct net_device *dev, + struct neighbour *n); + void (*ndo_neigh_destroy)(struct net_device *dev, + struct neighbour *n); int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], @@ -1261,18 +1281,10 @@ struct net_device_ops { struct netdev_phys_item_id *ppid); int (*ndo_get_phys_port_name)(struct net_device *dev, char *name, size_t len); - void (*ndo_add_vxlan_port)(struct net_device *dev, - sa_family_t sa_family, - __be16 port); - void (*ndo_del_vxlan_port)(struct net_device *dev, - sa_family_t sa_family, - __be16 port); - void (*ndo_add_geneve_port)(struct net_device *dev, - sa_family_t sa_family, - __be16 port); - void (*ndo_del_geneve_port)(struct net_device *dev, - sa_family_t sa_family, - __be16 port); + void (*ndo_udp_tunnel_add)(struct net_device *dev, + struct udp_tunnel_info *ti); + void (*ndo_udp_tunnel_del)(struct net_device *dev, + struct udp_tunnel_info *ti); void* (*ndo_dfwd_add_station)(struct net_device *pdev, struct net_device *dev); void (*ndo_dfwd_del_station)(struct net_device *pdev, @@ -1292,6 +1304,8 @@ struct net_device_ops { struct sk_buff *skb); void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom); + int (*ndo_xdp)(struct net_device *dev, + struct netdev_xdp *xdp); }; /** @@ -1460,6 +1474,8 @@ enum netdev_priv_flags { * @netdev_ops: Includes several pointers to callbacks, * if one wants to override the ndo_*() functions * @ethtool_ops: Management operations + * @ndisc_ops: Includes callbacks for different IPv6 neighbour + * discovery handling. 
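
ndo_udp_tunnel_add/del collapse the four per-protocol vxlan/geneve callbacks into one RTNL-protected pair keyed by struct udp_tunnel_info. A sketch of a consolidated handler, assuming udp_tunnel_info carries a tunnel type and port as declared in net/udp_tunnel.h (demo_udp_tunnel_add() is invented):

#include <linux/netdevice.h>
#include <net/udp_tunnel.h>

/* Sketch: one add callback instead of separate vxlan/geneve hooks. */
static void demo_udp_tunnel_add(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		/* program ti->port into the VXLAN parser (RTNL is held) */
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		/* program ti->port into the GENEVE parser */
		break;
	default:
		break;	/* unknown tunnel type: nothing to offload */
	}
}
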
Necessary for e.g. 6LoWPAN. * @header_ops: Includes callbacks for creating,parsing,caching,etc * of Layer 2 headers. * @@ -1487,8 +1503,7 @@ enum netdev_priv_flags { * @perm_addr: Permanent hw address * @addr_assign_type: Hw address assignment type * @addr_len: Hardware address length - * @neigh_priv_len; Used in neigh_alloc(), - * initialized only in atm/clip.c + * @neigh_priv_len: Used in neigh_alloc() * @dev_id: Used to differentiate devices that share * the same link layer address * @dev_port: Used to differentiate devices that share @@ -1548,7 +1563,6 @@ enum netdev_priv_flags { * * @offload_fwd_mark: Offload device fwding mark * - * @trans_start: Time (in jiffies) of last Tx * @watchdog_timeo: Represents the timeout that is used by * the watchdog (see dev_watchdog()) * @watchdog_timer: List of timers @@ -1586,8 +1600,6 @@ enum netdev_priv_flags { * @gso_max_size: Maximum size of generic segmentation offload * @gso_max_segs: Maximum number of segments that can be passed to the * NIC for GSO - * @gso_min_segs: Minimum number of segments that can be passed to the - * NIC for GSO * * @dcbnl_ops: Data Center Bridging netlink ops * @num_tc: Number of traffic classes in the net device @@ -1600,7 +1612,8 @@ enum netdev_priv_flags { * @phydev: Physical device may attach itself * for hardware timestamping * - * @qdisc_tx_busylock: XXX: need comments on this one + * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock + * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount * * @proto_down: protocol port state information can be sent to the * switch driver and used to set the phys state of the @@ -1656,6 +1669,7 @@ struct net_device { netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; + netdev_features_t gso_partial_features; int ifindex; int group; @@ -1678,6 +1692,9 @@ struct net_device { #ifdef CONFIG_NET_L3_MASTER_DEV const struct l3mdev_ops *l3mdev_ops; #endif +#if IS_ENABLED(CONFIG_IPV6) + const struct ndisc_ops *ndisc_ops; +#endif const struct header_ops *header_ops; @@ -1798,13 +1815,6 @@ struct net_device { #endif /* These may be needed for future network-power-down code. */ - - /* - * trans_start here is expensive for high speed devices on SMP, - * please use netdev_queue->trans_start instead. 
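
With dev->trans_start removed here, Tx timestamps live only in netdev_queue; single-queue legacy drivers are expected to switch to the netif_trans_update() helper added further down in this diff. A sketch of the conversion (demo_start_xmit() is invented):

#include <linux/netdevice.h>

/* Sketch: timestamp update after the trans_start field removal. */
static netdev_tx_t demo_start_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	/* was: dev->trans_start = jiffies; -- the field no longer exists */
	netif_trans_update(dev);	/* updates tx queue 0's trans_start */

	dev_kfree_skb_any(skb);		/* demo only: discard the packet */
	return NETDEV_TX_OK;
}
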
- */ - unsigned long trans_start; - struct timer_list watchdog_timer; int __percpu *pcpu_refcnt; @@ -1858,7 +1868,7 @@ struct net_device { unsigned int gso_max_size; #define GSO_MAX_SEGS 65535 u16 gso_max_segs; - u16 gso_min_segs; + #ifdef CONFIG_DCB const struct dcbnl_rtnl_ops *dcbnl_ops; #endif @@ -1874,6 +1884,7 @@ struct net_device { #endif struct phy_device *phydev; struct lock_class_key *qdisc_tx_busylock; + struct lock_class_key *qdisc_running_key; bool proto_down; }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -1956,6 +1967,23 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, f(dev, &dev->_tx[i], arg); } +#define netdev_lockdep_set_classes(dev) \ +{ \ + static struct lock_class_key qdisc_tx_busylock_key; \ + static struct lock_class_key qdisc_running_key; \ + static struct lock_class_key qdisc_xmit_lock_key; \ + static struct lock_class_key dev_addr_list_lock_key; \ + unsigned int i; \ + \ + (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ + (dev)->qdisc_running_key = &qdisc_running_key; \ + lockdep_set_class(&(dev)->addr_list_lock, \ + &dev_addr_list_lock_key); \ + for (i = 0; i < (dev)->num_tx_queues; i++) \ + lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ + &qdisc_xmit_lock_key); \ +} + struct netdev_queue *netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, void *accel_priv); @@ -2123,7 +2151,10 @@ struct napi_gro_cb { /* Used in GRE, set in fou/gue_gro_receive */ u8 is_fou:1; - /* 6 bit hole */ + /* Used to determine if flush_id can be ignored */ + u8 is_atomic:1; + + /* 5 bit hole */ /* used to support CHECKSUM_COMPLETE for tunneling protocols */ __wsum csum; @@ -2162,26 +2193,6 @@ struct packet_offload { struct list_head list; }; -struct udp_offload; - -/* 'skb->encapsulation' is set before gro_complete() is called. gro_complete() - * must set 'skb->inner_mac_header' to the beginning of tunnel payload. - */ -struct udp_offload_callbacks { - struct sk_buff **(*gro_receive)(struct sk_buff **head, - struct sk_buff *skb, - struct udp_offload *uoff); - int (*gro_complete)(struct sk_buff *skb, - int nhoff, - struct udp_offload *uoff); -}; - -struct udp_offload { - __be16 port; - u8 ipproto; - struct udp_offload_callbacks callbacks; -}; - /* often modified stats are per-CPU, other are shared (netdev->stats) */ struct pcpu_sw_netstats { u64 rx_packets; @@ -2262,6 +2273,8 @@ struct netdev_lag_lower_state_info { #define NETDEV_BONDING_INFO 0x0019 #define NETDEV_PRECHANGEUPPER 0x001A #define NETDEV_CHANGELOWERSTATE 0x001B +#define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C +#define NETDEV_CHANGE_TX_QUEUE_LEN 0x001E int register_netdevice_notifier(struct notifier_block *nb); int unregister_netdevice_notifier(struct notifier_block *nb); @@ -2397,6 +2410,8 @@ void synchronize_net(void); int init_dummy_netdev(struct net_device *dev); DECLARE_PER_CPU(int, xmit_recursion); +#define XMIT_RECURSION_LIMIT 10 + static inline int dev_recursion_level(void) { return this_cpu_read(xmit_recursion); @@ -2753,7 +2768,6 @@ struct softnet_data { /* stats */ unsigned int processed; unsigned int time_squeeze; - unsigned int cpu_collision; unsigned int received_rps; #ifdef CONFIG_RPS struct softnet_data *rps_ipi_list; @@ -2766,11 +2780,15 @@ struct softnet_data { struct sk_buff *completion_queue; #ifdef CONFIG_RPS - /* Elements below can be accessed between CPUs for RPS */ + /* input_queue_head should be written by cpu owning this struct, + * and only read by other cpus. Worth using a cache line. 
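
The netdev_lockdep_set_classes() macro above gives a driver static lockdep keys for the qdisc busylock, the new qdisc running seqcount, every per-queue xmit lock, and the address-list lock in one call. A sketch of the intended call site, assuming an ndo_init-style setup hook (demo_init() is invented):

#include <linux/netdevice.h>

/* Sketch: install per-driver lockdep classes at init time. */
static int demo_init(struct net_device *dev)
{
	/* replaces four hand-rolled lockdep_set_class() calls;
	 * the keys are static, i.e. shared by all devices of this driver */
	netdev_lockdep_set_classes(dev);
	return 0;
}
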
+ */ + unsigned int input_queue_head ____cacheline_aligned_in_smp; + + /* Elements below can be accessed between CPUs for RPS/RFS */ struct call_single_data csd ____cacheline_aligned_in_smp; struct softnet_data *rps_ipi_next; unsigned int cpu; - unsigned int input_queue_head; unsigned int input_queue_tail; #endif unsigned int dropped; @@ -2807,7 +2825,7 @@ static inline void netif_tx_schedule_all(struct net_device *dev) netif_schedule_queue(netdev_get_tx_queue(dev, i)); } -static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) +static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue) { clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); } @@ -2857,7 +2875,7 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev) } } -static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) +static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) { set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); } @@ -3249,6 +3267,7 @@ static inline void napi_free_frags(struct napi_struct *napi) napi->skb = NULL; } +bool netdev_is_rx_handler_busy(struct net_device *dev); int netdev_rx_handler_register(struct net_device *dev, rx_handler_func_t *rx_handler, void *rx_handler_data); @@ -3274,12 +3293,16 @@ int dev_get_phys_port_id(struct net_device *dev, int dev_get_phys_port_name(struct net_device *dev, char *name, size_t len); int dev_change_proto_down(struct net_device *dev, bool proto_down); +int dev_change_xdp_fd(struct net_device *dev, int fd); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); -bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb); +bool is_skb_forwardable(const struct net_device *dev, + const struct sk_buff *skb); + +void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); extern int netdev_budget; @@ -3496,6 +3519,15 @@ static inline void txq_trans_update(struct netdev_queue *txq) txq->trans_start = jiffies; } +/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */ +static inline void netif_trans_update(struct net_device *dev) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); + + if (txq->trans_start != jiffies) + txq->trans_start = jiffies; +} + /** * netif_tx_lock - grab network device transmit lock * @dev: network device @@ -3771,7 +3803,6 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, extern int netdev_max_backlog; extern int netdev_tstamp_prequeue; extern int weight_p; -extern int bpf_jit_enable; bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, @@ -3812,12 +3843,30 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev, void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter); + #define netdev_for_each_lower_dev(dev, ldev, iter) \ for (iter = (dev)->adj_list.lower.next, \ ldev = netdev_lower_get_next(dev, &(iter)); \ ldev; \ ldev = netdev_lower_get_next(dev, &(iter))) +struct net_device *netdev_all_lower_get_next(struct net_device *dev, + struct list_head **iter); +struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev, + struct list_head **iter); + +#define netdev_for_each_all_lower_dev(dev, 
ldev, iter) \ + for (iter = (dev)->all_adj_list.lower.next, \ + ldev = netdev_all_lower_get_next(dev, &(iter)); \ + ldev; \ + ldev = netdev_all_lower_get_next(dev, &(iter))) + +#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \ + for (iter = (dev)->all_adj_list.lower.next, \ + ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \ + ldev; \ + ldev = netdev_all_lower_get_next_rcu(dev, &(iter))) + void *netdev_adjacent_get_private(struct list_head *adj_list); void *netdev_lower_get_first_private_rcu(struct net_device *dev); struct net_device *netdev_master_upper_dev_get(struct net_device *dev); @@ -3833,14 +3882,17 @@ void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev); void netdev_lower_state_changed(struct net_device *lower_dev, void *lower_state_info); +int netdev_default_l2upper_neigh_construct(struct net_device *dev, + struct neighbour *n); +void netdev_default_l2upper_neigh_destroy(struct net_device *dev, + struct neighbour *n); /* RSS keys are 40 or 52 bytes long */ #define NETDEV_RSS_KEY_LEN 52 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; void netdev_rss_key_fill(void *buffer, size_t len); -int dev_get_nest_level(struct net_device *dev, - bool (*type_check)(const struct net_device *dev)); +int dev_get_nest_level(struct net_device *dev); int skb_checksum_help(struct sk_buff *skb); struct sk_buff *__skb_gso_segment(struct sk_buff *skb, netdev_features_t features, bool tx_path); @@ -4014,15 +4066,18 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_IPIP != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)); return (features & feature) == feature; } @@ -4156,6 +4211,13 @@ static inline void netif_keep_dst(struct net_device *dev) dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); } +/* return true if dev can't cope with mtu frames that need vlan tag insertion */ +static inline bool netif_reduces_vlan_mtu(struct net_device *dev) +{ + /* TODO: reserve and use an additional IFF bit, if we get more users */ + return dev->priv_flags & IFF_MACSEC; +} + extern struct pernet_operations __net_initdata loopback_net_ops; /* Logging, debugging and troubleshooting/diagnostic helpers. 
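
The all_adj_list iterators added above walk every lower device in the hierarchy, not just direct children, with an _rcu variant for read-side use. A sketch that counts the full lower tree under RTNL (demo_count_lowers() is invented):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Sketch: walk the complete lower-device list. */
static int demo_count_lowers(struct net_device *dev)
{
	struct net_device *ldev;
	struct list_head *iter;
	int n = 0;

	ASSERT_RTNL();	/* the non-RCU walker relies on RTNL protection */
	netdev_for_each_all_lower_dev(dev, ldev, iter)
		n++;
	return n;
}
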
*/ diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index f48b8a664b0f..83b9a2e0d8d4 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -351,7 +351,8 @@ ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo) return ((skbinfo->skbmark || skbinfo->skbmarkmask) && nla_put_net64(skb, IPSET_ATTR_SKBMARK, cpu_to_be64((u64)skbinfo->skbmark << 32 | - skbinfo->skbmarkmask))) || + skbinfo->skbmarkmask), + IPSET_ATTR_PAD)) || (skbinfo->skbprio && nla_put_net32(skb, IPSET_ATTR_SKBPRIO, cpu_to_be32(skbinfo->skbprio))) || @@ -374,9 +375,11 @@ static inline bool ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter) { return nla_put_net64(skb, IPSET_ATTR_BYTES, - cpu_to_be64(ip_set_get_bytes(counter))) || + cpu_to_be64(ip_set_get_bytes(counter)), + IPSET_ATTR_PAD) || nla_put_net64(skb, IPSET_ATTR_PACKETS, - cpu_to_be64(ip_set_get_packets(counter))); + cpu_to_be64(ip_set_get_packets(counter)), + IPSET_ATTR_PAD); } static inline void diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h index 80ca889b164e..664da0048625 100644 --- a/include/linux/netfilter/nfnetlink_acct.h +++ b/include/linux/netfilter/nfnetlink_acct.h @@ -15,6 +15,6 @@ struct nf_acct; struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name); void nfnl_acct_put(struct nf_acct *acct); void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct); -extern int nfnl_acct_overquota(const struct sk_buff *skb, - struct nf_acct *nfacct); +int nfnl_acct_overquota(struct net *net, const struct sk_buff *skb, + struct nf_acct *nfacct); #endif /* _NFNL_ACCT_H */ diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 80a305b85323..2ad1a2b289b5 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -6,6 +6,10 @@ #include <linux/static_key.h> #include <uapi/linux/netfilter/x_tables.h> +/* Test a struct->invflags and a boolean for inequality */ +#define NF_INVF(ptr, flag, boolean) \ + ((boolean) ^ !!((ptr)->invflags & (flag))) + /** * struct xt_action_param - parameters for matches/targets * @@ -242,11 +246,22 @@ void xt_unregister_match(struct xt_match *target); int xt_register_matches(struct xt_match *match, unsigned int n); void xt_unregister_matches(struct xt_match *match, unsigned int n); +int xt_check_entry_offsets(const void *base, const char *elems, + unsigned int target_offset, + unsigned int next_offset); + +unsigned int *xt_alloc_entry_offsets(unsigned int size); +bool xt_find_jump_offset(const unsigned int *offsets, + unsigned int target, unsigned int size); + int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto, bool inv_proto); int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, bool inv_proto); +void *xt_copy_counters_from_user(const void __user *user, unsigned int len, + struct xt_counters_info *info, bool compat); + struct xt_table *xt_register_table(struct net *net, const struct xt_table *table, struct xt_table_info *bootstrap, @@ -373,16 +388,16 @@ static inline unsigned long ifname_compare_aligned(const char *_a, * allows us to return 0 for single core systems without forcing * callers to deal with SMP vs. NONSMP issues. 
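
The new NF_INVF() helper in x_tables.h generalizes the per-module FWINV() macros (the ebtables copy is deleted just below): it XORs a boolean match result with the state of the rule's inversion flag. A sketch of the pattern with made-up rule and flag names (struct demo_rule and DEMO_INV_PROTO are not from the tree):

#include <linux/types.h>
#include <linux/netfilter/x_tables.h>

#define DEMO_INV_PROTO 0x01	/* invented inversion bit */

struct demo_rule {
	unsigned int invflags;	/* inversion bits for this rule */
	__u8 proto;
};

/* Sketch: match that honours an optional "!" inversion. */
static bool demo_proto_matches(const struct demo_rule *r, __u8 proto)
{
	/* true when (proto == r->proto) XOR the inversion flag */
	return NF_INVF(r, DEMO_INV_PROTO, r->proto == proto);
}
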
*/ -static inline u64 xt_percpu_counter_alloc(void) +static inline unsigned long xt_percpu_counter_alloc(void) { if (nr_cpu_ids > 1) { void __percpu *res = __alloc_percpu(sizeof(struct xt_counters), sizeof(struct xt_counters)); if (res == NULL) - return (u64) -ENOMEM; + return -ENOMEM; - return (u64) (__force unsigned long) res; + return (__force unsigned long) res; } return 0; @@ -480,7 +495,7 @@ void xt_compat_init_offsets(u_int8_t af, unsigned int number); int xt_compat_calc_jump(u_int8_t af, unsigned int offset); int xt_compat_match_offset(const struct xt_match *match); -int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, +void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, unsigned int *size); int xt_compat_match_to_user(const struct xt_entry_match *m, void __user **dstptr, unsigned int *size); @@ -490,6 +505,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, unsigned int *size); int xt_compat_target_to_user(const struct xt_entry_target *t, void __user **dstptr, unsigned int *size); +int xt_compat_check_entry_offsets(const void *base, const char *elems, + unsigned int target_offset, + unsigned int next_offset); #endif /* CONFIG_COMPAT */ #endif /* _X_TABLES_H */ diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index 2ea517c7c6b9..984b2112c77b 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -115,8 +115,6 @@ extern unsigned int ebt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct ebt_table *table); -/* Used in the kernel match() functions */ -#define FWINV(bool,invflg) ((bool) ^ !!(info->invflags & invflg)) /* True if the hook mask denotes that the rule is in a base chain, * used in the check() functions */ #define BASE_CHAIN (par->hook_mask & (1 << NF_BR_NUMHOOKS)) diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 011433478a14..c6564ada9beb 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -50,12 +50,27 @@ struct nfs4_label { typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier; -struct nfs_stateid4 { - __be32 seqid; - char other[NFS4_STATEID_OTHER_SIZE]; -} __attribute__ ((packed)); +struct nfs4_stateid_struct { + union { + char data[NFS4_STATEID_SIZE]; + struct { + __be32 seqid; + char other[NFS4_STATEID_OTHER_SIZE]; + } __attribute__ ((packed)); + }; + + enum { + NFS4_INVALID_STATEID_TYPE = 0, + NFS4_SPECIAL_STATEID_TYPE, + NFS4_OPEN_STATEID_TYPE, + NFS4_LOCK_STATEID_TYPE, + NFS4_DELEGATION_STATEID_TYPE, + NFS4_LAYOUT_STATEID_TYPE, + NFS4_PNFS_DS_STATEID_TYPE, + } type; +}; -typedef struct nfs_stateid4 nfs4_stateid; +typedef struct nfs4_stateid_struct nfs4_stateid; enum nfs_opnum4 { OP_ACCESS = 3, @@ -504,6 +519,7 @@ enum { NFSPROC4_CLNT_DEALLOCATE, NFSPROC4_CLNT_LAYOUTSTATS, NFSPROC4_CLNT_CLONE, + NFSPROC4_CLNT_COPY, }; /* nfs41 types */ @@ -621,8 +637,21 @@ enum pnfs_update_layout_reason { PNFS_UPDATE_LAYOUT_IO_TEST_FAIL, PNFS_UPDATE_LAYOUT_FOUND_CACHED, PNFS_UPDATE_LAYOUT_RETURN, + PNFS_UPDATE_LAYOUT_RETRY, PNFS_UPDATE_LAYOUT_BLOCKED, + PNFS_UPDATE_LAYOUT_INVALID_OPEN, PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET, }; +#define NFS4_OP_MAP_NUM_LONGS \ + DIV_ROUND_UP(LAST_NFS4_OP, 8 * sizeof(unsigned long)) +#define NFS4_OP_MAP_NUM_WORDS \ + (NFS4_OP_MAP_NUM_LONGS * sizeof(unsigned long) / sizeof(u32)) +struct nfs4_op_map { + union { + unsigned long longs[NFS4_OP_MAP_NUM_LONGS]; + u32 words[NFS4_OP_MAP_NUM_WORDS]; + } u; +}; + #endif diff --git 
a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 67300f8e5f2f..810124b33327 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -163,11 +163,9 @@ struct nfs_inode { /* Open contexts for shared mmap writes */ struct list_head open_files; - /* Number of in-flight sillydelete RPC calls */ - atomic_t silly_count; - /* List of deferred sillydelete requests */ - struct hlist_head silly_list; - wait_queue_head_t waitqueue; + /* Readers: in-flight sillydelete RPC calls */ + /* Writers: rmdir */ + struct rw_semaphore rmdir_sem; #if IS_ENABLED(CONFIG_NFS_V4) struct nfs4_cached_acl *nfs4_acl; @@ -207,12 +205,12 @@ struct nfs_inode { #define NFS_INO_STALE (1) /* possible stale inode */ #define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ #define NFS_INO_INVALIDATING (3) /* inode is being invalidated */ -#define NFS_INO_FLUSHING (4) /* inode is flushing out data */ #define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */ #define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */ #define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */ #define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */ #define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */ +#define NFS_INO_ODIRECT (12) /* I/O setting is O_DIRECT */ static inline struct nfs_inode *NFS_I(const struct inode *inode) { @@ -353,7 +351,6 @@ extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *ino extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); extern int nfs_revalidate_mapping_rcu(struct inode *inode); -extern int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping); extern int nfs_setattr(struct dentry *, struct iattr *); extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *); extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, @@ -445,10 +442,9 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file) /* * linux/fs/nfs/direct.c */ -extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *, loff_t); +extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *); extern ssize_t nfs_file_direct_read(struct kiocb *iocb, - struct iov_iter *iter, - loff_t pos); + struct iov_iter *iter); extern ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter); @@ -492,9 +488,6 @@ extern void nfs_release_automount_timer(void); * linux/fs/nfs/unlink.c */ extern void nfs_complete_unlink(struct dentry *dentry, struct inode *); -extern void nfs_wait_on_sillyrename(struct dentry *dentry); -extern void nfs_block_sillyrename(struct dentry *dentry); -extern void nfs_unblock_sillyrename(struct dentry *dentry); /* * linux/fs/nfs/write.c diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 7fcc13c8cf1f..14a762d2734d 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -246,5 +246,6 @@ struct nfs_server { #define NFS_CAP_DEALLOCATE (1U << 21) #define NFS_CAP_LAYOUTSTATS (1U << 22) #define NFS_CAP_CLONE (1U << 23) +#define NFS_CAP_COPY (1U << 24) #endif diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index d320906cf13e..7cc0deee5bde 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -233,7 +233,6 @@ struct nfs4_layoutget_args { struct inode *inode; struct nfs_open_context *ctx; nfs4_stateid stateid; - unsigned long timestamp; struct nfs4_layoutdriver_data layout; }; @@ -251,7 
+250,6 @@ struct nfs4_layoutget { struct nfs4_layoutget_res res; struct rpc_cred *cred; gfp_t gfp_flags; - long timeout; }; struct nfs4_getdeviceinfo_args { @@ -1187,17 +1185,6 @@ struct pnfs_ds_commit_info { struct pnfs_commit_bucket *buckets; }; -#define NFS4_OP_MAP_NUM_LONGS \ - DIV_ROUND_UP(LAST_NFS4_OP, 8 * sizeof(unsigned long)) -#define NFS4_OP_MAP_NUM_WORDS \ - (NFS4_OP_MAP_NUM_LONGS * sizeof(unsigned long) / sizeof(u32)) -struct nfs4_op_map { - union { - unsigned long longs[NFS4_OP_MAP_NUM_LONGS]; - u32 words[NFS4_OP_MAP_NUM_WORDS]; - } u; -}; - struct nfs41_state_protection { u32 how; struct nfs4_op_map enforce; @@ -1343,6 +1330,32 @@ struct nfs42_falloc_res { const struct nfs_server *falloc_server; }; +struct nfs42_copy_args { + struct nfs4_sequence_args seq_args; + + struct nfs_fh *src_fh; + nfs4_stateid src_stateid; + u64 src_pos; + + struct nfs_fh *dst_fh; + nfs4_stateid dst_stateid; + u64 dst_pos; + + u64 count; +}; + +struct nfs42_write_res { + u64 count; + struct nfs_writeverf verifier; +}; + +struct nfs42_copy_res { + struct nfs4_sequence_res seq_res; + struct nfs42_write_res write_res; + bool consecutive; + bool synchronous; +}; + struct nfs42_seek_args { struct nfs4_sequence_args seq_args; @@ -1431,7 +1444,7 @@ struct nfs_commit_completion_ops { }; struct nfs_commit_info { - spinlock_t *lock; /* inode->i_lock */ + struct inode *inode; /* Needed for inode->i_lock */ struct nfs_mds_commit_info *mds; struct pnfs_ds_commit_info *ds; struct nfs_direct_req *dreq; /* O_DIRECT request */ @@ -1468,10 +1481,10 @@ struct nfs_pgio_completion_ops { }; struct nfs_unlinkdata { - struct hlist_node list; struct nfs_removeargs args; struct nfs_removeres res; - struct inode *dir; + struct dentry *dentry; + wait_queue_head_t wq; struct rpc_cred *cred; struct nfs_fattr dir_attr; long timeout; @@ -1519,7 +1532,7 @@ struct nfs_rpc_ops { struct nfs_fattr *, struct nfs4_label *); int (*setattr) (struct dentry *, struct nfs_fattr *, struct iattr *); - int (*lookup) (struct inode *, struct qstr *, + int (*lookup) (struct inode *, const struct qstr *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *); int (*access) (struct inode *, struct nfs_access_entry *); @@ -1527,18 +1540,18 @@ struct nfs_rpc_ops { unsigned int); int (*create) (struct inode *, struct dentry *, struct iattr *, int); - int (*remove) (struct inode *, struct qstr *); + int (*remove) (struct inode *, const struct qstr *); void (*unlink_setup) (struct rpc_message *, struct inode *dir); void (*unlink_rpc_prepare) (struct rpc_task *, struct nfs_unlinkdata *); int (*unlink_done) (struct rpc_task *, struct inode *); void (*rename_setup) (struct rpc_message *msg, struct inode *dir); void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *); int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir); - int (*link) (struct inode *, struct inode *, struct qstr *); + int (*link) (struct inode *, struct inode *, const struct qstr *); int (*symlink) (struct inode *, struct dentry *, struct page *, unsigned int, struct iattr *); int (*mkdir) (struct inode *, struct dentry *, struct iattr *); - int (*rmdir) (struct inode *, struct qstr *); + int (*rmdir) (struct inode *, const struct qstr *); int (*readdir) (struct dentry *, struct rpc_cred *, u64, struct page **, unsigned int, int); int (*mknod) (struct inode *, struct dentry *, struct iattr *, @@ -1572,9 +1585,8 @@ struct nfs_rpc_ops { int (*have_delegation)(struct inode *, fmode_t); int (*return_delegation)(struct inode *); struct 
nfs_client *(*alloc_client) (const struct nfs_client_initdata *); - struct nfs_client * - (*init_client) (struct nfs_client *, const struct rpc_timeout *, - const char *); + struct nfs_client *(*init_client) (struct nfs_client *, + const struct nfs_client_initdata *); void (*free_client) (struct nfs_client *); struct nfs_server *(*create_server)(struct nfs_mount_info *, struct nfs_subversion *); struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *, diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h deleted file mode 100644 index e9fcf90b270d..000000000000 --- a/include/linux/nilfs2_fs.h +++ /dev/null @@ -1,919 +0,0 @@ -/* - * nilfs2_fs.h - NILFS2 on-disk structures and common declarations. - * - * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; either version 2.1 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Koji Sato <koji@osrg.net> - * Ryusuke Konishi <ryusuke@osrg.net> - */ -/* - * linux/include/linux/ext2_fs.h - * - * Copyright (C) 1992, 1993, 1994, 1995 - * Remy Card (card@masi.ibp.fr) - * Laboratoire MASI - Institut Blaise Pascal - * Universite Pierre et Marie Curie (Paris VI) - * - * from - * - * linux/include/linux/minix_fs.h - * - * Copyright (C) 1991, 1992 Linus Torvalds - */ - -#ifndef _LINUX_NILFS_FS_H -#define _LINUX_NILFS_FS_H - -#include <linux/types.h> -#include <linux/ioctl.h> -#include <linux/magic.h> -#include <linux/bug.h> - - -#define NILFS_INODE_BMAP_SIZE 7 -/** - * struct nilfs_inode - structure of an inode on disk - * @i_blocks: blocks count - * @i_size: size in bytes - * @i_ctime: creation time (seconds) - * @i_mtime: modification time (seconds) - * @i_ctime_nsec: creation time (nano seconds) - * @i_mtime_nsec: modification time (nano seconds) - * @i_uid: user id - * @i_gid: group id - * @i_mode: file mode - * @i_links_count: links count - * @i_flags: file flags - * @i_bmap: block mapping - * @i_xattr: extended attributes - * @i_generation: file generation (for NFS) - * @i_pad: padding - */ -struct nilfs_inode { - __le64 i_blocks; - __le64 i_size; - __le64 i_ctime; - __le64 i_mtime; - __le32 i_ctime_nsec; - __le32 i_mtime_nsec; - __le32 i_uid; - __le32 i_gid; - __le16 i_mode; - __le16 i_links_count; - __le32 i_flags; - __le64 i_bmap[NILFS_INODE_BMAP_SIZE]; -#define i_device_code i_bmap[0] - __le64 i_xattr; - __le32 i_generation; - __le32 i_pad; -}; - -#define NILFS_MIN_INODE_SIZE 128 - -/** - * struct nilfs_super_root - structure of super root - * @sr_sum: check sum - * @sr_bytes: byte count of the structure - * @sr_flags: flags (reserved) - * @sr_nongc_ctime: write time of the last segment not for cleaner operation - * @sr_dat: DAT file inode - * @sr_cpfile: checkpoint file inode - * @sr_sufile: segment usage file inode - */ -struct nilfs_super_root { - __le32 sr_sum; - __le16 sr_bytes; - __le16 sr_flags; - __le64 
sr_nongc_ctime; - struct nilfs_inode sr_dat; - struct nilfs_inode sr_cpfile; - struct nilfs_inode sr_sufile; -}; - -#define NILFS_SR_MDT_OFFSET(inode_size, i) \ - ((unsigned long)&((struct nilfs_super_root *)0)->sr_dat + \ - (inode_size) * (i)) -#define NILFS_SR_DAT_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 0) -#define NILFS_SR_CPFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 1) -#define NILFS_SR_SUFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 2) -#define NILFS_SR_BYTES(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 3) - -/* - * Maximal mount counts - */ -#define NILFS_DFL_MAX_MNT_COUNT 50 /* 50 mounts */ - -/* - * File system states (sbp->s_state, nilfs->ns_mount_state) - */ -#define NILFS_VALID_FS 0x0001 /* Unmounted cleanly */ -#define NILFS_ERROR_FS 0x0002 /* Errors detected */ -#define NILFS_RESIZE_FS 0x0004 /* Resize required */ - -/* - * Mount flags (sbi->s_mount_opt) - */ -#define NILFS_MOUNT_ERROR_MODE 0x0070 /* Error mode mask */ -#define NILFS_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */ -#define NILFS_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */ -#define NILFS_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */ -#define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */ -#define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order - semantics also for data */ -#define NILFS_MOUNT_NORECOVERY 0x4000 /* Disable write access during - mount-time recovery */ -#define NILFS_MOUNT_DISCARD 0x8000 /* Issue DISCARD requests */ - - -/** - * struct nilfs_super_block - structure of super block on disk - */ -struct nilfs_super_block { -/*00*/ __le32 s_rev_level; /* Revision level */ - __le16 s_minor_rev_level; /* minor revision level */ - __le16 s_magic; /* Magic signature */ - - __le16 s_bytes; /* Bytes count of CRC calculation - for this structure. s_reserved - is excluded. */ - __le16 s_flags; /* flags */ - __le32 s_crc_seed; /* Seed value of CRC calculation */ -/*10*/ __le32 s_sum; /* Check sum of super block */ - - __le32 s_log_block_size; /* Block size represented as follows - blocksize = - 1 << (s_log_block_size + 10) */ - __le64 s_nsegments; /* Number of segments in filesystem */ -/*20*/ __le64 s_dev_size; /* block device size in bytes */ - __le64 s_first_data_block; /* 1st seg disk block number */ -/*30*/ __le32 s_blocks_per_segment; /* number of blocks per full segment */ - __le32 s_r_segments_percentage; /* Reserved segments percentage */ - - __le64 s_last_cno; /* Last checkpoint number */ -/*40*/ __le64 s_last_pseg; /* disk block addr pseg written last */ - __le64 s_last_seq; /* seq. number of seg written last */ -/*50*/ __le64 s_free_blocks_count; /* Free blocks count */ - - __le64 s_ctime; /* Creation time (execution time of - newfs) */ -/*60*/ __le64 s_mtime; /* Mount time */ - __le64 s_wtime; /* Write time */ -/*70*/ __le16 s_mnt_count; /* Mount count */ - __le16 s_max_mnt_count; /* Maximal mount count */ - __le16 s_state; /* File system state */ - __le16 s_errors; /* Behaviour when detecting errors */ - __le64 s_lastcheck; /* time of last check */ - -/*80*/ __le32 s_checkinterval; /* max. 
time between checks */ - __le32 s_creator_os; /* OS */ - __le16 s_def_resuid; /* Default uid for reserved blocks */ - __le16 s_def_resgid; /* Default gid for reserved blocks */ - __le32 s_first_ino; /* First non-reserved inode */ - -/*90*/ __le16 s_inode_size; /* Size of an inode */ - __le16 s_dat_entry_size; /* Size of a dat entry */ - __le16 s_checkpoint_size; /* Size of a checkpoint */ - __le16 s_segment_usage_size; /* Size of a segment usage */ - -/*98*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */ -/*A8*/ char s_volume_name[80]; /* volume name */ - -/*F8*/ __le32 s_c_interval; /* Commit interval of segment */ - __le32 s_c_block_max; /* Threshold of data amount for - the segment construction */ -/*100*/ __le64 s_feature_compat; /* Compatible feature set */ - __le64 s_feature_compat_ro; /* Read-only compatible feature set */ - __le64 s_feature_incompat; /* Incompatible feature set */ - __u32 s_reserved[186]; /* padding to the end of the block */ -}; - -/* - * Codes for operating systems - */ -#define NILFS_OS_LINUX 0 -/* Codes from 1 to 4 are reserved to keep compatibility with ext2 creator-OS */ - -/* - * Revision levels - */ -#define NILFS_CURRENT_REV 2 /* current major revision */ -#define NILFS_MINOR_REV 0 /* minor revision */ -#define NILFS_MIN_SUPP_REV 2 /* minimum supported revision */ - -/* - * Feature set definitions - * - * If there is a bit set in the incompatible feature set that the kernel - * doesn't know about, it should refuse to mount the filesystem. - */ -#define NILFS_FEATURE_COMPAT_RO_BLOCK_COUNT 0x00000001ULL - -#define NILFS_FEATURE_COMPAT_SUPP 0ULL -#define NILFS_FEATURE_COMPAT_RO_SUPP NILFS_FEATURE_COMPAT_RO_BLOCK_COUNT -#define NILFS_FEATURE_INCOMPAT_SUPP 0ULL - -/* - * Bytes count of super_block for CRC-calculation - */ -#define NILFS_SB_BYTES \ - ((long)&((struct nilfs_super_block *)0)->s_reserved) - -/* - * Special inode number - */ -#define NILFS_ROOT_INO 2 /* Root file inode */ -#define NILFS_DAT_INO 3 /* DAT file */ -#define NILFS_CPFILE_INO 4 /* checkpoint file */ -#define NILFS_SUFILE_INO 5 /* segment usage file */ -#define NILFS_IFILE_INO 6 /* ifile */ -#define NILFS_ATIME_INO 7 /* Atime file (reserved) */ -#define NILFS_XATTR_INO 8 /* Xattribute file (reserved) */ -#define NILFS_SKETCH_INO 10 /* Sketch file */ -#define NILFS_USER_INO 11 /* Fisrt user's file inode number */ - -#define NILFS_SB_OFFSET_BYTES 1024 /* byte offset of nilfs superblock */ - -#define NILFS_SEG_MIN_BLOCKS 16 /* Minimum number of blocks in - a full segment */ -#define NILFS_PSEG_MIN_BLOCKS 2 /* Minimum number of blocks in - a partial segment */ -#define NILFS_MIN_NRSVSEGS 8 /* Minimum number of reserved - segments */ - -/* - * We call DAT, cpfile, and sufile root metadata files. Inodes of - * these files are written in super root block instead of ifile, and - * garbage collector doesn't keep any past versions of these files. - */ -#define NILFS_ROOT_METADATA_FILE(ino) \ - ((ino) >= NILFS_DAT_INO && (ino) <= NILFS_SUFILE_INO) - -/* - * bytes offset of secondary super block - */ -#define NILFS_SB2_OFFSET_BYTES(devsize) ((((devsize) >> 12) - 1) << 12) - -/* - * Maximal count of links to a file - */ -#define NILFS_LINK_MAX 32000 - -/* - * Structure of a directory entry - * (Same as ext2) - */ - -#define NILFS_NAME_LEN 255 - -/* - * Block size limitations - */ -#define NILFS_MIN_BLOCK_SIZE 1024 -#define NILFS_MAX_BLOCK_SIZE 65536 - -/* - * The new version of the directory entry. 
Since V0 structures are - * stored in intel byte order, and the name_len field could never be - * bigger than 255 chars, it's safe to reclaim the extra byte for the - * file_type field. - */ -struct nilfs_dir_entry { - __le64 inode; /* Inode number */ - __le16 rec_len; /* Directory entry length */ - __u8 name_len; /* Name length */ - __u8 file_type; /* Dir entry type (file, dir, etc) */ - char name[NILFS_NAME_LEN]; /* File name */ - char pad; -}; - -/* - * NILFS directory file types. Only the low 3 bits are used. The - * other bits are reserved for now. - */ -enum { - NILFS_FT_UNKNOWN, - NILFS_FT_REG_FILE, - NILFS_FT_DIR, - NILFS_FT_CHRDEV, - NILFS_FT_BLKDEV, - NILFS_FT_FIFO, - NILFS_FT_SOCK, - NILFS_FT_SYMLINK, - NILFS_FT_MAX -}; - -/* - * NILFS_DIR_PAD defines the directory entries boundaries - * - * NOTE: It must be a multiple of 8 - */ -#define NILFS_DIR_PAD 8 -#define NILFS_DIR_ROUND (NILFS_DIR_PAD - 1) -#define NILFS_DIR_REC_LEN(name_len) (((name_len) + 12 + NILFS_DIR_ROUND) & \ - ~NILFS_DIR_ROUND) -#define NILFS_MAX_REC_LEN ((1<<16)-1) - -static inline unsigned nilfs_rec_len_from_disk(__le16 dlen) -{ - unsigned len = le16_to_cpu(dlen); - -#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536) - if (len == NILFS_MAX_REC_LEN) - return 1 << 16; -#endif - return len; -} - -static inline __le16 nilfs_rec_len_to_disk(unsigned len) -{ -#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536) - if (len == (1 << 16)) - return cpu_to_le16(NILFS_MAX_REC_LEN); - else if (len > (1 << 16)) - BUG(); -#endif - return cpu_to_le16(len); -} - -/** - * struct nilfs_finfo - file information - * @fi_ino: inode number - * @fi_cno: checkpoint number - * @fi_nblocks: number of blocks (including intermediate blocks) - * @fi_ndatablk: number of file data blocks - */ -struct nilfs_finfo { - __le64 fi_ino; - __le64 fi_cno; - __le32 fi_nblocks; - __le32 fi_ndatablk; - /* array of virtual block numbers */ -}; - -/** - * struct nilfs_binfo_v - information for the block to which a virtual block number is assigned - * @bi_vblocknr: virtual block number - * @bi_blkoff: block offset - */ -struct nilfs_binfo_v { - __le64 bi_vblocknr; - __le64 bi_blkoff; -}; - -/** - * struct nilfs_binfo_dat - information for the block which belongs to the DAT file - * @bi_blkoff: block offset - * @bi_level: level - * @bi_pad: padding - */ -struct nilfs_binfo_dat { - __le64 bi_blkoff; - __u8 bi_level; - __u8 bi_pad[7]; -}; - -/** - * union nilfs_binfo: block information - * @bi_v: nilfs_binfo_v structure - * @bi_dat: nilfs_binfo_dat structure - */ -union nilfs_binfo { - struct nilfs_binfo_v bi_v; - struct nilfs_binfo_dat bi_dat; -}; - -/** - * struct nilfs_segment_summary - segment summary header - * @ss_datasum: checksum of data - * @ss_sumsum: checksum of segment summary - * @ss_magic: magic number - * @ss_bytes: size of this structure in bytes - * @ss_flags: flags - * @ss_seq: sequence number - * @ss_create: creation timestamp - * @ss_next: next segment - * @ss_nblocks: number of blocks - * @ss_nfinfo: number of finfo structures - * @ss_sumbytes: total size of segment summary in bytes - * @ss_pad: padding - * @ss_cno: checkpoint number - */ -struct nilfs_segment_summary { - __le32 ss_datasum; - __le32 ss_sumsum; - __le32 ss_magic; - __le16 ss_bytes; - __le16 ss_flags; - __le64 ss_seq; - __le64 ss_create; - __le64 ss_next; - __le32 ss_nblocks; - __le32 ss_nfinfo; - __le32 ss_sumbytes; - __le32 ss_pad; - __le64 ss_cno; - /* array of finfo structures */ -}; - -#define NILFS_SEGSUM_MAGIC 0x1eaffa11 /* segment summary magic number */ - -/* - * Segment 
summary flags - */ -#define NILFS_SS_LOGBGN 0x0001 /* begins a logical segment */ -#define NILFS_SS_LOGEND 0x0002 /* ends a logical segment */ -#define NILFS_SS_SR 0x0004 /* has super root */ -#define NILFS_SS_SYNDT 0x0008 /* includes data only updates */ -#define NILFS_SS_GC 0x0010 /* segment written for cleaner operation */ - -/** - * struct nilfs_btree_node - B-tree node - * @bn_flags: flags - * @bn_level: level - * @bn_nchildren: number of children - * @bn_pad: padding - */ -struct nilfs_btree_node { - __u8 bn_flags; - __u8 bn_level; - __le16 bn_nchildren; - __le32 bn_pad; -}; - -/* flags */ -#define NILFS_BTREE_NODE_ROOT 0x01 - -/* level */ -#define NILFS_BTREE_LEVEL_DATA 0 -#define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1) -#define NILFS_BTREE_LEVEL_MAX 14 /* Max level (exclusive) */ - -/** - * struct nilfs_palloc_group_desc - block group descriptor - * @pg_nfrees: number of free entries in block group - */ -struct nilfs_palloc_group_desc { - __le32 pg_nfrees; -}; - -/** - * struct nilfs_dat_entry - disk address translation entry - * @de_blocknr: block number - * @de_start: start checkpoint number - * @de_end: end checkpoint number - * @de_rsv: reserved for future use - */ -struct nilfs_dat_entry { - __le64 de_blocknr; - __le64 de_start; - __le64 de_end; - __le64 de_rsv; -}; - -#define NILFS_MIN_DAT_ENTRY_SIZE 32 - -/** - * struct nilfs_snapshot_list - snapshot list - * @ssl_next: next checkpoint number on snapshot list - * @ssl_prev: previous checkpoint number on snapshot list - */ -struct nilfs_snapshot_list { - __le64 ssl_next; - __le64 ssl_prev; -}; - -/** - * struct nilfs_checkpoint - checkpoint structure - * @cp_flags: flags - * @cp_checkpoints_count: checkpoints count in a block - * @cp_snapshot_list: snapshot list - * @cp_cno: checkpoint number - * @cp_create: creation timestamp - * @cp_nblk_inc: number of blocks incremented by this checkpoint - * @cp_inodes_count: inodes count - * @cp_blocks_count: blocks count - * @cp_ifile_inode: inode of ifile - */ -struct nilfs_checkpoint { - __le32 cp_flags; - __le32 cp_checkpoints_count; - struct nilfs_snapshot_list cp_snapshot_list; - __le64 cp_cno; - __le64 cp_create; - __le64 cp_nblk_inc; - __le64 cp_inodes_count; - __le64 cp_blocks_count; - - /* Do not change the byte offset of ifile inode. - To keep the compatibility of the disk format, - additional fields should be added behind cp_ifile_inode. 
*/ - struct nilfs_inode cp_ifile_inode; -}; - -#define NILFS_MIN_CHECKPOINT_SIZE (64 + NILFS_MIN_INODE_SIZE) - -/* checkpoint flags */ -enum { - NILFS_CHECKPOINT_SNAPSHOT, - NILFS_CHECKPOINT_INVALID, - NILFS_CHECKPOINT_SKETCH, - NILFS_CHECKPOINT_MINOR, -}; - -#define NILFS_CHECKPOINT_FNS(flag, name) \ -static inline void \ -nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp) \ -{ \ - cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) | \ - (1UL << NILFS_CHECKPOINT_##flag)); \ -} \ -static inline void \ -nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp) \ -{ \ - cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) & \ - ~(1UL << NILFS_CHECKPOINT_##flag)); \ -} \ -static inline int \ -nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp) \ -{ \ - return !!(le32_to_cpu(cp->cp_flags) & \ - (1UL << NILFS_CHECKPOINT_##flag)); \ -} - -NILFS_CHECKPOINT_FNS(SNAPSHOT, snapshot) -NILFS_CHECKPOINT_FNS(INVALID, invalid) -NILFS_CHECKPOINT_FNS(MINOR, minor) - -/** - * struct nilfs_cpinfo - checkpoint information - * @ci_flags: flags - * @ci_pad: padding - * @ci_cno: checkpoint number - * @ci_create: creation timestamp - * @ci_nblk_inc: number of blocks incremented by this checkpoint - * @ci_inodes_count: inodes count - * @ci_blocks_count: blocks count - * @ci_next: next checkpoint number in snapshot list - */ -struct nilfs_cpinfo { - __u32 ci_flags; - __u32 ci_pad; - __u64 ci_cno; - __u64 ci_create; - __u64 ci_nblk_inc; - __u64 ci_inodes_count; - __u64 ci_blocks_count; - __u64 ci_next; -}; - -#define NILFS_CPINFO_FNS(flag, name) \ -static inline int \ -nilfs_cpinfo_##name(const struct nilfs_cpinfo *cpinfo) \ -{ \ - return !!(cpinfo->ci_flags & (1UL << NILFS_CHECKPOINT_##flag)); \ -} - -NILFS_CPINFO_FNS(SNAPSHOT, snapshot) -NILFS_CPINFO_FNS(INVALID, invalid) -NILFS_CPINFO_FNS(MINOR, minor) - - -/** - * struct nilfs_cpfile_header - checkpoint file header - * @ch_ncheckpoints: number of checkpoints - * @ch_nsnapshots: number of snapshots - * @ch_snapshot_list: snapshot list - */ -struct nilfs_cpfile_header { - __le64 ch_ncheckpoints; - __le64 ch_nsnapshots; - struct nilfs_snapshot_list ch_snapshot_list; -}; - -#define NILFS_CPFILE_FIRST_CHECKPOINT_OFFSET \ - ((sizeof(struct nilfs_cpfile_header) + \ - sizeof(struct nilfs_checkpoint) - 1) / \ - sizeof(struct nilfs_checkpoint)) - -/** - * struct nilfs_segment_usage - segment usage - * @su_lastmod: last modified timestamp - * @su_nblocks: number of blocks in segment - * @su_flags: flags - */ -struct nilfs_segment_usage { - __le64 su_lastmod; - __le32 su_nblocks; - __le32 su_flags; -}; - -#define NILFS_MIN_SEGMENT_USAGE_SIZE 16 - -/* segment usage flag */ -enum { - NILFS_SEGMENT_USAGE_ACTIVE, - NILFS_SEGMENT_USAGE_DIRTY, - NILFS_SEGMENT_USAGE_ERROR, - - /* ... 
*/ -}; - -#define NILFS_SEGMENT_USAGE_FNS(flag, name) \ -static inline void \ -nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su) \ -{ \ - su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) | \ - (1UL << NILFS_SEGMENT_USAGE_##flag));\ -} \ -static inline void \ -nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su) \ -{ \ - su->su_flags = \ - cpu_to_le32(le32_to_cpu(su->su_flags) & \ - ~(1UL << NILFS_SEGMENT_USAGE_##flag)); \ -} \ -static inline int \ -nilfs_segment_usage_##name(const struct nilfs_segment_usage *su) \ -{ \ - return !!(le32_to_cpu(su->su_flags) & \ - (1UL << NILFS_SEGMENT_USAGE_##flag)); \ -} - -NILFS_SEGMENT_USAGE_FNS(ACTIVE, active) -NILFS_SEGMENT_USAGE_FNS(DIRTY, dirty) -NILFS_SEGMENT_USAGE_FNS(ERROR, error) - -static inline void -nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su) -{ - su->su_lastmod = cpu_to_le64(0); - su->su_nblocks = cpu_to_le32(0); - su->su_flags = cpu_to_le32(0); -} - -static inline int -nilfs_segment_usage_clean(const struct nilfs_segment_usage *su) -{ - return !le32_to_cpu(su->su_flags); -} - -/** - * struct nilfs_sufile_header - segment usage file header - * @sh_ncleansegs: number of clean segments - * @sh_ndirtysegs: number of dirty segments - * @sh_last_alloc: last allocated segment number - */ -struct nilfs_sufile_header { - __le64 sh_ncleansegs; - __le64 sh_ndirtysegs; - __le64 sh_last_alloc; - /* ... */ -}; - -#define NILFS_SUFILE_FIRST_SEGMENT_USAGE_OFFSET \ - ((sizeof(struct nilfs_sufile_header) + \ - sizeof(struct nilfs_segment_usage) - 1) / \ - sizeof(struct nilfs_segment_usage)) - -/** - * nilfs_suinfo - segment usage information - * @sui_lastmod: timestamp of last modification - * @sui_nblocks: number of written blocks in segment - * @sui_flags: segment usage flags - */ -struct nilfs_suinfo { - __u64 sui_lastmod; - __u32 sui_nblocks; - __u32 sui_flags; -}; - -#define NILFS_SUINFO_FNS(flag, name) \ -static inline int \ -nilfs_suinfo_##name(const struct nilfs_suinfo *si) \ -{ \ - return si->sui_flags & (1UL << NILFS_SEGMENT_USAGE_##flag); \ -} - -NILFS_SUINFO_FNS(ACTIVE, active) -NILFS_SUINFO_FNS(DIRTY, dirty) -NILFS_SUINFO_FNS(ERROR, error) - -static inline int nilfs_suinfo_clean(const struct nilfs_suinfo *si) -{ - return !si->sui_flags; -} - -/* ioctl */ -/** - * nilfs_suinfo_update - segment usage information update - * @sup_segnum: segment number - * @sup_flags: flags for which fields are active in sup_sui - * @sup_reserved: reserved necessary for alignment - * @sup_sui: segment usage information - */ -struct nilfs_suinfo_update { - __u64 sup_segnum; - __u32 sup_flags; - __u32 sup_reserved; - struct nilfs_suinfo sup_sui; -}; - -enum { - NILFS_SUINFO_UPDATE_LASTMOD, - NILFS_SUINFO_UPDATE_NBLOCKS, - NILFS_SUINFO_UPDATE_FLAGS, - __NR_NILFS_SUINFO_UPDATE_FIELDS, -}; - -#define NILFS_SUINFO_UPDATE_FNS(flag, name) \ -static inline void \ -nilfs_suinfo_update_set_##name(struct nilfs_suinfo_update *sup) \ -{ \ - sup->sup_flags |= 1UL << NILFS_SUINFO_UPDATE_##flag; \ -} \ -static inline void \ -nilfs_suinfo_update_clear_##name(struct nilfs_suinfo_update *sup) \ -{ \ - sup->sup_flags &= ~(1UL << NILFS_SUINFO_UPDATE_##flag); \ -} \ -static inline int \ -nilfs_suinfo_update_##name(const struct nilfs_suinfo_update *sup) \ -{ \ - return !!(sup->sup_flags & (1UL << NILFS_SUINFO_UPDATE_##flag));\ -} - -NILFS_SUINFO_UPDATE_FNS(LASTMOD, lastmod) -NILFS_SUINFO_UPDATE_FNS(NBLOCKS, nblocks) -NILFS_SUINFO_UPDATE_FNS(FLAGS, flags) - -enum { - NILFS_CHECKPOINT, - NILFS_SNAPSHOT, -}; - -/** - * struct nilfs_cpmode - change 
checkpoint mode structure - * @cm_cno: checkpoint number - * @cm_mode: mode of checkpoint - * @cm_pad: padding - */ -struct nilfs_cpmode { - __u64 cm_cno; - __u32 cm_mode; - __u32 cm_pad; -}; - -/** - * struct nilfs_argv - argument vector - * @v_base: pointer on data array from userspace - * @v_nmembs: number of members in data array - * @v_size: size of data array in bytes - * @v_flags: flags - * @v_index: start number of target data items - */ -struct nilfs_argv { - __u64 v_base; - __u32 v_nmembs; /* number of members */ - __u16 v_size; /* size of members */ - __u16 v_flags; - __u64 v_index; -}; - -/** - * struct nilfs_period - period of checkpoint numbers - * @p_start: start checkpoint number (inclusive) - * @p_end: end checkpoint number (exclusive) - */ -struct nilfs_period { - __u64 p_start; - __u64 p_end; -}; - -/** - * struct nilfs_cpstat - checkpoint statistics - * @cs_cno: checkpoint number - * @cs_ncps: number of checkpoints - * @cs_nsss: number of snapshots - */ -struct nilfs_cpstat { - __u64 cs_cno; - __u64 cs_ncps; - __u64 cs_nsss; -}; - -/** - * struct nilfs_sustat - segment usage statistics - * @ss_nsegs: number of segments - * @ss_ncleansegs: number of clean segments - * @ss_ndirtysegs: number of dirty segments - * @ss_ctime: creation time of the last segment - * @ss_nongc_ctime: creation time of the last segment not for GC - * @ss_prot_seq: least sequence number of segments which must not be reclaimed - */ -struct nilfs_sustat { - __u64 ss_nsegs; - __u64 ss_ncleansegs; - __u64 ss_ndirtysegs; - __u64 ss_ctime; - __u64 ss_nongc_ctime; - __u64 ss_prot_seq; -}; - -/** - * struct nilfs_vinfo - virtual block number information - * @vi_vblocknr: virtual block number - * @vi_start: start checkpoint number (inclusive) - * @vi_end: end checkpoint number (exclusive) - * @vi_blocknr: disk block number - */ -struct nilfs_vinfo { - __u64 vi_vblocknr; - __u64 vi_start; - __u64 vi_end; - __u64 vi_blocknr; -}; - -/** - * struct nilfs_vdesc - descriptor of virtual block number - * @vd_ino: inode number - * @vd_cno: checkpoint number - * @vd_vblocknr: virtual block number - * @vd_period: period of checkpoint numbers - * @vd_blocknr: disk block number - * @vd_offset: logical block offset inside a file - * @vd_flags: flags (data or node block) - * @vd_pad: padding - */ -struct nilfs_vdesc { - __u64 vd_ino; - __u64 vd_cno; - __u64 vd_vblocknr; - struct nilfs_period vd_period; - __u64 vd_blocknr; - __u64 vd_offset; - __u32 vd_flags; - __u32 vd_pad; -}; - -/** - * struct nilfs_bdesc - descriptor of disk block number - * @bd_ino: inode number - * @bd_oblocknr: disk block address (for skipping dead blocks) - * @bd_blocknr: disk block address - * @bd_offset: logical block offset inside a file - * @bd_level: level in the b-tree organization - * @bd_pad: padding - */ -struct nilfs_bdesc { - __u64 bd_ino; - __u64 bd_oblocknr; - __u64 bd_blocknr; - __u64 bd_offset; - __u32 bd_level; - __u32 bd_pad; -}; - -#define NILFS_IOCTL_IDENT 'n' - -#define NILFS_IOCTL_CHANGE_CPMODE \ - _IOW(NILFS_IOCTL_IDENT, 0x80, struct nilfs_cpmode) -#define NILFS_IOCTL_DELETE_CHECKPOINT \ - _IOW(NILFS_IOCTL_IDENT, 0x81, __u64) -#define NILFS_IOCTL_GET_CPINFO \ - _IOR(NILFS_IOCTL_IDENT, 0x82, struct nilfs_argv) -#define NILFS_IOCTL_GET_CPSTAT \ - _IOR(NILFS_IOCTL_IDENT, 0x83, struct nilfs_cpstat) -#define NILFS_IOCTL_GET_SUINFO \ - _IOR(NILFS_IOCTL_IDENT, 0x84, struct nilfs_argv) -#define NILFS_IOCTL_GET_SUSTAT \ - _IOR(NILFS_IOCTL_IDENT, 0x85, struct nilfs_sustat) -#define NILFS_IOCTL_GET_VINFO \ - _IOWR(NILFS_IOCTL_IDENT, 
0x86, struct nilfs_argv) -#define NILFS_IOCTL_GET_BDESCS \ - _IOWR(NILFS_IOCTL_IDENT, 0x87, struct nilfs_argv) -#define NILFS_IOCTL_CLEAN_SEGMENTS \ - _IOW(NILFS_IOCTL_IDENT, 0x88, struct nilfs_argv[5]) -#define NILFS_IOCTL_SYNC \ - _IOR(NILFS_IOCTL_IDENT, 0x8A, __u64) -#define NILFS_IOCTL_RESIZE \ - _IOW(NILFS_IOCTL_IDENT, 0x8B, __u64) -#define NILFS_IOCTL_SET_ALLOC_RANGE \ - _IOW(NILFS_IOCTL_IDENT, 0x8C, __u64[2]) -#define NILFS_IOCTL_SET_SUINFO \ - _IOW(NILFS_IOCTL_IDENT, 0x8D, struct nilfs_argv) - -#endif /* _LINUX_NILFS_FS_H */ diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h index 167342c2ce6b..0f6f6607f592 100644 --- a/include/linux/nl802154.h +++ b/include/linux/nl802154.h @@ -92,6 +92,8 @@ enum { IEEE802154_ATTR_LLSEC_DEV_OVERRIDE, IEEE802154_ATTR_LLSEC_DEV_KEY_MODE, + IEEE802154_ATTR_PAD, + __IEEE802154_ATTR_MAX, }; diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index 6e85889cf9ab..f746e44d4046 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -43,8 +43,10 @@ * * int first_node(mask) Number lowest set bit, or MAX_NUMNODES * int next_node(node, mask) Next node past 'node', or MAX_NUMNODES + * int next_node_in(node, mask) Next node past 'node', or wrap to first, + * or MAX_NUMNODES * int first_unset_node(mask) First node not set in mask, or - * MAX_NUMNODES. + * MAX_NUMNODES * * nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set * NODE_MASK_ALL Initializer - all bits set @@ -259,6 +261,13 @@ static inline int __next_node(int n, const nodemask_t *srcp) return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); } +/* + * Find the next present node in src, starting after node n, wrapping around to + * the first node in src if needed. Returns MAX_NUMNODES if src is empty. + */ +#define next_node_in(n, src) __next_node_in((n), &(src)) +int __next_node_in(int node, const nodemask_t *srcp); + static inline void init_nodemask_of_node(nodemask_t *mask, int node) { nodes_clear(*mask); diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h new file mode 100644 index 000000000000..bf240a3cbf99 --- /dev/null +++ b/include/linux/nvme-rdma.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
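The next_node_in() helper added to nodemask.h above differs from next_node() only in its wrap-around behaviour: instead of stopping at MAX_NUMNODES it restarts from the first set node, and only an empty mask yields MAX_NUMNODES. One plausible out-of-line body for __next_node_in(), consistent with the declared semantics (a sketch; the actual definition lives outside this header):

	#include <linux/nodemask.h>

	int __next_node_in(int node, const nodemask_t *srcp)
	{
		int ret = __next_node(node, srcp);

		/* Ran past the last set bit: wrap to the first set node.
		 * MAX_NUMNODES is returned only when the mask is empty. */
		if (ret == MAX_NUMNODES)
			ret = __first_node(srcp);
		return ret;
	}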
+ */ + +#ifndef _LINUX_NVME_RDMA_H +#define _LINUX_NVME_RDMA_H + +enum nvme_rdma_cm_fmt { + NVME_RDMA_CM_FMT_1_0 = 0x0, +}; + +enum nvme_rdma_cm_status { + NVME_RDMA_CM_INVALID_LEN = 0x01, + NVME_RDMA_CM_INVALID_RECFMT = 0x02, + NVME_RDMA_CM_INVALID_QID = 0x03, + NVME_RDMA_CM_INVALID_HSQSIZE = 0x04, + NVME_RDMA_CM_INVALID_HRQSIZE = 0x05, + NVME_RDMA_CM_NO_RSC = 0x06, + NVME_RDMA_CM_INVALID_IRD = 0x07, + NVME_RDMA_CM_INVALID_ORD = 0x08, +}; + +/** + * struct nvme_rdma_cm_req - rdma connect request + * + * @recfmt: format of the RDMA Private Data + * @qid: queue Identifier for the Admin or I/O Queue + * @hrqsize: host receive queue size to be created + * @hsqsize: host send queue size to be created + */ +struct nvme_rdma_cm_req { + __le16 recfmt; + __le16 qid; + __le16 hrqsize; + __le16 hsqsize; + u8 rsvd[24]; +}; + +/** + * struct nvme_rdma_cm_rep - rdma connect reply + * + * @recfmt: format of the RDMA Private Data + * @crqsize: controller receive queue size + */ +struct nvme_rdma_cm_rep { + __le16 recfmt; + __le16 crqsize; + u8 rsvd[28]; +}; + +/** + * struct nvme_rdma_cm_rej - rdma connect reject + * + * @recfmt: format of the RDMA Private Data + * @fsts: error status for the associated connect request + */ +struct nvme_rdma_cm_rej { + __le16 recfmt; + __le16 sts; +}; + +#endif /* _LINUX_NVME_RDMA_H */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h index a55986f6fe38..7676557ce357 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -16,18 +16,90 @@ #define _LINUX_NVME_H #include <linux/types.h> +#include <linux/uuid.h> + +/* NQN names in commands fields specified one size */ +#define NVMF_NQN_FIELD_LEN 256 + +/* However the max length of a qualified name is another size */ +#define NVMF_NQN_SIZE 223 + +#define NVMF_TRSVCID_SIZE 32 +#define NVMF_TRADDR_SIZE 256 +#define NVMF_TSAS_SIZE 256 + +#define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery" + +#define NVME_RDMA_IP_PORT 4420 + +enum nvme_subsys_type { + NVME_NQN_DISC = 1, /* Discovery type target subsystem */ + NVME_NQN_NVME = 2, /* NVME type target subsystem */ +}; + +/* Address Family codes for Discovery Log Page entry ADRFAM field */ +enum { + NVMF_ADDR_FAMILY_PCI = 0, /* PCIe */ + NVMF_ADDR_FAMILY_IP4 = 1, /* IP4 */ + NVMF_ADDR_FAMILY_IP6 = 2, /* IP6 */ + NVMF_ADDR_FAMILY_IB = 3, /* InfiniBand */ + NVMF_ADDR_FAMILY_FC = 4, /* Fibre Channel */ +}; + +/* Transport Type codes for Discovery Log Page entry TRTYPE field */ +enum { + NVMF_TRTYPE_RDMA = 1, /* RDMA */ + NVMF_TRTYPE_FC = 2, /* Fibre Channel */ + NVMF_TRTYPE_LOOP = 254, /* Reserved for host usage */ + NVMF_TRTYPE_MAX, +}; + +/* Transport Requirements codes for Discovery Log Page entry TREQ field */ +enum { + NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */ + NVMF_TREQ_REQUIRED = 1, /* Required */ + NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */ +}; + +/* RDMA QP Service Type codes for Discovery Log Page entry TSAS + * RDMA_QPTYPE field + */ +enum { + NVMF_RDMA_QPTYPE_CONNECTED = 0, /* Reliable Connected */ + NVMF_RDMA_QPTYPE_DATAGRAM = 1, /* Reliable Datagram */ +}; + +/* RDMA QP Service Type codes for Discovery Log Page entry TSAS + * RDMA_QPTYPE field + */ +enum { + NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 0, /* No Provider Specified */ + NVMF_RDMA_PRTYPE_IB = 1, /* InfiniBand */ + NVMF_RDMA_PRTYPE_ROCE = 2, /* InfiniBand RoCE */ + NVMF_RDMA_PRTYPE_ROCEV2 = 3, /* InfiniBand RoCEV2 */ + NVMF_RDMA_PRTYPE_IWARP = 4, /* IWARP */ +}; + +/* RDMA Connection Management Service Type codes for Discovery Log Page + * entry TSAS RDMA_CMS field + */ +enum { 
+ NVMF_RDMA_CMS_RDMA_CM = 0, /* Sockets based enpoint addressing */ +}; + +#define NVMF_AQ_DEPTH 32 enum { NVME_REG_CAP = 0x0000, /* Controller Capabilities */ NVME_REG_VS = 0x0008, /* Version */ NVME_REG_INTMS = 0x000c, /* Interrupt Mask Set */ - NVME_REG_INTMC = 0x0010, /* Interrupt Mask Set */ + NVME_REG_INTMC = 0x0010, /* Interrupt Mask Clear */ NVME_REG_CC = 0x0014, /* Controller Configuration */ NVME_REG_CSTS = 0x001c, /* Controller Status */ NVME_REG_NSSR = 0x0020, /* NVM Subsystem Reset */ NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */ NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */ - NVME_REG_ACQ = 0x0030, /* Admin SQ Base Address */ + NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */ NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */ NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */ }; @@ -50,6 +122,13 @@ enum { #define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2) #define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1) +/* + * Submission and Completion Queue Entry Sizes for the NVM command set. + * (In bytes and specified as a power of two (2^n)). + */ +#define NVME_NVM_IOSQES 6 +#define NVME_NVM_IOCQES 4 + enum { NVME_CC_ENABLE = 1 << 0, NVME_CC_CSS_NVM = 0 << 4, @@ -61,8 +140,8 @@ enum { NVME_CC_SHN_NORMAL = 1 << 14, NVME_CC_SHN_ABRUPT = 2 << 14, NVME_CC_SHN_MASK = 3 << 14, - NVME_CC_IOSQES = 6 << 16, - NVME_CC_IOCQES = 4 << 20, + NVME_CC_IOSQES = NVME_NVM_IOSQES << 16, + NVME_CC_IOCQES = NVME_NVM_IOCQES << 20, NVME_CSTS_RDY = 1 << 0, NVME_CSTS_CFS = 1 << 1, NVME_CSTS_NSSRO = 1 << 4, @@ -107,7 +186,11 @@ struct nvme_id_ctrl { __u8 mdts; __le16 cntlid; __le32 ver; - __u8 rsvd84[172]; + __le32 rtd3r; + __le32 rtd3e; + __le32 oaes; + __le32 ctratt; + __u8 rsvd100[156]; __le16 oacs; __u8 acl; __u8 aerl; @@ -119,10 +202,12 @@ struct nvme_id_ctrl { __u8 apsta; __le16 wctemp; __le16 cctemp; - __u8 rsvd270[242]; + __u8 rsvd270[50]; + __le16 kas; + __u8 rsvd322[190]; __u8 sqes; __u8 cqes; - __u8 rsvd514[2]; + __le16 maxcmd; __le32 nn; __le16 oncs; __le16 fuses; @@ -135,7 +220,15 @@ struct nvme_id_ctrl { __le16 acwu; __u8 rsvd534[2]; __le32 sgls; - __u8 rsvd540[1508]; + __u8 rsvd540[228]; + char subnqn[256]; + __u8 rsvd1024[768]; + __le32 ioccsz; + __le32 iorcsz; + __le16 icdoff; + __u8 ctrattr; + __u8 msdbd; + __u8 rsvd1804[244]; struct nvme_id_power_state psd[32]; __u8 vs[1024]; }; @@ -274,6 +367,12 @@ struct nvme_reservation_status { } regctl_ds[]; }; +enum nvme_async_event_type { + NVME_AER_TYPE_ERROR = 0, + NVME_AER_TYPE_SMART = 1, + NVME_AER_TYPE_NOTICE = 2, +}; + /* I/O commands */ enum nvme_opcode { @@ -290,6 +389,84 @@ enum nvme_opcode { nvme_cmd_resv_release = 0x15, }; +/* + * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier + * + * @NVME_SGL_FMT_ADDRESS: absolute address of the data block + * @NVME_SGL_FMT_OFFSET: relative offset of the in-capsule data block + * @NVME_SGL_FMT_INVALIDATE: RDMA transport specific remote invalidation + * request subtype + */ +enum { + NVME_SGL_FMT_ADDRESS = 0x00, + NVME_SGL_FMT_OFFSET = 0x01, + NVME_SGL_FMT_INVALIDATE = 0x0f, +}; + +/* + * Descriptor type - upper 4 bits of nvme_(keyed_)sgl_desc identifier + * + * For struct nvme_sgl_desc: + * @NVME_SGL_FMT_DATA_DESC: data block descriptor + * @NVME_SGL_FMT_SEG_DESC: sgl segment descriptor + * @NVME_SGL_FMT_LAST_SEG_DESC: last sgl segment descriptor + * + * For struct nvme_keyed_sgl_desc: + * @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor + */ +enum { + NVME_SGL_FMT_DATA_DESC = 0x00, + NVME_SGL_FMT_SEG_DESC = 0x02, + NVME_SGL_FMT_LAST_SEG_DESC = 0x03, + 
NVME_KEY_SGL_FMT_DATA_DESC = 0x04, +}; + +struct nvme_sgl_desc { + __le64 addr; + __le32 length; + __u8 rsvd[3]; + __u8 type; +}; + +struct nvme_keyed_sgl_desc { + __le64 addr; + __u8 length[3]; + __u8 key[4]; + __u8 type; +}; + +union nvme_data_ptr { + struct { + __le64 prp1; + __le64 prp2; + }; + struct nvme_sgl_desc sgl; + struct nvme_keyed_sgl_desc ksgl; +}; + +/* + * Lowest two bits of our flags field (FUSE field in the spec): + * + * @NVME_CMD_FUSE_FIRST: Fused Operation, first command + * @NVME_CMD_FUSE_SECOND: Fused Operation, second command + * + * Highest two bits in our flags field (PSDT field in the spec): + * + * @NVME_CMD_PSDT_SGL_METABUF: Use SGLS for this transfer, + * If used, MPTR contains addr of single physical buffer (byte aligned). + * @NVME_CMD_PSDT_SGL_METASEG: Use SGLS for this transfer, + * If used, MPTR contains an address of an SGL segment containing + * exactly 1 SGL descriptor (qword aligned). + */ +enum { + NVME_CMD_FUSE_FIRST = (1 << 0), + NVME_CMD_FUSE_SECOND = (1 << 1), + + NVME_CMD_SGL_METABUF = (1 << 6), + NVME_CMD_SGL_METASEG = (1 << 7), + NVME_CMD_SGL_ALL = NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG, +}; + struct nvme_common_command { __u8 opcode; __u8 flags; @@ -297,8 +474,7 @@ struct nvme_common_command { __le32 nsid; __le32 cdw2[2]; __le64 metadata; - __le64 prp1; - __le64 prp2; + union nvme_data_ptr dptr; __le32 cdw10[6]; }; @@ -309,8 +485,7 @@ struct nvme_rw_command { __le32 nsid; __u64 rsvd2; __le64 metadata; - __le64 prp1; - __le64 prp2; + union nvme_data_ptr dptr; __le64 slba; __le16 length; __le16 control; @@ -350,8 +525,7 @@ struct nvme_dsm_cmd { __u16 command_id; __le32 nsid; __u64 rsvd2[2]; - __le64 prp1; - __le64 prp2; + union nvme_data_ptr dptr; __le32 nr; __le32 attributes; __u32 rsvd12[4]; @@ -384,6 +558,7 @@ enum nvme_admin_opcode { nvme_admin_async_event = 0x0c, nvme_admin_activate_fw = 0x10, nvme_admin_download_fw = 0x11, + nvme_admin_keep_alive = 0x18, nvme_admin_format_nvm = 0x80, nvme_admin_security_send = 0x81, nvme_admin_security_recv = 0x82, @@ -408,6 +583,7 @@ enum { NVME_FEAT_WRITE_ATOMIC = 0x0a, NVME_FEAT_ASYNC_EVENT = 0x0b, NVME_FEAT_AUTO_PST = 0x0c, + NVME_FEAT_KATO = 0x0f, NVME_FEAT_SW_PROGRESS = 0x80, NVME_FEAT_HOST_ID = 0x81, NVME_FEAT_RESV_MASK = 0x82, @@ -415,6 +591,7 @@ enum { NVME_LOG_ERROR = 0x01, NVME_LOG_SMART = 0x02, NVME_LOG_FW_SLOT = 0x03, + NVME_LOG_DISC = 0x70, NVME_LOG_RESERVATION = 0x80, NVME_FWACT_REPL = (0 << 3), NVME_FWACT_REPL_ACTV = (1 << 3), @@ -427,8 +604,7 @@ struct nvme_identify { __u16 command_id; __le32 nsid; __u64 rsvd2[2]; - __le64 prp1; - __le64 prp2; + union nvme_data_ptr dptr; __le32 cns; __u32 rsvd11[5]; }; @@ -439,8 +615,7 @@ struct nvme_features { __u16 command_id; __le32 nsid; __u64 rsvd2[2]; - __le64 prp1; - __le64 prp2; + union nvme_data_ptr dptr; __le32 fid; __le32 dword11; __u32 rsvd12[4]; @@ -499,8 +674,7 @@ struct nvme_download_firmware { __u8 flags; __u16 command_id; __u32 rsvd1[5]; - __le64 prp1; - __le64 prp2; + union nvme_data_ptr dptr; __le32 numd; __le32 offset; __u32 rsvd12[4]; @@ -516,6 +690,143 @@ struct nvme_format_cmd { __u32 rsvd11[5]; }; +struct nvme_get_log_page_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __u8 lid; + __u8 rsvd10; + __le16 numdl; + __le16 numdu; + __u16 rsvd11; + __le32 lpol; + __le32 lpou; + __u32 rsvd14[2]; +}; + +/* + * Fabrics subcommands. 
+ */ +enum nvmf_fabrics_opcode { + nvme_fabrics_command = 0x7f, +}; + +enum nvmf_capsule_command { + nvme_fabrics_type_property_set = 0x00, + nvme_fabrics_type_connect = 0x01, + nvme_fabrics_type_property_get = 0x04, +}; + +struct nvmf_common_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 ts[24]; +}; + +/* + * The legal cntlid range a NVMe Target will provide. + * Note that cntlid of value 0 is considered illegal in the fabrics world. + * Devices based on earlier specs did not have the subsystem concept; + * therefore, those devices had their cntlid value set to 0 as a result. + */ +#define NVME_CNTLID_MIN 1 +#define NVME_CNTLID_MAX 0xffef +#define NVME_CNTLID_DYNAMIC 0xffff + +#define MAX_DISC_LOGS 255 + +/* Discovery log page entry */ +struct nvmf_disc_rsp_page_entry { + __u8 trtype; + __u8 adrfam; + __u8 nqntype; + __u8 treq; + __le16 portid; + __le16 cntlid; + __le16 asqsz; + __u8 resv8[22]; + char trsvcid[NVMF_TRSVCID_SIZE]; + __u8 resv64[192]; + char subnqn[NVMF_NQN_FIELD_LEN]; + char traddr[NVMF_TRADDR_SIZE]; + union tsas { + char common[NVMF_TSAS_SIZE]; + struct rdma { + __u8 qptype; + __u8 prtype; + __u8 cms; + __u8 resv3[5]; + __u16 pkey; + __u8 resv10[246]; + } rdma; + } tsas; +}; + +/* Discovery log page header */ +struct nvmf_disc_rsp_page_hdr { + __le64 genctr; + __le64 numrec; + __le16 recfmt; + __u8 resv14[1006]; + struct nvmf_disc_rsp_page_entry entries[0]; +}; + +struct nvmf_connect_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __le16 recfmt; + __le16 qid; + __le16 sqsize; + __u8 cattr; + __u8 resv3; + __le32 kato; + __u8 resv4[12]; +}; + +struct nvmf_connect_data { + uuid_be hostid; + __le16 cntlid; + char resv4[238]; + char subsysnqn[NVMF_NQN_FIELD_LEN]; + char hostnqn[NVMF_NQN_FIELD_LEN]; + char resv5[256]; +}; + +struct nvmf_property_set_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 attrib; + __u8 resv3[3]; + __le32 offset; + __le64 value; + __u8 resv4[8]; +}; + +struct nvmf_property_get_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 attrib; + __u8 resv3[3]; + __le32 offset; + __u8 resv4[16]; +}; + struct nvme_command { union { struct nvme_common_command common; @@ -529,10 +840,30 @@ struct nvme_command { struct nvme_format_cmd format; struct nvme_dsm_cmd dsm; struct nvme_abort_cmd abort; + struct nvme_get_log_page_command get_log_page; + struct nvmf_common_command fabrics; + struct nvmf_connect_command connect; + struct nvmf_property_set_command prop_set; + struct nvmf_property_get_command prop_get; }; }; +static inline bool nvme_is_write(struct nvme_command *cmd) +{ + /* + * What a mess... + * + * Why can't we simply have a Fabrics In and Fabrics out command? 
+ */ + if (unlikely(cmd->common.opcode == nvme_fabrics_command)) + return cmd->fabrics.opcode & 1; + return cmd->common.opcode & 1; +} + enum { + /* + * Generic Command Status: + */ NVME_SC_SUCCESS = 0x0, NVME_SC_INVALID_OPCODE = 0x1, NVME_SC_INVALID_FIELD = 0x2, @@ -551,10 +882,18 @@ enum { NVME_SC_SGL_INVALID_DATA = 0xf, NVME_SC_SGL_INVALID_METADATA = 0x10, NVME_SC_SGL_INVALID_TYPE = 0x11, + + NVME_SC_SGL_INVALID_OFFSET = 0x16, + NVME_SC_SGL_INVALID_SUBTYPE = 0x17, + NVME_SC_LBA_RANGE = 0x80, NVME_SC_CAP_EXCEEDED = 0x81, NVME_SC_NS_NOT_READY = 0x82, NVME_SC_RESERVATION_CONFLICT = 0x83, + + /* + * Command Specific Status: + */ NVME_SC_CQ_INVALID = 0x100, NVME_SC_QID_INVALID = 0x101, NVME_SC_QUEUE_SIZE = 0x102, @@ -572,9 +911,29 @@ enum { NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e, NVME_SC_FEATURE_NOT_PER_NS = 0x10f, NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110, + + /* + * I/O Command Set Specific - NVM commands: + */ NVME_SC_BAD_ATTRIBUTES = 0x180, NVME_SC_INVALID_PI = 0x181, NVME_SC_READ_ONLY = 0x182, + + /* + * I/O Command Set Specific - Fabrics commands: + */ + NVME_SC_CONNECT_FORMAT = 0x180, + NVME_SC_CONNECT_CTRL_BUSY = 0x181, + NVME_SC_CONNECT_INVALID_PARAM = 0x182, + NVME_SC_CONNECT_RESTART_DISC = 0x183, + NVME_SC_CONNECT_INVALID_HOST = 0x184, + + NVME_SC_DISCOVERY_RESTART = 0x190, + NVME_SC_AUTH_REQUIRED = 0x191, + + /* + * Media and Data Integrity Errors: + */ NVME_SC_WRITE_FAULT = 0x280, NVME_SC_READ_ERROR = 0x281, NVME_SC_GUARD_CHECK = 0x282, @@ -582,12 +941,19 @@ enum { NVME_SC_REFTAG_CHECK = 0x284, NVME_SC_COMPARE_FAILED = 0x285, NVME_SC_ACCESS_DENIED = 0x286, + NVME_SC_DNR = 0x4000, }; struct nvme_completion { - __le32 result; /* Used by admin commands to return data */ - __u32 rsvd; + /* + * Used by Admin and Fabrics commands to return data: + */ + union { + __le16 result16; + __le32 result; + __le64 result64; + }; __le16 sq_head; /* how much of this queue may be reclaimed */ __le16 sq_id; /* submission queue that generated this entry */ __u16 command_id; /* of the command which completed */ diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h index 9bb77d3ed6e0..c2256d746543 100644 --- a/include/linux/nvmem-consumer.h +++ b/include/linux/nvmem-consumer.h @@ -74,7 +74,7 @@ static inline void nvmem_cell_put(struct nvmem_cell *cell) { } -static inline char *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) +static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) { return ERR_PTR(-ENOSYS); } diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index a4fcc90b0f20..cd93416d762e 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -14,6 +14,10 @@ struct nvmem_device; struct nvmem_cell_info; +typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset, + void *val, size_t bytes); +typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset, + void *val, size_t bytes); struct nvmem_config { struct device *dev; @@ -24,6 +28,12 @@ struct nvmem_config { int ncells; bool read_only; bool root_only; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + int size; + int word_size; + int stride; + void *priv; /* To be only used by old driver/misc/eeprom drivers */ bool compat; struct device *base_dev; diff --git a/include/linux/of.h b/include/linux/of.h index 31758036787c..3d9ff8e9d803 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -75,6 +75,23 @@ struct of_phandle_args { uint32_t args[MAX_PHANDLE_ARGS]; }; +struct of_phandle_iterator { + /* Common iterator information */ 
+ const char *cells_name; + int cell_count; + const struct device_node *parent; + + /* List size information */ + const __be32 *list_end; + const __be32 *phandle_end; + + /* Current position state */ + const __be32 *cur; + uint32_t cur_count; + phandle phandle; + struct device_node *node; +}; + struct of_reconfig_data { struct device_node *dn; struct property *prop; @@ -221,13 +238,6 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size) #define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1 #endif -/* Default string compare functions, Allow arch asm/prom.h to override */ -#if !defined(of_compat_cmp) -#define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2)) -#define of_prop_cmp(s1, s2) strcmp((s1), (s2)) -#define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) -#endif - #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) @@ -307,6 +317,8 @@ extern int of_property_read_string_helper(const struct device_node *np, const char **out_strs, size_t sz, int index); extern int of_device_is_compatible(const struct device_node *device, const char *); +extern int of_device_compatible_match(struct device_node *device, + const char *const *compat); extern bool of_device_is_available(const struct device_node *device); extern bool of_device_is_big_endian(const struct device_node *device); extern const void *of_get_property(const struct device_node *node, @@ -334,6 +346,18 @@ extern int of_parse_phandle_with_fixed_args(const struct device_node *np, extern int of_count_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name); +/* phandle iterator functions */ +extern int of_phandle_iterator_init(struct of_phandle_iterator *it, + const struct device_node *np, + const char *list_name, + const char *cells_name, + int cell_count); + +extern int of_phandle_iterator_next(struct of_phandle_iterator *it); +extern int of_phandle_iterator_args(struct of_phandle_iterator *it, + uint32_t *args, + int size); + extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)); extern int of_alias_get_id(struct device_node *np, const char *stem); extern int of_alias_get_highest_id(const char *stem); @@ -585,7 +609,7 @@ static inline struct device_node *of_parse_phandle(const struct device_node *np, return NULL; } -static inline int of_parse_phandle_with_args(struct device_node *np, +static inline int of_parse_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name, int index, @@ -608,6 +632,27 @@ static inline int of_count_phandle_with_args(struct device_node *np, return -ENOSYS; } +static inline int of_phandle_iterator_init(struct of_phandle_iterator *it, + const struct device_node *np, + const char *list_name, + const char *cells_name, + int cell_count) +{ + return -ENOSYS; +} + +static inline int of_phandle_iterator_next(struct of_phandle_iterator *it) +{ + return -ENOSYS; +} + +static inline int of_phandle_iterator_args(struct of_phandle_iterator *it, + uint32_t *args, + int size) +{ + return 0; +} + static inline int of_alias_get_id(struct device_node *np, const char *stem) { return -ENOSYS; @@ -676,6 +721,13 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag #define of_match_node(_matches, _node) NULL #endif /* CONFIG_OF */ +/* Default string compare functions, Allow arch asm/prom.h to override */ +#if !defined(of_compat_cmp) +#define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2)) +#define of_prop_cmp(s1, s2) strcmp((s1), (s2)) +#define 
of_node_cmp(s1, s2) strcasecmp((s1), (s2)) +#endif + #if defined(CONFIG_OF) && defined(CONFIG_NUMA) extern int of_node_to_nid(struct device_node *np); #else @@ -685,6 +737,15 @@ static inline int of_node_to_nid(struct device_node *device) } #endif +#ifdef CONFIG_OF_NUMA +extern int of_numa_init(void); +#else +static inline int of_numa_init(void) +{ + return -ENOSYS; +} +#endif + static inline struct device_node *of_find_matching_node( struct device_node *from, const struct of_device_id *matches) @@ -868,6 +929,12 @@ static inline int of_property_read_s32(const struct device_node *np, return of_property_read_u32(np, propname, (u32*) out_value); } +#define of_for_each_phandle(it, err, np, ln, cn, cc) \ + for (of_phandle_iterator_init((it), (np), (ln), (cn), (cc)), \ + err = of_phandle_iterator_next(it); \ + err == 0; \ + err = of_phandle_iterator_next(it)) + #define of_property_for_each_u32(np, propname, prop, p, u) \ for (prop = of_find_property(np, propname, NULL), \ p = of_prop_next_u32(prop, NULL, &u); \ @@ -944,10 +1011,13 @@ static inline int of_get_available_child_count(const struct device_node *np) #endif typedef int (*of_init_fn_2)(struct device_node *, struct device_node *); +typedef int (*of_init_fn_1_ret)(struct device_node *); typedef void (*of_init_fn_1)(struct device_node *); #define OF_DECLARE_1(table, name, compat, fn) \ _OF_DECLARE(table, name, compat, fn, of_init_fn_1) +#define OF_DECLARE_1_RET(table, name, compat, fn) \ + _OF_DECLARE(table, name, compat, fn, of_init_fn_1_ret) #define OF_DECLARE_2(table, name, compat, fn) \ _OF_DECLARE(table, name, compat, fn, of_init_fn_2) diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 01c0a556448b..37864734ca50 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -47,10 +47,6 @@ void __iomem *of_io_request_and_map(struct device_node *device, extern const __be32 *of_get_address(struct device_node *dev, int index, u64 *size, unsigned int *flags); -extern int pci_register_io_range(phys_addr_t addr, resource_size_t size); -extern unsigned long pci_address_to_pio(phys_addr_t addr); -extern phys_addr_t pci_pio_to_address(unsigned long pio); - extern int of_pci_range_parser_init(struct of_pci_range_parser *parser, struct device_node *node); extern struct of_pci_range *of_pci_range_parser_one( @@ -86,11 +82,6 @@ static inline const __be32 *of_get_address(struct device_node *dev, int index, return NULL; } -static inline phys_addr_t pci_pio_to_address(unsigned long pio) -{ - return 0; -} - static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser, struct device_node *node) { diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index 2fbe8682a66f..26c3302ae58f 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -37,8 +37,9 @@ extern bool of_fdt_is_big_endian(const void *blob, unsigned long node); extern int of_fdt_match(const void *blob, unsigned long node, const char *const *compat); -extern void of_fdt_unflatten_tree(const unsigned long *blob, - struct device_node **mynodes); +extern void *of_fdt_unflatten_tree(const unsigned long *blob, + struct device_node *dad, + struct device_node **mynodes); /* TBD: Temporary export of fdt globals - remove when code fully merged */ extern int __initdata dt_root_addr_cells; @@ -52,6 +53,8 @@ extern char __dtb_end[]; extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname, int depth, void *data), void *data); +extern int of_get_flat_dt_subnode_by_name(unsigned long node, + const char *uname); 
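The of_phandle_iterator machinery declared above gives callers a step-wise alternative to repeated of_parse_phandle_with_args() lookups. A usage sketch of the of_for_each_phandle() loop; the "clocks"/"#clock-cells" property names are example values chosen for illustration, not anything this patch mandates:

	#include <linux/of.h>
	#include <linux/printk.h>

	static void example_walk_phandles(const struct device_node *np)
	{
		struct of_phandle_iterator it;
		uint32_t args[MAX_PHANDLE_ARGS];
		int err;

		of_for_each_phandle(&it, err, np, "clocks", "#clock-cells", 0) {
			/* it.node is the target node resolved for this entry */
			int nargs = of_phandle_iterator_args(&it, args,
							     MAX_PHANDLE_ARGS);

			pr_info("phandle target %s, %d argument cells\n",
				it.node->name, nargs);
		}
		/* err is nonzero once the list is exhausted or an entry
		 * is malformed, which is what terminates the for loop. */
	}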
extern const void *of_get_flat_dt_prop(unsigned long node, const char *name, int *size); extern int of_flat_dt_is_compatible(unsigned long node, const char *name); diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h index f8bcd0e21a26..bb3a5a2cd570 100644 --- a/include/linux/of_graph.h +++ b/include/linux/of_graph.h @@ -15,6 +15,7 @@ #define __LINUX_OF_GRAPH_H #include <linux/types.h> +#include <linux/errno.h> /** * struct of_endpoint - the OF graph endpoint data structure diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h index ffbe4707d4aa..e80b9c762a03 100644 --- a/include/linux/of_iommu.h +++ b/include/linux/of_iommu.h @@ -11,8 +11,7 @@ extern int of_get_dma_window(struct device_node *dn, const char *prefix, int index, unsigned long *busno, dma_addr_t *addr, size_t *size); -extern void of_iommu_init(void); -extern struct iommu_ops *of_iommu_configure(struct device *dev, +extern const struct iommu_ops *of_iommu_configure(struct device *dev, struct device_node *master_np); #else @@ -24,8 +23,7 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix, return -EINVAL; } -static inline void of_iommu_init(void) { } -static inline struct iommu_ops *of_iommu_configure(struct device *dev, +static inline const struct iommu_ops *of_iommu_configure(struct device *dev, struct device_node *master_np) { return NULL; @@ -33,8 +31,8 @@ static inline struct iommu_ops *of_iommu_configure(struct device *dev, #endif /* CONFIG_OF_IOMMU */ -void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops); -struct iommu_ops *of_iommu_get_ops(struct device_node *np); +void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops); +const struct iommu_ops *of_iommu_get_ops(struct device_node *np); extern struct of_device_id __iommu_of_table; diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index 8f2237eb3485..2ab233661ae5 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -19,12 +19,17 @@ extern struct phy_device *of_phy_connect(struct net_device *dev, struct device_node *phy_np, void (*hndlr)(struct net_device *), u32 flags, phy_interface_t iface); +extern struct phy_device * +of_phy_get_and_connect(struct net_device *dev, struct device_node *np, + void (*hndlr)(struct net_device *)); struct phy_device *of_phy_attach(struct net_device *dev, struct device_node *phy_np, u32 flags, phy_interface_t iface); extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np); +extern int of_phy_register_fixed_link(struct device_node *np); +extern bool of_phy_is_fixed_link(struct device_node *np); #else /* CONFIG_OF */ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) @@ -50,6 +55,13 @@ static inline struct phy_device *of_phy_connect(struct net_device *dev, return NULL; } +static inline struct phy_device * +of_phy_get_and_connect(struct net_device *dev, struct device_node *np, + void (*hndlr)(struct net_device *)) +{ + return NULL; +} + static inline struct phy_device *of_phy_attach(struct net_device *dev, struct device_node *phy_np, u32 flags, phy_interface_t iface) @@ -67,12 +79,6 @@ static inline int of_mdio_parse_addr(struct device *dev, { return -ENOSYS; } -#endif /* CONFIG_OF */ - -#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY) -extern int of_phy_register_fixed_link(struct device_node *np); -extern bool of_phy_is_fixed_link(struct device_node *np); -#else static inline int 
of_phy_register_fixed_link(struct device_node *np) { return -ENOSYS; diff --git a/include/linux/of_mtd.h b/include/linux/of_mtd.h deleted file mode 100644 index e266caa36402..000000000000 --- a/include/linux/of_mtd.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com> - * - * OF helpers for mtd. - * - * This file is released under the GPLv2 - */ - -#ifndef __LINUX_OF_MTD_H -#define __LINUX_OF_MTD_H - -#ifdef CONFIG_OF_MTD - -#include <linux/of.h> -int of_get_nand_ecc_mode(struct device_node *np); -int of_get_nand_ecc_step_size(struct device_node *np); -int of_get_nand_ecc_strength(struct device_node *np); -int of_get_nand_bus_width(struct device_node *np); -bool of_get_nand_on_flash_bbt(struct device_node *np); - -#else /* CONFIG_OF_MTD */ - -static inline int of_get_nand_ecc_mode(struct device_node *np) -{ - return -ENOSYS; -} - -static inline int of_get_nand_ecc_step_size(struct device_node *np) -{ - return -ENOSYS; -} - -static inline int of_get_nand_ecc_strength(struct device_node *np) -{ - return -ENOSYS; -} - -static inline int of_get_nand_bus_width(struct device_node *np) -{ - return -ENOSYS; -} - -static inline bool of_get_nand_on_flash_bbt(struct device_node *np) -{ - return false; -} - -#endif /* CONFIG_OF_MTD */ - -#endif /* __LINUX_OF_MTD_H */ diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index f6e9e85164e8..b969e9443962 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h @@ -8,7 +8,7 @@ struct pci_dev; struct of_phandle_args; struct device_node; -#ifdef CONFIG_OF +#ifdef CONFIG_OF_PCI int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq); struct device_node *of_pci_find_child_device(struct device_node *parent, unsigned int devfn); diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h index ad2f67054372..f8e1992d6423 100644 --- a/include/linux/of_reserved_mem.h +++ b/include/linux/of_reserved_mem.h @@ -1,7 +1,8 @@ #ifndef __OF_RESERVED_MEM_H #define __OF_RESERVED_MEM_H -struct device; +#include <linux/device.h> + struct of_phandle_args; struct reserved_mem_ops; @@ -28,14 +29,24 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem); _OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn) #ifdef CONFIG_OF_RESERVED_MEM -int of_reserved_mem_device_init(struct device *dev); + +int of_reserved_mem_device_init_by_idx(struct device *dev, + struct device_node *np, int idx); void of_reserved_mem_device_release(struct device *dev); +int early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, + phys_addr_t align, + phys_addr_t start, + phys_addr_t end, + bool nomap, + phys_addr_t *res_base); + void fdt_init_reserved_mem(void); void fdt_reserved_mem_save_node(unsigned long node, const char *uname, phys_addr_t base, phys_addr_t size); #else -static inline int of_reserved_mem_device_init(struct device *dev) +static inline int of_reserved_mem_device_init_by_idx(struct device *dev, + struct device_node *np, int idx) { return -ENOSYS; } @@ -46,4 +57,19 @@ static inline void fdt_reserved_mem_save_node(unsigned long node, const char *uname, phys_addr_t base, phys_addr_t size) { } #endif +/** + * of_reserved_mem_device_init() - assign reserved memory region to given device + * @dev: Pointer to the device to configure + * + * This function assigns respective DMA-mapping operations based on the first + * reserved memory region specified by 'memory-region' property in device tree + * node of the given device. 
+ * + * Returns error code or zero on success. + */ +static inline int of_reserved_mem_device_init(struct device *dev) +{ + return of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0); +} + #endif /* __OF_RESERVED_MEM_H */ diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h index d833eb4dd446..9e9d79e8efa5 100644 --- a/include/linux/omap-gpmc.h +++ b/include/linux/omap-gpmc.h @@ -7,161 +7,53 @@ * option) any later version. */ -/* Maximum Number of Chip Selects */ -#define GPMC_CS_NUM 8 +#include <linux/platform_data/gpmc-omap.h> #define GPMC_CONFIG_WP 0x00000005 -#define GPMC_IRQ_FIFOEVENTENABLE 0x01 -#define GPMC_IRQ_COUNT_EVENT 0x02 - -#define GPMC_BURST_4 4 /* 4 word burst */ -#define GPMC_BURST_8 8 /* 8 word burst */ -#define GPMC_BURST_16 16 /* 16 word burst */ -#define GPMC_DEVWIDTH_8BIT 1 /* 8-bit device width */ -#define GPMC_DEVWIDTH_16BIT 2 /* 16-bit device width */ -#define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */ -#define GPMC_MUX_AD 2 /* Addr-Data multiplex */ - -/* bool type time settings */ -struct gpmc_bool_timings { - bool cycle2cyclediffcsen; - bool cycle2cyclesamecsen; - bool we_extra_delay; - bool oe_extra_delay; - bool adv_extra_delay; - bool cs_extra_delay; - bool time_para_granularity; -}; +/* IRQ numbers in GPMC IRQ domain for legacy boot use */ +#define GPMC_IRQ_FIFOEVENTENABLE 0 +#define GPMC_IRQ_COUNT_EVENT 1 -/* - * Note that all values in this struct are in nanoseconds except sync_clk - * (which is in picoseconds), while the register values are in gpmc_fck cycles. +/** + * gpmc_nand_ops - Interface between NAND and GPMC + * @nand_writebuffer_empty: get the NAND write buffer empty status. */ -struct gpmc_timings { - /* Minimum clock period for synchronous mode (in picoseconds) */ - u32 sync_clk; - - /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */ - u32 cs_on; /* Assertion time */ - u32 cs_rd_off; /* Read deassertion time */ - u32 cs_wr_off; /* Write deassertion time */ - - /* ADV signal timings corresponding to GPMC_CONFIG3 */ - u32 adv_on; /* Assertion time */ - u32 adv_rd_off; /* Read deassertion time */ - u32 adv_wr_off; /* Write deassertion time */ - u32 adv_aad_mux_on; /* ADV assertion time for AAD */ - u32 adv_aad_mux_rd_off; /* ADV read deassertion time for AAD */ - u32 adv_aad_mux_wr_off; /* ADV write deassertion time for AAD */ - - /* WE signals timings corresponding to GPMC_CONFIG4 */ - u32 we_on; /* WE assertion time */ - u32 we_off; /* WE deassertion time */ - - /* OE signals timings corresponding to GPMC_CONFIG4 */ - u32 oe_on; /* OE assertion time */ - u32 oe_off; /* OE deassertion time */ - u32 oe_aad_mux_on; /* OE assertion time for AAD */ - u32 oe_aad_mux_off; /* OE deassertion time for AAD */ - - /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */ - u32 page_burst_access; /* Multiple access word delay */ - u32 access; /* Start-cycle to first data valid delay */ - u32 rd_cycle; /* Total read cycle time */ - u32 wr_cycle; /* Total write cycle time */ - - u32 bus_turnaround; - u32 cycle2cycle_delay; - - u32 wait_monitoring; - u32 clk_activation; - - /* The following are only on OMAP3430 */ - u32 wr_access; /* WRACCESSTIME */ - u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */ - - struct gpmc_bool_timings bool_timings; +struct gpmc_nand_ops { + bool (*nand_writebuffer_empty)(void); }; -/* Device timings in picoseconds */ -struct gpmc_device_timings { - u32 t_ceasu; /* address setup to CS valid */ - u32 t_avdasu; /* address setup to ADV valid */ - /* XXX: try to combine t_avdp_r & t_avdp_w.
Issue is - * of tusb using these timings even for sync whilst - * ideally for adv_rd/(wr)_off it should have considered - * t_avdh instead. This indirectly necessitates r/w - * variations of t_avdp as it is possible to have one - * sync & other async - */ - u32 t_avdp_r; /* ADV low time (what about t_cer ?) */ - u32 t_avdp_w; - u32 t_aavdh; /* address hold time */ - u32 t_oeasu; /* address setup to OE valid */ - u32 t_aa; /* access time from ADV assertion */ - u32 t_iaa; /* initial access time */ - u32 t_oe; /* access time from OE assertion */ - u32 t_ce; /* access time from CS assertion */ - u32 t_rd_cycle; /* read cycle time */ - u32 t_cez_r; /* read CS deassertion to high Z */ - u32 t_cez_w; /* write CS deassertion to high Z */ - u32 t_oez; /* OE deassertion to high Z */ - u32 t_weasu; /* address setup to WE valid */ - u32 t_wpl; /* write assertion time */ - u32 t_wph; /* write deassertion time */ - u32 t_wr_cycle; /* write cycle time */ - - u32 clk; - u32 t_bacc; /* burst access valid clock to output delay */ - u32 t_ces; /* CS setup time to clk */ - u32 t_avds; /* ADV setup time to clk */ - u32 t_avdh; /* ADV hold time from clk */ - u32 t_ach; /* address hold time from clk */ - u32 t_rdyo; /* clk to ready valid */ - - u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */ - u32 t_ce_avd; /* CS on to ADV on delay */ - - /* XXX: check the possibility of combining - * cyc_aavhd_oe & cyc_aavdh_we - */ - u8 cyc_aavdh_oe;/* read address hold time in cycles */ - u8 cyc_aavdh_we;/* write address hold time in cycles */ - u8 cyc_oe; /* access time from OE assertion in cycles */ - u8 cyc_wpl; /* write deassertion time in cycles */ - u32 cyc_iaa; /* initial access time in cycles */ - - /* extra delays */ - bool ce_xdelay; - bool avd_xdelay; - bool oe_xdelay; - bool we_xdelay; -}; +struct gpmc_nand_regs; -struct gpmc_settings { - bool burst_wrap; /* enables wrap bursting */ - bool burst_read; /* enables read page/burst mode */ - bool burst_write; /* enables write page/burst mode */ - bool device_nand; /* device is NAND */ - bool sync_read; /* enables synchronous reads */ - bool sync_write; /* enables synchronous writes */ - bool wait_on_read; /* monitor wait on reads */ - bool wait_on_write; /* monitor wait on writes */ - u32 burst_len; /* page/burst length */ - u32 device_width; /* device bus width (8 or 16 bit) */ - u32 mux_add_data; /* multiplex address & data */ - u32 wait_pin; /* wait-pin to be used */ -}; +#if IS_ENABLED(CONFIG_OMAP_GPMC) +struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, + int cs); +#else +static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, + int cs) +{ + return NULL; +} +#endif /* CONFIG_OMAP_GPMC */ + +/*--------------------------------*/ + +/* deprecated APIs */ +#if IS_ENABLED(CONFIG_OMAP_GPMC) +void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs); +#else +static inline void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs) +{ +} +#endif /* CONFIG_OMAP_GPMC */ +/*--------------------------------*/ extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t, struct gpmc_settings *gpmc_s, struct gpmc_device_timings *dev_t); -struct gpmc_nand_regs; struct device_node; -extern void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs); extern int gpmc_get_client_irq(unsigned irq_config); extern unsigned int gpmc_ticks_to_ns(unsigned int ticks); diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h index 587bbdd31f5a..c726bd833761 100644 --- a/include/linux/omap-mailbox.h +++
b/include/linux/omap-mailbox.h @@ -21,8 +21,6 @@ struct mbox_client; struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl, const char *chan_name); -void omap_mbox_save_ctx(struct mbox_chan *chan); -void omap_mbox_restore_ctx(struct mbox_chan *chan); void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq); void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq); diff --git a/include/linux/oom.h b/include/linux/oom.h index 628a43242a34..5bc0457ee3a8 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -23,6 +23,9 @@ struct oom_control { /* Used to determine mempolicy */ nodemask_t *nodemask; + /* Memory cgroup in which oom is invoked, or NULL for global oom */ + struct mem_cgroup *memcg; + /* Used to determine cpuset and node locality requirement */ const gfp_t gfp_mask; @@ -50,42 +53,46 @@ enum oom_scan_t { OOM_SCAN_SELECT, /* always select this thread first */ }; -/* Thread is the potential origin of an oom condition; kill first on oom */ -#define OOM_FLAG_ORIGIN ((__force oom_flags_t)0x1) - extern struct mutex oom_lock; static inline void set_current_oom_origin(void) { - current->signal->oom_flags |= OOM_FLAG_ORIGIN; + current->signal->oom_flag_origin = true; } static inline void clear_current_oom_origin(void) { - current->signal->oom_flags &= ~OOM_FLAG_ORIGIN; + current->signal->oom_flag_origin = false; } static inline bool oom_task_origin(const struct task_struct *p) { - return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN); + return p->signal->oom_flag_origin; } extern void mark_oom_victim(struct task_struct *tsk); +#ifdef CONFIG_MMU +extern void wake_oom_reaper(struct task_struct *tsk); +#else +static inline void wake_oom_reaper(struct task_struct *tsk) +{ +} +#endif + extern unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, const nodemask_t *nodemask, unsigned long totalpages); extern void oom_kill_process(struct oom_control *oc, struct task_struct *p, unsigned int points, unsigned long totalpages, - struct mem_cgroup *memcg, const char *message); + const char *message); extern void check_panic_on_oom(struct oom_control *oc, - enum oom_constraint constraint, - struct mem_cgroup *memcg); + enum oom_constraint constraint); extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc, - struct task_struct *task, unsigned long totalpages); + struct task_struct *task); extern bool out_of_memory(struct oom_control *oc); @@ -100,16 +107,7 @@ extern void oom_killer_enable(void); extern struct task_struct *find_lock_task_mm(struct task_struct *p); -static inline bool task_will_free_mem(struct task_struct *task) -{ - /* - * A coredumping process may sleep for an extended period in exit_mm(), - * so the oom killer cannot assume that the process will promptly exit - * and release memory. 
- */ - return (task->flags & PF_EXITING) && - !(task->signal->flags & SIGNAL_GROUP_COREDUMP); -} +bool task_will_free_mem(struct task_struct *task); /* sysctls */ extern int sysctl_oom_dump_tasks; diff --git a/include/linux/padata.h b/include/linux/padata.h index 438694650471..113ee626a4dc 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -175,11 +175,6 @@ extern int padata_do_parallel(struct padata_instance *pinst, extern void padata_do_serial(struct padata_priv *padata); extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); -extern int padata_set_cpumasks(struct padata_instance *pinst, - cpumask_var_t pcpumask, - cpumask_var_t cbcpumask); -extern int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask); -extern int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask); extern int padata_start(struct padata_instance *pinst); extern void padata_stop(struct padata_instance *pinst); extern int padata_register_cpumask_notifier(struct padata_instance *pinst, diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 6b052aa7b5b7..74e4dda91238 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -129,6 +129,9 @@ enum pageflags { /* Compound pages. Stored in first tail page's flags */ PG_double_map = PG_private_2, + + /* non-lru isolated movable page */ + PG_isolated = PG_reclaim, }; #ifndef __GENERATING_BOUNDS_H @@ -292,11 +295,11 @@ PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY) */ TESTPAGEFLAG(Writeback, writeback, PF_NO_COMPOUND) TESTSCFLAG(Writeback, writeback, PF_NO_COMPOUND) -PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_COMPOUND) +PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL) /* PG_readahead is only used for reads; PG_reclaim is only for writes */ -PAGEFLAG(Reclaim, reclaim, PF_NO_COMPOUND) - TESTCLEARFLAG(Reclaim, reclaim, PF_NO_COMPOUND) +PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL) + TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL) PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND) TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND) @@ -357,19 +360,26 @@ PAGEFLAG(Idle, idle, PF_ANY) * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h. * * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled, - * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit; - * and then page->mapping points, not to an anon_vma, but to a private + * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON + * bit; and then page->mapping points, not to an anon_vma, but to a private * structure which KSM associates with that merged page. See ksm.h. * - * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used. + * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for a non-lru movable + * page, and then page->mapping points to a struct address_space. * * Please note that, confusingly, "page_mapping" refers to the inode * address_space which maps the page from disk; whereas "page_mapped" * refers to user virtual address space into which the page is mapped.
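 *
 * For illustration (editorial summary of the flag values defined just
 * below, not text from the original comment):
 *   anonymous page:	page->mapping = anon_vma | PAGE_MAPPING_ANON
 *   KSM page:		page->mapping = private | PAGE_MAPPING_KSM
 *			(PAGE_MAPPING_KSM == ANON | MOVABLE)
 *   non-lru movable:	page->mapping = address_space | PAGE_MAPPING_MOVABLE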
*/ -#define PAGE_MAPPING_ANON 1 -#define PAGE_MAPPING_KSM 2 -#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM) +#define PAGE_MAPPING_ANON 0x1 +#define PAGE_MAPPING_MOVABLE 0x2 +#define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) +#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) + +static __always_inline int PageMappingFlags(struct page *page) +{ + return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0; +} static __always_inline int PageAnon(struct page *page) { @@ -377,6 +387,12 @@ static __always_inline int PageAnon(struct page *page) return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; } +static __always_inline int __PageMovable(struct page *page) +{ + return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == + PAGE_MAPPING_MOVABLE; +} + #ifdef CONFIG_KSM /* * A KSM page is one of those write-protected "shared pages" or "merged pages" @@ -388,7 +404,7 @@ static __always_inline int PageKsm(struct page *page) { page = compound_head(page); return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == - (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); + PAGE_MAPPING_KSM; } #else TESTPAGEFLAG_FALSE(Ksm) @@ -474,7 +490,7 @@ static inline void ClearPageCompound(struct page *page) } #endif -#define PG_head_mask ((1L << PG_head)) +#define PG_head_mask ((1UL << PG_head)) #ifdef CONFIG_HUGETLB_PAGE int PageHuge(struct page *page); @@ -565,6 +581,17 @@ static inline int PageDoubleMap(struct page *page) return PageHead(page) && test_bit(PG_double_map, &page[1].flags); } +static inline void SetPageDoubleMap(struct page *page) +{ + VM_BUG_ON_PAGE(!PageHead(page), page); + set_bit(PG_double_map, &page[1].flags); +} + +static inline void ClearPageDoubleMap(struct page *page) +{ + VM_BUG_ON_PAGE(!PageHead(page), page); + clear_bit(PG_double_map, &page[1].flags); +} static inline int TestSetPageDoubleMap(struct page *page) { VM_BUG_ON_PAGE(!PageHead(page), page); @@ -582,59 +609,59 @@ TESTPAGEFLAG_FALSE(TransHuge) TESTPAGEFLAG_FALSE(TransCompound) TESTPAGEFLAG_FALSE(TransCompoundMap) TESTPAGEFLAG_FALSE(TransTail) -TESTPAGEFLAG_FALSE(DoubleMap) +PAGEFLAG_FALSE(DoubleMap) TESTSETFLAG_FALSE(DoubleMap) TESTCLEARFLAG_FALSE(DoubleMap) #endif /* + * For pages that are never mapped to userspace, page->mapcount may be + * used for storing extra information about page type. Any value used + * for this purpose must be <= -2, but it's better to start not too close + * to -2 so that an underflow of the page_mapcount() won't be mistaken + * for a special page. + */ +#define PAGE_MAPCOUNT_OPS(uname, lname) \ +static __always_inline int Page##uname(struct page *page) \ +{ \ + return atomic_read(&page->_mapcount) == \ + PAGE_##lname##_MAPCOUNT_VALUE; \ +} \ +static __always_inline void __SetPage##uname(struct page *page) \ +{ \ + VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); \ + atomic_set(&page->_mapcount, PAGE_##lname##_MAPCOUNT_VALUE); \ +} \ +static __always_inline void __ClearPage##uname(struct page *page) \ +{ \ + VM_BUG_ON_PAGE(!Page##uname(page), page); \ + atomic_set(&page->_mapcount, -1); \ +} + +/* * PageBuddy() indicates that the page is free and in the buddy system * (see mm/page_alloc.c). - * - * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to - * -2 so that an underflow of the page_mapcount() won't be mistaken - * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very - * efficiently by most CPU architectures.
*/ -#define PAGE_BUDDY_MAPCOUNT_VALUE (-128) - -static inline int PageBuddy(struct page *page) -{ - return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE; -} +#define PAGE_BUDDY_MAPCOUNT_VALUE (-128) +PAGE_MAPCOUNT_OPS(Buddy, BUDDY) -static inline void __SetPageBuddy(struct page *page) -{ - VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); - atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); -} +/* + * PageBalloon() is set on pages that are on the balloon page list + * (see mm/balloon_compaction.c). + */ +#define PAGE_BALLOON_MAPCOUNT_VALUE (-256) +PAGE_MAPCOUNT_OPS(Balloon, BALLOON) -static inline void __ClearPageBuddy(struct page *page) -{ - VM_BUG_ON_PAGE(!PageBuddy(page), page); - atomic_set(&page->_mapcount, -1); -} +/* + * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on + * pages allocated with __GFP_ACCOUNT. It gets cleared on page free. + */ +#define PAGE_KMEMCG_MAPCOUNT_VALUE (-512) +PAGE_MAPCOUNT_OPS(Kmemcg, KMEMCG) extern bool is_free_buddy_page(struct page *page); -#define PAGE_BALLOON_MAPCOUNT_VALUE (-256) - -static inline int PageBalloon(struct page *page) -{ - return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE; -} - -static inline void __SetPageBalloon(struct page *page) -{ - VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); - atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE); -} - -static inline void __ClearPageBalloon(struct page *page) -{ - VM_BUG_ON_PAGE(!PageBalloon(page), page); - atomic_set(&page->_mapcount, -1); -} +__PAGEFLAG(Isolated, isolated, PF_ANY); /* * If network-based swap is enabled, sl*b must keep track of whether pages @@ -665,7 +692,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) } #ifdef CONFIG_MMU -#define __PG_MLOCKED (1 << PG_mlocked) +#define __PG_MLOCKED (1UL << PG_mlocked) #else #define __PG_MLOCKED 0 #endif @@ -675,11 +702,11 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) * these flags set. If they are, there is a problem. */ #define PAGE_FLAGS_CHECK_AT_FREE \ - (1 << PG_lru | 1 << PG_locked | \ - 1 << PG_private | 1 << PG_private_2 | \ - 1 << PG_writeback | 1 << PG_reserved | \ - 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ - 1 << PG_unevictable | __PG_MLOCKED) + (1UL << PG_lru | 1UL << PG_locked | \ + 1UL << PG_private | 1UL << PG_private_2 | \ + 1UL << PG_writeback | 1UL << PG_reserved | \ + 1UL << PG_slab | 1UL << PG_swapcache | 1UL << PG_active | \ + 1UL << PG_unevictable | __PG_MLOCKED) /* * Flags checked when a page is prepped for return by the page allocator. @@ -690,10 +717,10 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) * alloc-free cycle to prevent the page from being reused.
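 * (Editorial note: per the definition just below, this mask is simply
 * every flag bit under NR_PAGEFLAGS with the hwpoison bit masked out.)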
*/ #define PAGE_FLAGS_CHECK_AT_PREP \ - (((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) + (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) #define PAGE_FLAGS_PRIVATE \ - (1 << PG_private | 1 << PG_private_2) + (1UL << PG_private | 1UL << PG_private_2) /** * page_has_private - Determine if page has private stuff * @page: The page to be checked diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index e1fe7cf5bddf..03f2a3e7d76d 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -3,6 +3,7 @@ #include <linux/types.h> #include <linux/stacktrace.h> +#include <linux/stackdepot.h> struct pglist_data; struct page_ext_operations { @@ -44,9 +45,8 @@ struct page_ext { #ifdef CONFIG_PAGE_OWNER unsigned int order; gfp_t gfp_mask; - unsigned int nr_entries; int last_migrate_reason; - unsigned long trace_entries[8]; + depot_stack_handle_t handle; #endif }; diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h index bf268fa92c5b..fec40271339f 100644 --- a/include/linux/page_idle.h +++ b/include/linux/page_idle.h @@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops; static inline bool page_is_young(struct page *page) { - return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return false; + + return test_bit(PAGE_EXT_YOUNG, &page_ext->flags); } static inline void set_page_young(struct page *page) { - set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return; + + set_bit(PAGE_EXT_YOUNG, &page_ext->flags); } static inline bool test_and_clear_page_young(struct page *page) { - return test_and_clear_bit(PAGE_EXT_YOUNG, - &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return false; + + return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags); } static inline bool page_is_idle(struct page *page) { - return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return false; + + return test_bit(PAGE_EXT_IDLE, &page_ext->flags); } static inline void set_page_idle(struct page *page) { - set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return; + + set_bit(PAGE_EXT_IDLE, &page_ext->flags); } static inline void clear_page_idle(struct page *page) { - clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return; + + clear_bit(PAGE_EXT_IDLE, &page_ext->flags); } #endif /* CONFIG_64BIT */ diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h index 46f1b939948c..30583ab0ffb1 100644 --- a/include/linux/page_owner.h +++ b/include/linux/page_owner.h @@ -10,7 +10,7 @@ extern struct page_ext_operations page_owner_ops; extern void __reset_page_owner(struct page *page, unsigned int order); extern void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask); -extern gfp_t __get_page_owner_gfp(struct page *page); +extern void __split_page_owner(struct page *page, unsigned int order); extern void __copy_page_owner(struct page *oldpage, struct page *newpage); extern void __set_page_owner_migrate_reason(struct page *page, int reason); extern void __dump_page_owner(struct page *page); @@ -28,12 +28,10 @@ static inline void set_page_owner(struct 
page *page, __set_page_owner(page, order, gfp_mask); } -static inline gfp_t get_page_owner_gfp(struct page *page) +static inline void split_page_owner(struct page *page, unsigned int order) { if (static_branch_unlikely(&page_owner_inited)) - return __get_page_owner_gfp(page); - else - return 0; + __split_page_owner(page, order); } static inline void copy_page_owner(struct page *oldpage, struct page *newpage) { @@ -58,9 +56,9 @@ static inline void set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) { } -static inline gfp_t get_page_owner_gfp(struct page *page) +static inline void split_page_owner(struct page *page, + unsigned int order) { - return 0; } static inline void copy_page_owner(struct page *oldpage, struct page *newpage) { diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h index e596d5d9540e..610e13271918 100644 --- a/include/linux/page_ref.h +++ b/include/linux/page_ref.h @@ -63,17 +63,17 @@ static inline void __page_ref_unfreeze(struct page *page, int v) static inline int page_ref_count(struct page *page) { - return atomic_read(&page->_count); + return atomic_read(&page->_refcount); } static inline int page_count(struct page *page) { - return atomic_read(&compound_head(page)->_count); + return atomic_read(&compound_head(page)->_refcount); } static inline void set_page_count(struct page *page, int v) { - atomic_set(&page->_count, v); + atomic_set(&page->_refcount, v); if (page_ref_tracepoint_active(__tracepoint_page_ref_set)) __page_ref_set(page, v); } @@ -89,44 +89,53 @@ static inline void init_page_count(struct page *page) static inline void page_ref_add(struct page *page, int nr) { - atomic_add(nr, &page->_count); + atomic_add(nr, &page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) __page_ref_mod(page, nr); } static inline void page_ref_sub(struct page *page, int nr) { - atomic_sub(nr, &page->_count); + atomic_sub(nr, &page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) __page_ref_mod(page, -nr); } static inline void page_ref_inc(struct page *page) { - atomic_inc(&page->_count); + atomic_inc(&page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) __page_ref_mod(page, 1); } static inline void page_ref_dec(struct page *page) { - atomic_dec(&page->_count); + atomic_dec(&page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) __page_ref_mod(page, -1); } static inline int page_ref_sub_and_test(struct page *page, int nr) { - int ret = atomic_sub_and_test(nr, &page->_count); + int ret = atomic_sub_and_test(nr, &page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test)) __page_ref_mod_and_test(page, -nr, ret); return ret; } +static inline int page_ref_inc_return(struct page *page) +{ + int ret = atomic_inc_return(&page->_refcount); + + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return)) + __page_ref_mod_and_return(page, 1, ret); + return ret; +} + static inline int page_ref_dec_and_test(struct page *page) { - int ret = atomic_dec_and_test(&page->_count); + int ret = atomic_dec_and_test(&page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test)) __page_ref_mod_and_test(page, -1, ret); @@ -135,7 +144,7 @@ static inline int page_ref_dec_and_test(struct page *page) static inline int page_ref_dec_return(struct page *page) { - int ret = atomic_dec_return(&page->_count); + int ret = atomic_dec_return(&page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return)) 
__page_ref_mod_and_return(page, -1, ret); @@ -144,7 +153,7 @@ static inline int page_ref_dec_return(struct page *page) static inline int page_ref_add_unless(struct page *page, int nr, int u) { - int ret = atomic_add_unless(&page->_count, nr, u); + int ret = atomic_add_unless(&page->_refcount, nr, u); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless)) __page_ref_mod_unless(page, nr, ret); @@ -153,7 +162,7 @@ static inline int page_ref_add_unless(struct page *page, int nr, int u) static inline int page_ref_freeze(struct page *page, int count) { - int ret = likely(atomic_cmpxchg(&page->_count, count, 0) == count); + int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count); if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze)) __page_ref_freeze(page, count, ret); @@ -165,7 +174,7 @@ static inline void page_ref_unfreeze(struct page *page, int count) VM_BUG_ON_PAGE(page_count(page) != 0, page); VM_BUG_ON(count == 0); - atomic_set(&page->_count, count); + atomic_set(&page->_refcount, count); if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze)) __page_ref_unfreeze(page, count); } diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 7e1ab155c67c..01e84436cddf 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -90,12 +90,12 @@ void release_pages(struct page **pages, int nr, bool cold); /* * speculatively take a reference to a page. - * If the page is free (_count == 0), then _count is untouched, and 0 - * is returned. Otherwise, _count is incremented by 1 and 1 is returned. + * If the page is free (_refcount == 0), then _refcount is untouched, and 0 + * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned. * * This function must be called inside the same rcu_read_lock() section as has * been used to lookup the page in the pagecache radix-tree (or page table): - * this allows allocators to use a synchronize_rcu() to stabilize _count. + * this allows allocators to use a synchronize_rcu() to stabilize _refcount. * * Unless an RCU grace period has passed, the count of all pages coming out * of the allocator must be considered unstable. page_count may return higher @@ -111,7 +111,7 @@ void release_pages(struct page **pages, int nr, bool cold); * 2. conditionally increment refcount * 3. check the page is still in pagecache (if no, goto 1) * - * Remove-side that cares about stability of _count (eg. reclaim) has the + * Remove-side that cares about stability of _refcount (eg. reclaim) has the * following (with tree_lock held for write): * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg) * B. 
remove page from pagecache @@ -209,10 +209,10 @@ static inline struct page *page_cache_alloc_cold(struct address_space *x) return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD); } -static inline struct page *page_cache_alloc_readahead(struct address_space *x) +static inline gfp_t readahead_gfp_mask(struct address_space *x) { - return __page_cache_alloc(mapping_gfp_mask(x) | - __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN); + return mapping_gfp_mask(x) | + __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN; } typedef int filler_t(void *, struct page *); @@ -510,7 +510,7 @@ static inline void wait_on_page_writeback(struct page *page) extern void end_page_writeback(struct page *page); void wait_for_stable_page(struct page *page); -void page_endio(struct page *page, int rw, int err); +void page_endio(struct page *page, bool is_write, int err); /* * Add an arbitrary waiter to a page's wait queue @@ -518,33 +518,27 @@ void page_endio(struct page *page, int rw, int err); extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter); /* - * Fault a userspace page into pagetables. Return non-zero on a fault. - * - * This assumes that two userspace pages are always sufficient. + * Fault one or two userspace pages into pagetables. + * Return -EINVAL if more than two pages would be needed. + * Return non-zero on a fault. */ static inline int fault_in_pages_writeable(char __user *uaddr, int size) { - int ret; + int span, ret; if (unlikely(size == 0)) return 0; + span = offset_in_page(uaddr) + size; + if (span > 2 * PAGE_SIZE) + return -EINVAL; /* * Writing zeroes into userspace here is OK, because we know that if * the zero gets there, we'll be overwriting it. */ ret = __put_user(0, uaddr); - if (ret == 0) { - char __user *end = uaddr + size - 1; - - /* - * If the page was already mapped, this will get a cache miss - * for sure, so try to avoid doing it. - */ - if (((unsigned long)uaddr & PAGE_MASK) != - ((unsigned long)end & PAGE_MASK)) - ret = __put_user(0, end); - } + if (ret == 0 && span > PAGE_SIZE) + ret = __put_user(0, uaddr + size - 1); return ret; } @@ -577,56 +571,57 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size) */ static inline int fault_in_multipages_writeable(char __user *uaddr, int size) { - int ret = 0; char __user *end = uaddr + size - 1; if (unlikely(size == 0)) - return ret; + return 0; + if (unlikely(uaddr > end)) + return -EFAULT; /* * Writing zeroes into userspace here is OK, because we know that if * the zero gets there, we'll be overwriting it. */ - while (uaddr <= end) { - ret = __put_user(0, uaddr); - if (ret != 0) - return ret; + do { + if (unlikely(__put_user(0, uaddr) != 0)) + return -EFAULT; uaddr += PAGE_SIZE; - } + } while (uaddr <= end); /* Check whether the range spilled into the next page. */ if (((unsigned long)uaddr & PAGE_MASK) == ((unsigned long)end & PAGE_MASK)) - ret = __put_user(0, end); + return __put_user(0, end); - return ret; + return 0; } static inline int fault_in_multipages_readable(const char __user *uaddr, int size) { volatile char c; - int ret = 0; const char __user *end = uaddr + size - 1; if (unlikely(size == 0)) - return ret; + return 0; - while (uaddr <= end) { - ret = __get_user(c, uaddr); - if (ret != 0) - return ret; + if (unlikely(uaddr > end)) + return -EFAULT; + + do { + if (unlikely(__get_user(c, uaddr) != 0)) + return -EFAULT; uaddr += PAGE_SIZE; - } + } while (uaddr <= end); /* Check whether the range spilled into the next page. 
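 * (Editorial note: the loop above has advanced uaddr past 'end'; 'end'
 * can only share a page with this final uaddr when the loop never read
 * from that page, so the one extra access below faults it in.)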
*/ if (((unsigned long)uaddr & PAGE_MASK) == ((unsigned long)end & PAGE_MASK)) { - ret = __get_user(c, end); - (void)c; + return __get_user(c, end); } - return ret; + (void)c; + return 0; } int add_to_page_cache_locked(struct page *page, struct address_space *mapping, diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index 89ab0572dbc6..7d63a66e8ed4 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -24,6 +24,8 @@ static inline acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) } extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle); +extern phys_addr_t pci_mcfg_lookup(u16 domain, struct resource *bus_res); + static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) { struct pci_bus *pbus = pdev->bus; diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h new file mode 100644 index 000000000000..7adad206b1f4 --- /dev/null +++ b/include/linux/pci-ecam.h @@ -0,0 +1,67 @@ +/* + * Copyright 2016 Broadcom + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation (the "GPL"). + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 (GPLv2) for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 (GPLv2) along with this source code. + */ +#ifndef DRIVERS_PCI_ECAM_H +#define DRIVERS_PCI_ECAM_H + +#include <linux/kernel.h> +#include <linux/platform_device.h> + +/* + * struct to hold pci ops and bus shift of the config window + * for a PCI controller. + */ +struct pci_config_window; +struct pci_ecam_ops { + unsigned int bus_shift; + struct pci_ops pci_ops; + int (*init)(struct pci_config_window *); +}; + +/* + * struct to hold the mappings of a config space window. This + * is expected to be used as sysdata for PCI controllers that + * use ECAM. + */ +struct pci_config_window { + struct resource res; + struct resource busr; + void *priv; + struct pci_ecam_ops *ops; + union { + void __iomem *win; /* 64-bit single mapping */ + void __iomem **winp; /* 32-bit per-bus mapping */ + }; + struct device *parent;/* ECAM res was from this dev */ +}; + +/* create and free pci_config_window */ +struct pci_config_window *pci_ecam_create(struct device *dev, + struct resource *cfgres, struct resource *busr, + struct pci_ecam_ops *ops); +void pci_ecam_free(struct pci_config_window *cfg); + +/* map_bus when ->sysdata is an instance of pci_config_window */ +void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn, + int where); +/* default ECAM ops */ +extern struct pci_ecam_ops pci_generic_ecam_ops; + +#ifdef CONFIG_PCI_HOST_GENERIC +/* for DT-based PCI controllers that support ECAM */ +int pci_host_common_probe(struct platform_device *pdev, + struct pci_ecam_ops *ops); +#endif +#endif diff --git a/include/linux/pci.h b/include/linux/pci.h index 932ec74909c6..0ab835965669 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -101,6 +101,10 @@ enum { DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES, }; +/* + * pci_power_t values must match the bits in the Capabilities PME_Support + * and Control/Status PowerState fields in the Power Management capability. 
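+ *
+ * For example (editorial illustration): PCI_D3hot below is encoded as 3,
+ * the same value the PowerState field uses for D3hot.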
+ */ typedef int __bitwise pci_power_t; #define PCI_D0 ((pci_power_t __force) 0) @@ -116,7 +120,7 @@ extern const char *pci_power_names[]; static inline const char *pci_power_name(pci_power_t state) { - return pci_power_names[1 + (int) state]; + return pci_power_names[1 + (__force int) state]; } #define PCI_PM_D2_DELAY 200 @@ -166,8 +170,6 @@ enum pci_dev_flags { PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2), /* Flag for quirk use to store if quirk-specific ACS is enabled */ PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3), - /* Flag to indicate the device uses dma_alias_devfn */ - PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4), /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */ PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5), /* Do not use bus resets for device */ @@ -273,7 +275,7 @@ struct pci_dev { u8 rom_base_reg; /* which config register controls the ROM */ u8 pin; /* which interrupt pin this device uses */ u16 pcie_flags_reg; /* cached PCIe Capabilities Register */ - u8 dma_alias_devfn;/* devfn of DMA alias, if any */ + unsigned long *dma_alias_mask;/* mask of enabled devfn aliases */ struct pci_driver *driver; /* which driver has allocated this device */ u64 dma_mask; /* Mask of the bits of bus address this @@ -296,6 +298,7 @@ struct pci_dev { unsigned int d2_support:1; /* Low power state D2 is supported */ unsigned int no_d1d2:1; /* D1 and D2 are forbidden */ unsigned int no_d3cold:1; /* D3cold is forbidden */ + unsigned int bridge_d3:1; /* Allow D3 for bridge */ unsigned int d3cold_allowed:1; /* D3cold is allowed by user */ unsigned int mmio_always_on:1; /* disallow turning off io/mem decoding during bar sizing */ @@ -322,6 +325,7 @@ struct pci_dev { * directly, use the values stored here. They might be different! */ unsigned int irq; + struct cpumask *irq_affinity; struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ bool match_driver; /* Skip attaching driver */ @@ -679,15 +683,6 @@ struct pci_driver { #define to_pci_driver(drv) container_of(drv, struct pci_driver, driver) /** - * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table - * @_table: device table name - * - * This macro is deprecated and should not be used in new code. - */ -#define DEFINE_PCI_DEVICE_TABLE(_table) \ - const struct pci_device_id _table[] - -/** * PCI_DEVICE - macro used to describe a specific pci device * @vend: the 16 bit PCI Vendor ID * @dev: the 16 bit PCI Device ID @@ -856,6 +851,7 @@ void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev); void pci_stop_root_bus(struct pci_bus *bus); void pci_remove_root_bus(struct pci_bus *bus); void pci_setup_cardbus(struct pci_bus *bus); +void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type); void pci_sort_breadthfirst(void); #define dev_is_pci(d) ((d)->bus == &pci_bus_type) #define dev_is_pf(d) ((dev_is_pci(d) ? 
to_pci_dev(d)->is_physfn : false)) @@ -1085,6 +1081,8 @@ int pci_back_from_sleep(struct pci_dev *dev); bool pci_dev_run_wake(struct pci_dev *dev); bool pci_check_pme_status(struct pci_dev *dev); void pci_pme_wakeup_bus(struct pci_bus *bus); +void pci_d3cold_enable(struct pci_dev *dev); +void pci_d3cold_disable(struct pci_dev *dev); static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) @@ -1116,6 +1114,7 @@ int pci_set_vpd_size(struct pci_dev *dev, size_t len); /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx); void pci_bus_assign_resources(const struct pci_bus *bus); +void pci_bus_claim_resources(struct pci_bus *bus); void pci_bus_size_bridges(struct pci_bus *bus); int pci_claim_resource(struct pci_dev *, int); int pci_claim_bridge_resource(struct pci_dev *bridge, int i); @@ -1145,9 +1144,12 @@ void pci_add_resource(struct list_head *resources, struct resource *res); void pci_add_resource_offset(struct list_head *resources, struct resource *res, resource_size_t offset); void pci_free_resource_list(struct list_head *resources); -void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, unsigned int flags); +void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, + unsigned int flags); struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n); void pci_bus_remove_resources(struct pci_bus *bus); +int devm_request_pci_bus_resources(struct device *dev, + struct list_head *resources); #define pci_bus_for_each_resource(bus, res, i) \ for (i = 0; \ @@ -1165,7 +1167,11 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus, void *alignf_data); +int pci_register_io_range(phys_addr_t addr, resource_size_t size); +unsigned long pci_address_to_pio(phys_addr_t addr); +phys_addr_t pci_pio_to_address(unsigned long pio); int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr); +void pci_unmap_iospace(struct resource *res); static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar) { @@ -1236,6 +1242,13 @@ resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno); int pci_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags); +#define PCI_IRQ_LEGACY (1 << 0) /* allow legacy interrupts */ +#define PCI_IRQ_MSI (1 << 1) /* allow MSI interrupts */ +#define PCI_IRQ_MSIX (1 << 2) /* allow MSI-X interrupts */ +#define PCI_IRQ_AFFINITY (1 << 3) /* auto-assign affinity */ +#define PCI_IRQ_ALL_TYPES \ + (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX) + /* kmem_cache style wrapper around pci_alloc_consistent() */ #include <linux/pci-dma.h> @@ -1283,6 +1296,11 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev, return rc; return 0; } +int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags); +void pci_free_irq_vectors(struct pci_dev *dev); +int pci_irq_vector(struct pci_dev *dev, unsigned int nr); + #else static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } static inline void pci_msi_shutdown(struct pci_dev *dev) { } @@ -1306,6 +1324,24 @@ static inline int pci_enable_msix_range(struct pci_dev *dev, static inline int pci_enable_msix_exact(struct pci_dev *dev, struct msix_entry *entries, int nvec) { return -ENOSYS; } +static inline int pci_alloc_irq_vectors(struct pci_dev *dev, + unsigned int min_vecs, unsigned int max_vecs, + unsigned int flags) +{ + if (min_vecs > 1) + 
return -EINVAL; + return 1; +} +static inline void pci_free_irq_vectors(struct pci_dev *dev) +{ +} + +static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr) +{ + if (WARN_ON_ONCE(nr > 0)) + return -EINVAL; + return dev->irq; +} #endif #ifdef CONFIG_PCIEPORTBUS @@ -1388,12 +1424,13 @@ static inline int pci_domain_nr(struct pci_bus *bus) { return bus->domain_nr; } -void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent); +#ifdef CONFIG_ACPI +int acpi_pci_bus_find_domain_nr(struct pci_bus *bus); #else -static inline void pci_bus_assign_domain_nr(struct pci_bus *bus, - struct device *parent) -{ -} +static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) +{ return 0; } +#endif +int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent); #endif /* some architectures require additional setup to direct VGA traffic */ @@ -1401,6 +1438,34 @@ typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags); void pci_register_set_vga_state(arch_set_vga_state_t func); +static inline int +pci_request_io_regions(struct pci_dev *pdev, const char *name) +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO), name); +} + +static inline void +pci_release_io_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO)); +} + +static inline int +pci_request_mem_regions(struct pci_dev *pdev, const char *name) +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM), name); +} + +static inline void +pci_release_mem_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +} + #else /* CONFIG_PCI is not enabled */ static inline void pci_set_flags(int flags) { } @@ -1481,6 +1546,8 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) { return -EIO; } static inline void pci_release_regions(struct pci_dev *dev) { } +static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; } + static inline void pci_block_cfg_access(struct pci_dev *dev) { } static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev) { return 0; } @@ -1551,7 +1618,11 @@ static inline const char *pci_name(const struct pci_dev *pdev) /* Some archs don't want to expose struct resource to userland as-is * in sysfs and /proc */ -#ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER +#ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER +void pci_resource_to_user(const struct pci_dev *dev, int bar, + const struct resource *rsrc, + resource_size_t *start, resource_size_t *end); +#else static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, const struct resource *rsrc, resource_size_t *start, resource_size_t *end) @@ -1664,7 +1735,7 @@ enum pci_fixup_pass { #ifdef CONFIG_PCI_QUIRKS void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); -void pci_dev_specific_enable_acs(struct pci_dev *dev); +int pci_dev_specific_enable_acs(struct pci_dev *dev); #else static inline void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) { } @@ -1673,7 +1744,10 @@ static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev, { return -ENOTTY; } -static inline void pci_dev_specific_enable_acs(struct pci_dev *dev) { } +static inline int pci_dev_specific_enable_acs(struct pci_dev *dev) +{ + return -ENOTTY; +} #endif void __iomem *pcim_iomap(struct pci_dev 
*pdev, int bar, unsigned long maxlen); @@ -1700,6 +1774,7 @@ extern u8 pci_cache_line_size; extern unsigned long pci_hotplug_io_size; extern unsigned long pci_hotplug_mem_size; +extern unsigned long pci_hotplug_bus_size; /* Architecture-specific versions may override these (weak) */ void pcibios_disable_device(struct pci_dev *dev); @@ -1716,7 +1791,7 @@ void pcibios_free_irq(struct pci_dev *dev); extern struct dev_pm_ops pcibios_pm_ops; #endif -#ifdef CONFIG_PCI_MMCONFIG +#if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG) void __init pci_mmcfg_early_init(void); void __init pci_mmcfg_late_init(void); #else @@ -1989,6 +2064,8 @@ static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev) } #endif +void pci_add_dma_alias(struct pci_dev *dev, u8 devfn); +bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2); int pci_for_each_dma_alias(struct pci_dev *pdev, int (*fn)(struct pci_dev *pdev, u16 alias, void *data), void *data); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 247da8c95860..c58752fe16c4 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2604,6 +2604,24 @@ #define PCI_DEVICE_ID_INTEL_82441 0x1237 #define PCI_DEVICE_ID_INTEL_82380FB 0x124b #define PCI_DEVICE_ID_INTEL_82439 0x1250 +#define PCI_DEVICE_ID_INTEL_LIGHT_RIDGE 0x1513 /* Tbt 1 Gen 1 */ +#define PCI_DEVICE_ID_INTEL_EAGLE_RIDGE 0x151a +#define PCI_DEVICE_ID_INTEL_LIGHT_PEAK 0x151b +#define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C 0x1547 /* Tbt 1 Gen 2 */ +#define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C 0x1548 +#define PCI_DEVICE_ID_INTEL_PORT_RIDGE 0x1549 +#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_NHI 0x1566 /* Tbt 1 Gen 3 */ +#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE 0x1567 +#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_NHI 0x1568 +#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE 0x1569 +#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI 0x156a /* Thunderbolt 2 */ +#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE 0x156b +#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI 0x156c +#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE 0x156d +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI 0x1575 /* Thunderbolt 3 */ +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE 0x1576 +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577 +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578 #define PCI_DEVICE_ID_INTEL_80960_RP 0x1960 #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h index 4f1089f2cc98..afcd130ab3a9 100644 --- a/include/linux/pcieport_if.h +++ b/include/linux/pcieport_if.h @@ -21,6 +21,8 @@ #define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT) #define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */ #define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT) +#define PCIE_PORT_SERVICE_DPC_SHIFT 4 /* Downstream Port Containment */ +#define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT) struct pcie_device { int irq; /* Service IRQ/MSI/MSI-X Vector */ diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 84f542df7ff5..1c7eec09e5eb 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -136,14 +136,12 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref, * used as a pointer. 
If the compiler generates a separate fetch * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in * between contaminating the pointer value, meaning that - * ACCESS_ONCE() is required when fetching it. - * - * Also, we need a data dependency barrier to be paired with - * smp_store_release() in __percpu_ref_switch_to_percpu(). - * - * Use lockless deref which contains both. + * READ_ONCE() is required when fetching it. */ - percpu_ptr = lockless_dereference(ref->percpu_count_ptr); + percpu_ptr = READ_ONCE(ref->percpu_count_ptr); + + /* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */ + smp_read_barrier_depends(); /* * Theoretically, the following could test just ATOMIC; however, diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 4bc6dafb703e..56939d3f6e53 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -129,7 +129,4 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr); (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ __alignof__(type)) -/* To avoid include hell, as printk can not declare this, we declare it here */ -DECLARE_PER_CPU(printk_func_t, printk_func); - #endif /* __LINUX_PERCPU_H */ diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 4196c90a3c88..e18843809eec 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -105,9 +105,11 @@ struct arm_pmu { struct mutex reserve_mutex; u64 max_period; bool secure_access; /* 32-bit ARM only */ +#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 + DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); struct platform_device *plat_device; struct pmu_hw_events __percpu *hw_events; - struct notifier_block hotplug_nb; + struct list_head entry; struct notifier_block cpu_pm_nb; }; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index f291275ffd71..2b6b43cc0dd5 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -58,12 +58,33 @@ struct perf_guest_info_callbacks { struct perf_callchain_entry { __u64 nr; - __u64 ip[PERF_MAX_STACK_DEPTH]; + __u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */ }; +struct perf_callchain_entry_ctx { + struct perf_callchain_entry *entry; + u32 max_stack; + u32 nr; + short contexts; + bool contexts_maxed; +}; + +typedef unsigned long (*perf_copy_f)(void *dst, const void *src, + unsigned long off, unsigned long len); + +struct perf_raw_frag { + union { + struct perf_raw_frag *next; + unsigned long pad; + }; + perf_copy_f copy; + void *data; + u32 size; +} __packed; + struct perf_raw_record { + struct perf_raw_frag frag; u32 size; - void *data; }; /* @@ -151,6 +172,15 @@ struct hw_perf_event { */ struct task_struct *target; + /* + * PMU would store hardware filter configuration + * here. + */ + void *addr_filters; + + /* Last sync'ed generation of filters */ + unsigned long addr_filters_gen; + /* * hw_perf_event::state flags; used to track the PERF_EF_* state. */ @@ -216,6 +246,7 @@ struct perf_event; #define PERF_PMU_CAP_AUX_SW_DOUBLEBUF 0x08 #define PERF_PMU_CAP_EXCLUSIVE 0x10 #define PERF_PMU_CAP_ITRACE 0x20 +#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40 /** * struct pmu - generic performance monitoring unit @@ -240,6 +271,9 @@ struct pmu { int task_ctx_nr; int hrtimer_interval_ms; + /* number of address filters this PMU can do */ + unsigned int nr_addr_filters; + /* * Fully disable/enable this PMU, can be used to protect from the PMI * as well as for lazy/batch writing of the MSRs. 
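The perf_raw_record rework above chains sample data through struct perf_raw_frag. Below is a minimal consumer-side sketch (editorial, not part of this patch), assuming only what this header shows: perf_raw_frag_last(), defined near the end of the file, marks the final fragment, since a genuine 'next' pointer can never be numerically smaller than sizeof(u64); raw_frag_total() is a hypothetical helper name.

	static u32 raw_frag_total(const struct perf_raw_frag *frag)
	{
		u32 sum = 0;

		do {
			/* each fragment carries its own byte count */
			sum += frag->size;
			if (perf_raw_frag_last(frag))
				break;
			frag = frag->next;
		} while (1);

		return sum;
	}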
@@ -393,12 +427,71 @@ struct pmu { void (*free_aux) (void *aux); /* optional */ /* + * Validate address range filters: make sure the HW supports the + * requested configuration and number of filters; return 0 if the + * supplied filters are valid, -errno otherwise. + * + * Runs in the context of the ioctl()ing process and is not serialized + * with the rest of the PMU callbacks. + */ + int (*addr_filters_validate) (struct list_head *filters); + /* optional */ + + /* + * Synchronize address range filter configuration: + * translate hw-agnostic filters into hardware configuration in + * event::hw::addr_filters. + * + * Runs as a part of filter sync sequence that is done in ->start() + * callback by calling perf_event_addr_filters_sync(). + * + * May (and should) traverse event::addr_filters::list, for which its + * caller provides necessary serialization. + */ + void (*addr_filters_sync) (struct perf_event *event); + /* optional */ + + /* * Filter events for PMU-specific reasons. */ int (*filter_match) (struct perf_event *event); /* optional */ }; /** + * struct perf_addr_filter - address range filter definition + * @entry: event's filter list linkage + * @inode: object file's inode for file-based filters + * @offset: filter range offset + * @size: filter range size + * @range: 1: range, 0: address + * @filter: 1: filter/start, 0: stop + * + * This is a hardware-agnostic filter configuration as specified by the user. + */ +struct perf_addr_filter { + struct list_head entry; + struct inode *inode; + unsigned long offset; + unsigned long size; + unsigned int range : 1, + filter : 1; +}; + +/** + * struct perf_addr_filters_head - container for address range filters + * @list: list of filters for this event + * @lock: spinlock that serializes accesses to the @list and event's + * (and its children's) filter generations. + * + * A child event will use parent's @list (and therefore @lock), so they are + * bundled together; see perf_event_addr_filters(). 
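+ * (Editorial example mirroring perf_event_addr_filters() further below:
+ * for an inherited event 'child' with parent 'p',
+ * perf_event_addr_filters(child) returns &p->addr_filters, so parent and
+ * child serialize on the same @lock.)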
+ */ +struct perf_addr_filters_head { + struct list_head list; + raw_spinlock_t lock; +}; + +/** * enum perf_event_active_state - the states of an event */ enum perf_event_active_state { @@ -437,6 +530,11 @@ struct swevent_hlist { struct perf_cgroup; struct ring_buffer; +struct pmu_event_list { + raw_spinlock_t lock; + struct list_head list; +}; + /** * struct perf_event - performance event kernel representation: */ @@ -566,6 +664,12 @@ struct perf_event { atomic_t event_limit; + /* address range filters */ + struct perf_addr_filters_head addr_filters; + /* vma address array for file-based filters */ + unsigned long *addr_filters_offs; + unsigned long addr_filters_gen; + void (*destroy)(struct perf_event *); struct rcu_head rcu_head; @@ -589,6 +693,7 @@ struct perf_event { int cgrp_defer_enabled; #endif + struct list_head sb_list; #endif /* CONFIG_PERF_EVENTS */ }; @@ -638,7 +743,9 @@ struct perf_event_context { u64 parent_gen; u64 generation; int pin_count; +#ifdef CONFIG_CGROUP_PERF int nr_cgroups; /* cgroup evts */ +#endif void *task_ctx_data; /* pmu specific data */ struct rcu_head rcu_head; }; @@ -664,7 +771,9 @@ struct perf_cpu_context { unsigned int hrtimer_active; struct pmu *unique_pmu; +#ifdef CONFIG_CGROUP_PERF struct perf_cgroup *cgrp; +#endif }; struct perf_output_handle { @@ -834,9 +943,25 @@ extern int perf_event_overflow(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs); +extern void perf_event_output_forward(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs); +extern void perf_event_output_backward(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs); extern void perf_event_output(struct perf_event *event, - struct perf_sample_data *data, - struct pt_regs *regs); + struct perf_sample_data *data, + struct pt_regs *regs); + +static inline bool +is_default_overflow_handler(struct perf_event *event) +{ + if (likely(event->overflow_handler == perf_event_output_forward)) + return true; + if (unlikely(event->overflow_handler == perf_event_output_backward)) + return true; + return false; +} extern void perf_event_header__init_id(struct perf_event_header *header, @@ -882,8 +1007,6 @@ static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned lo */ static inline void perf_fetch_caller_regs(struct pt_regs *regs) { - memset(regs, 0, sizeof(*regs)); - perf_arch_fetch_caller_regs(regs, CALLER_ADDR0); } @@ -969,18 +1092,36 @@ extern void perf_event_fork(struct task_struct *tsk); /* Callchains */ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); -extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs); -extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs); +extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs); +extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs); extern struct perf_callchain_entry * get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, - bool crosstask, bool add_mark); -extern int get_callchain_buffers(void); + u32 max_stack, bool crosstask, bool add_mark); +extern int get_callchain_buffers(int max_stack); extern void put_callchain_buffers(void); -static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) +extern int sysctl_perf_event_max_stack; +extern int sysctl_perf_event_max_contexts_per_stack; + +static inline int
perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip) { - if (entry->nr < PERF_MAX_STACK_DEPTH) { + if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) { + struct perf_callchain_entry *entry = ctx->entry; entry->ip[entry->nr++] = ip; + ++ctx->contexts; + return 0; + } else { + ctx->contexts_maxed = true; + return -1; /* no more room, stop walking the stack */ + } +} + +static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip) +{ + if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) { + struct perf_callchain_entry *entry = ctx->entry; + entry->ip[entry->nr++] = ip; + ++ctx->nr; return 0; } else { return -1; /* no more room, stop walking the stack */ @@ -1001,6 +1142,8 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); +int perf_event_max_stack_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); static inline bool perf_paranoid_tracepoint_raw(void) { @@ -1018,7 +1161,7 @@ static inline bool perf_paranoid_kernel(void) } extern void perf_event_init(void); -extern void perf_tp_event(u64 addr, u64 count, void *record, +extern void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, struct pt_regs *regs, struct hlist_head *head, int rctx, struct task_struct *task); @@ -1045,8 +1188,41 @@ static inline bool has_aux(struct perf_event *event) return event->pmu->setup_aux; } +static inline bool is_write_backward(struct perf_event *event) +{ + return !!event->attr.write_backward; +} + +static inline bool has_addr_filter(struct perf_event *event) +{ + return event->pmu->nr_addr_filters; +} + +/* + * An inherited event uses parent's filters + */ +static inline struct perf_addr_filters_head * +perf_event_addr_filters(struct perf_event *event) +{ + struct perf_addr_filters_head *ifh = &event->addr_filters; + + if (event->parent) + ifh = &event->parent->addr_filters; + + return ifh; +} + +extern void perf_event_addr_filters_sync(struct perf_event *event); + extern int perf_output_begin(struct perf_output_handle *handle, struct perf_event *event, unsigned int size); +extern int perf_output_begin_forward(struct perf_output_handle *handle, + struct perf_event *event, + unsigned int size); +extern int perf_output_begin_backward(struct perf_output_handle *handle, + struct perf_event *event, + unsigned int size); + extern void perf_output_end(struct perf_output_handle *handle); extern unsigned int perf_output_copy(struct perf_output_handle *handle, const void *buf, unsigned int len); @@ -1130,42 +1306,12 @@ extern void perf_restore_debug_store(void); static inline void perf_restore_debug_store(void) { } #endif -#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) - -/* - * This has to have a higher priority than migration_notifier in sched/core.c. 
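For context before the removal below: the perf_cpu_notifier()/__perf_cpu_notifier() helpers are dropped in favor of the explicit hotplug entry points perf_event_init_cpu()/perf_event_exit_cpu() added near the end of this header. A hedged sketch of the replacement pattern for an out-of-core PMU driver, assuming the cpuhp state machine of this kernel generation (all my_pmu_* names are illustrative):

#include <linux/cpuhotplug.h>

static enum cpuhp_state my_pmu_hp_state;

static int my_pmu_cpu_online(unsigned int cpu)
{
	/* per-cpu setup that used to run from the CPU_ONLINE callback */
	return 0;
}

static int my_pmu_cpu_offline(unsigned int cpu)
{
	/* per-cpu teardown on the way down */
	return 0;
}

static int my_pmu_init_hotplug(void)
{
	int ret;

	/* unlike __perf_cpu_notifier(), this also runs the online callback
	 * for CPUs that are already up */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "perf/my_pmu:online",
				my_pmu_cpu_online, my_pmu_cpu_offline);
	if (ret < 0)
		return ret;
	my_pmu_hp_state = ret;
	return 0;
}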
- */ -#define perf_cpu_notifier(fn) \ -do { \ - static struct notifier_block fn##_nb = \ - { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ - unsigned long cpu = smp_processor_id(); \ - unsigned long flags; \ - \ - cpu_notifier_register_begin(); \ - fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ - (void *)(unsigned long)cpu); \ - local_irq_save(flags); \ - fn(&fn##_nb, (unsigned long)CPU_STARTING, \ - (void *)(unsigned long)cpu); \ - local_irq_restore(flags); \ - fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ - (void *)(unsigned long)cpu); \ - __register_cpu_notifier(&fn##_nb); \ - cpu_notifier_register_done(); \ -} while (0) +static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag) +{ + return frag->pad < sizeof(u64); +} -/* - * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the - * callback for already online CPUs. - */ -#define __perf_cpu_notifier(fn) \ -do { \ - static struct notifier_block fn##_nb = \ - { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ - \ - __register_cpu_notifier(&fn##_nb); \ -} while (0) +#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) struct perf_pmu_events_attr { struct device_attribute attr; @@ -1173,6 +1319,13 @@ struct perf_pmu_events_attr { const char *event_str; }; +struct perf_pmu_events_ht_attr { + struct device_attribute attr; + u64 id; + const char *event_str_ht; + const char *event_str_noht; +}; + ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr, char *page); @@ -1201,4 +1354,13 @@ _name##_show(struct device *dev, \ \ static struct device_attribute format_attr_##_name = __ATTR_RO(_name) +/* Performance counter hotplug functions */ +#ifdef CONFIG_PERF_EVENTS +int perf_event_init_cpu(unsigned int cpu); +int perf_event_exit_cpu(unsigned int cpu); +#else +#define perf_event_init_cpu NULL +#define perf_event_exit_cpu NULL +#endif + #endif /* _LINUX_PERF_EVENT_H */ diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h index 94994810c7c0..a3d90b9da18d 100644 --- a/include/linux/pfn_t.h +++ b/include/linux/pfn_t.h @@ -28,7 +28,10 @@ static inline pfn_t pfn_to_pfn_t(unsigned long pfn) return __pfn_to_pfn_t(pfn, 0); } -extern pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags); +static inline pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags) +{ + return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags); +} static inline bool pfn_t_has_page(pfn_t pfn) { diff --git a/include/linux/phy.h b/include/linux/phy.h index 2abd7918f64f..2d24b283aa2d 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -805,6 +805,10 @@ void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd); +int phy_ethtool_ksettings_get(struct phy_device *phydev, + struct ethtool_link_ksettings *cmd); +int phy_ethtool_ksettings_set(struct phy_device *phydev, + const struct ethtool_link_ksettings *cmd); int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); int phy_start_interrupts(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); @@ -825,6 +829,10 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data); int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol); void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol); +int phy_ethtool_get_link_ksettings(struct net_device *ndev, + struct 
ethtool_link_ksettings *cmd); +int phy_ethtool_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd); int __init mdio_bus_init(void); void mdio_bus_exit(void); diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index 8cf05e341cff..f08b67238b58 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -22,12 +22,20 @@ struct phy; +enum phy_mode { + PHY_MODE_INVALID, + PHY_MODE_USB_HOST, + PHY_MODE_USB_DEVICE, + PHY_MODE_USB_OTG, +}; + /** * struct phy_ops - set of function pointers for performing phy operations * @init: operation to be performed for initializing phy * @exit: operation to be performed while exiting * @power_on: powering on the phy * @power_off: powering off the phy + * @set_mode: set the mode of the phy * @owner: the module owner containing the ops */ struct phy_ops { @@ -35,6 +43,7 @@ struct phy_ops { int (*exit)(struct phy *phy); int (*power_on)(struct phy *phy); int (*power_off)(struct phy *phy); + int (*set_mode)(struct phy *phy, enum phy_mode mode); struct module *owner; }; @@ -77,6 +86,7 @@ struct phy { */ struct phy_provider { struct device *dev; + struct device_node *children; struct module *owner; struct list_head list; struct phy * (*of_xlate)(struct device *dev, @@ -93,10 +103,16 @@ struct phy_lookup { #define to_phy(a) (container_of((a), struct phy, dev)) #define of_phy_provider_register(dev, xlate) \ - __of_phy_provider_register((dev), THIS_MODULE, (xlate)) + __of_phy_provider_register((dev), NULL, THIS_MODULE, (xlate)) #define devm_of_phy_provider_register(dev, xlate) \ - __devm_of_phy_provider_register((dev), THIS_MODULE, (xlate)) + __devm_of_phy_provider_register((dev), NULL, THIS_MODULE, (xlate)) + +#define of_phy_provider_register_full(dev, children, xlate) \ + __of_phy_provider_register(dev, children, THIS_MODULE, xlate) + +#define devm_of_phy_provider_register_full(dev, children, xlate) \ + __devm_of_phy_provider_register(dev, children, THIS_MODULE, xlate) static inline void phy_set_drvdata(struct phy *phy, void *data) { @@ -119,6 +135,7 @@ int phy_init(struct phy *phy); int phy_exit(struct phy *phy); int phy_power_on(struct phy *phy); int phy_power_off(struct phy *phy); +int phy_set_mode(struct phy *phy, enum phy_mode mode); static inline int phy_get_bus_width(struct phy *phy) { return phy->attrs.bus_width; @@ -147,11 +164,13 @@ struct phy *devm_phy_create(struct device *dev, struct device_node *node, void phy_destroy(struct phy *phy); void devm_phy_destroy(struct device *dev, struct phy *phy); struct phy_provider *__of_phy_provider_register(struct device *dev, - struct module *owner, struct phy * (*of_xlate)(struct device *dev, - struct of_phandle_args *args)); + struct device_node *children, struct module *owner, + struct phy * (*of_xlate)(struct device *dev, + struct of_phandle_args *args)); struct phy_provider *__devm_of_phy_provider_register(struct device *dev, - struct module *owner, struct phy * (*of_xlate)(struct device *dev, - struct of_phandle_args *args)); + struct device_node *children, struct module *owner, + struct phy * (*of_xlate)(struct device *dev, + struct of_phandle_args *args)); void of_phy_provider_unregister(struct phy_provider *phy_provider); void devm_of_phy_provider_unregister(struct device *dev, struct phy_provider *phy_provider); @@ -224,6 +243,13 @@ static inline int phy_power_off(struct phy *phy) return -ENOSYS; } +static inline int phy_set_mode(struct phy *phy, enum phy_mode mode) +{ + if (!phy) + return 0; + return -ENOSYS; +} + static inline int 
phy_get_bus_width(struct phy *phy) { return -ENOSYS; @@ -312,15 +338,17 @@ static inline void devm_phy_destroy(struct device *dev, struct phy *phy) } static inline struct phy_provider *__of_phy_provider_register( - struct device *dev, struct module *owner, struct phy * (*of_xlate)( - struct device *dev, struct of_phandle_args *args)) + struct device *dev, struct device_node *children, struct module *owner, + struct phy * (*of_xlate)(struct device *dev, + struct of_phandle_args *args)) { return ERR_PTR(-ENOSYS); } static inline struct phy_provider *__devm_of_phy_provider_register(struct device - *dev, struct module *owner, struct phy * (*of_xlate)(struct device *dev, - struct of_phandle_args *args)) + *dev, struct device_node *children, struct module *owner, + struct phy * (*of_xlate)(struct device *dev, + struct of_phandle_args *args)) { return ERR_PTR(-ENOSYS); } diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h new file mode 100644 index 000000000000..8e1a57a78d9f --- /dev/null +++ b/include/linux/phy/tegra/xusb.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef PHY_TEGRA_XUSB_H +#define PHY_TEGRA_XUSB_H + +struct tegra_xusb_padctl; +struct device; + +struct tegra_xusb_padctl *tegra_xusb_padctl_get(struct device *dev); +void tegra_xusb_padctl_put(struct tegra_xusb_padctl *padctl); + +int tegra_xusb_padctl_usb3_save_context(struct tegra_xusb_padctl *padctl, + unsigned int port); +int tegra_xusb_padctl_hsic_set_idle(struct tegra_xusb_padctl *padctl, + unsigned int port, bool idle); +int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl, + unsigned int port, bool enable); + +#endif /* PHY_TEGRA_XUSB_H */ diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index d921afd5f109..12343caa114e 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -175,6 +175,8 @@ int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev, int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node *np_config, struct pinctrl_map **map, unsigned *num_maps, enum pinctrl_map_type type); +void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev, + struct pinctrl_map *map, unsigned num_maps); static inline int pinconf_generic_dt_node_to_map_group( struct pinctrl_dev *pctldev, struct device_node *np_config, diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h index 9ba59fcba549..a42e57da270d 100644 --- a/include/linux/pinctrl/pinctrl.h +++ b/include/linux/pinctrl/pinctrl.h @@ -144,6 +144,12 @@ struct pinctrl_desc { extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, struct device *dev, void *driver_data); extern void pinctrl_unregister(struct pinctrl_dev *pctldev); +extern struct pinctrl_dev *devm_pinctrl_register(struct device *dev, + struct pinctrl_desc *pctldesc, + void *driver_data); +extern void devm_pinctrl_unregister(struct device *dev, + struct pinctrl_dev *pctldev); + extern bool 
pin_is_valid(struct pinctrl_dev *pctldev, int pin); extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range); diff --git a/include/linux/platform_data/asoc-ti-mcbsp.h b/include/linux/platform_data/asoc-ti-mcbsp.h index 3c73c045f8da..e684543254f3 100644 --- a/include/linux/platform_data/asoc-ti-mcbsp.h +++ b/include/linux/platform_data/asoc-ti-mcbsp.h @@ -44,7 +44,7 @@ struct omap_mcbsp_platform_data { /* McBSP platform and instance specific features */ bool has_wakeup; /* Wakeup capability */ bool has_ccr; /* Transceiver has configuration control registers */ - int (*enable_st_clock)(unsigned int, bool); + int (*force_ick_on)(struct clk *clk, bool force_on); }; /** @@ -55,4 +55,6 @@ struct omap_mcbsp_dev_attr { const char *sidetone; }; +void omap3_mcbsp_init_pdata_callback(struct omap_mcbsp_platform_data *pdata); + #endif diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h index dc9a13e5acda..271a4e25af67 100644 --- a/include/linux/platform_data/at24.h +++ b/include/linux/platform_data/at24.h @@ -10,6 +10,7 @@ #include <linux/types.h> #include <linux/nvmem-consumer.h> +#include <linux/bitops.h> /** * struct at24_platform_data - data to set up at24 (generic eeprom) driver @@ -26,7 +27,7 @@ * * An example in pseudo code for a setup() callback: * - * void get_mac_addr(struct mvmem_device *nvmem, void *context) + * void get_mac_addr(struct nvmem_device *nvmem, void *context) * { * u8 *mac_addr = ethernet_pdata->mac_addr; * off_t offset = context; @@ -43,10 +44,12 @@ struct at24_platform_data { u32 byte_len; /* size (sum of all addr) */ u16 page_size; /* for writes */ u8 flags; -#define AT24_FLAG_ADDR16 0x80 /* address pointer is 16 bit */ -#define AT24_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */ -#define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */ -#define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */ +#define AT24_FLAG_ADDR16 BIT(7) /* address pointer is 16 bit */ +#define AT24_FLAG_READONLY BIT(6) /* sysfs-entry will be read-only */ +#define AT24_FLAG_IRUGO BIT(5) /* sysfs-entry will be world-readable */ +#define AT24_FLAG_TAKE8ADDR BIT(4) /* take always 8 addresses (24c00) */ +#define AT24_FLAG_SERIAL BIT(3) /* factory-programmed serial number */ +#define AT24_FLAG_MAC BIT(2) /* factory-programmed mac address */ void (*setup)(struct nvmem_device *nvmem, void *context); void *context; diff --git a/include/linux/platform_data/b53.h b/include/linux/platform_data/b53.h new file mode 100644 index 000000000000..69d279c0da96 --- /dev/null +++ b/include/linux/platform_data/b53.h @@ -0,0 +1,33 @@ +/* + * B53 platform data + * + * Copyright (C) 2013 Jonas Gorski <jogo@openwrt.org> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __B53_H +#define __B53_H + +#include <linux/kernel.h> + +struct b53_platform_data { + u32 chip_id; + u16 enabled_ports; + + /* only used by MMAP'd driver */ + unsigned big_endian:1; + void __iomem *regs; +}; + +#endif diff --git a/include/linux/platform_data/clk-ux500.h b/include/linux/platform_data/clk-ux500.h deleted file mode 100644 index 3af0da1f3be5..000000000000 --- a/include/linux/platform_data/clk-ux500.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Clock definitions for ux500 platforms - * - * Copyright (C) 2012 ST-Ericsson SA - * Author: Ulf Hansson <ulf.hansson@linaro.org> - * - * License terms: GNU General Public License (GPL) version 2 - */ - -#ifndef __CLK_UX500_H -#define __CLK_UX500_H - -void u8500_clk_init(void); -void u9540_clk_init(void); -void u8540_clk_init(void); - -#endif /* __CLK_UX500_H */ diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 03b6095d3b18..d15d8ba8cc24 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -21,15 +21,15 @@ * @dma_dev: required DMA master device * @src_id: src request line * @dst_id: dst request line - * @src_master: src master for transfers on allocated channel. - * @dst_master: dest master for transfers on allocated channel. + * @m_master: memory master for transfers on allocated channel + * @p_master: peripheral master for transfers on allocated channel */ struct dw_dma_slave { struct device *dma_dev; u8 src_id; u8 dst_id; - u8 src_master; - u8 dst_master; + u8 m_master; + u8 p_master; }; /** @@ -43,7 +43,7 @@ struct dw_dma_slave { * @block_size: Maximum block size supported by the controller * @nr_masters: Number of AHB masters supported by the controller * @data_width: Maximum data width supported by hardware per AHB master - * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) + * (in bytes, power of 2) */ struct dw_dma_platform_data { unsigned int nr_channels; @@ -55,7 +55,7 @@ struct dw_dma_platform_data { #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ unsigned char chan_priority; - unsigned short block_size; + unsigned int block_size; unsigned char nr_masters; unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; }; diff --git a/include/linux/platform_data/drv260x-pdata.h b/include/linux/platform_data/drv260x-pdata.h deleted file mode 100644 index 0a03b0944411..000000000000 --- a/include/linux/platform_data/drv260x-pdata.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Platform data for DRV260X haptics driver family - * - * Author: Dan Murphy <dmurphy@ti.com> - * - * Copyright: (C) 2014 Texas Instruments, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
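For illustration only (not from this patch), board code could describe a memory-mapped B53 switch with the b53_platform_data introduced above; the chip id and port mask are placeholders rather than values taken from the driver:

#include <linux/platform_data/b53.h>

static struct b53_platform_data my_b53_pdata = {
	.chip_id	= 0x5325,	/* placeholder device id */
	.enabled_ports	= 0x1f,		/* placeholder: ports 0-4 */
	.big_endian	= 0,
	/* .regs is filled with the ioremap()ed base (MMAP'd driver only) */
};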
- */ - -#ifndef _LINUX_DRV260X_PDATA_H -#define _LINUX_DRV260X_PDATA_H - -struct drv260x_platform_data { - u32 library_selection; - u32 mode; - u32 vib_rated_voltage; - u32 vib_overdrive_voltage; -}; - -#endif diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h index 28702c849af1..2dc7f4a8ab09 100644 --- a/include/linux/platform_data/gpio-dwapb.h +++ b/include/linux/platform_data/gpio-dwapb.h @@ -15,8 +15,7 @@ #define GPIO_DW_APB_H struct dwapb_port_property { - struct device_node *node; - const char *name; + struct fwnode_handle *fwnode; unsigned int idx; unsigned int ngpio; unsigned int gpio_base; diff --git a/include/linux/platform_data/gpmc-omap.h b/include/linux/platform_data/gpmc-omap.h new file mode 100644 index 000000000000..67ccdb0e1606 --- /dev/null +++ b/include/linux/platform_data/gpmc-omap.h @@ -0,0 +1,172 @@ +/* + * OMAP GPMC Platform data + * + * Copyright (C) 2014 Texas Instruments, Inc. - http://www.ti.com + * Roger Quadros <rogerq@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +#ifndef _GPMC_OMAP_H_ +#define _GPMC_OMAP_H_ + +/* Maximum Number of Chip Selects */ +#define GPMC_CS_NUM 8 + +/* bool type time settings */ +struct gpmc_bool_timings { + bool cycle2cyclediffcsen; + bool cycle2cyclesamecsen; + bool we_extra_delay; + bool oe_extra_delay; + bool adv_extra_delay; + bool cs_extra_delay; + bool time_para_granularity; +}; + +/* + * Note that all values in this struct are in nanoseconds except sync_clk + * (which is in picoseconds), while the register values are in gpmc_fck cycles. + */ +struct gpmc_timings { + /* Minimum clock period for synchronous mode (in picoseconds) */ + u32 sync_clk; + + /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */ + u32 cs_on; /* Assertion time */ + u32 cs_rd_off; /* Read deassertion time */ + u32 cs_wr_off; /* Write deassertion time */ + + /* ADV signal timings corresponding to GPMC_CONFIG3 */ + u32 adv_on; /* Assertion time */ + u32 adv_rd_off; /* Read deassertion time */ + u32 adv_wr_off; /* Write deassertion time */ + u32 adv_aad_mux_on; /* ADV assertion time for AAD */ + u32 adv_aad_mux_rd_off; /* ADV read deassertion time for AAD */ + u32 adv_aad_mux_wr_off; /* ADV write deassertion time for AAD */ + + /* WE signals timings corresponding to GPMC_CONFIG4 */ + u32 we_on; /* WE assertion time */ + u32 we_off; /* WE deassertion time */ + + /* OE signals timings corresponding to GPMC_CONFIG4 */ + u32 oe_on; /* OE assertion time */ + u32 oe_off; /* OE deassertion time */ + u32 oe_aad_mux_on; /* OE assertion time for AAD */ + u32 oe_aad_mux_off; /* OE deassertion time for AAD */ + + /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */ + u32 page_burst_access; /* Multiple access word delay */ + u32 access; /* Start-cycle to first data valid delay */ + u32 rd_cycle; /* Total read cycle time */ + u32 wr_cycle; /* Total write cycle time */ + + u32 bus_turnaround; + u32 cycle2cycle_delay; + + u32 wait_monitoring; + u32 clk_activation; + + /* The following are only on OMAP3430 */ + u32 wr_access; /* WRACCESSTIME */ + u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */ + + struct gpmc_bool_timings bool_timings; +}; + +/* Device timings in picoseconds */ +struct gpmc_device_timings { + u32 t_ceasu; /* address setup to CS valid */ + u32 t_avdasu; /* address setup to ADV valid */ + /* XXX: try to combine 
t_avdp_r & t_avdp_w. Issue is + * of tusb using these timings even for sync whilst + * ideally for adv_rd/(wr)_off it should have considered + * t_avdh instead. This indirectly necessitates r/w + * variations of t_avdp as it is possible to have one + * sync & other async + */ + u32 t_avdp_r; /* ADV low time (what about t_cer ?) */ + u32 t_avdp_w; + u32 t_aavdh; /* address hold time */ + u32 t_oeasu; /* address setup to OE valid */ + u32 t_aa; /* access time from ADV assertion */ + u32 t_iaa; /* initial access time */ + u32 t_oe; /* access time from OE assertion */ + u32 t_ce; /* access time from CS assertion */ + u32 t_rd_cycle; /* read cycle time */ + u32 t_cez_r; /* read CS deassertion to high Z */ + u32 t_cez_w; /* write CS deassertion to high Z */ + u32 t_oez; /* OE deassertion to high Z */ + u32 t_weasu; /* address setup to WE valid */ + u32 t_wpl; /* write assertion time */ + u32 t_wph; /* write deassertion time */ + u32 t_wr_cycle; /* write cycle time */ + + u32 clk; + u32 t_bacc; /* burst access valid clock to output delay */ + u32 t_ces; /* CS setup time to clk */ + u32 t_avds; /* ADV setup time to clk */ + u32 t_avdh; /* ADV hold time from clk */ + u32 t_ach; /* address hold time from clk */ + u32 t_rdyo; /* clk to ready valid */ + + u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */ + u32 t_ce_avd; /* CS on to ADV on delay */ + + /* XXX: check the possibility of combining + * cyc_aavdh_oe & cyc_aavdh_we + */ + u8 cyc_aavdh_oe;/* read address hold time in cycles */ + u8 cyc_aavdh_we;/* write address hold time in cycles */ + u8 cyc_oe; /* access time from OE assertion in cycles */ + u8 cyc_wpl; /* write deassertion time in cycles */ + u32 cyc_iaa; /* initial access time in cycles */ + + /* extra delays */ + bool ce_xdelay; + bool avd_xdelay; + bool oe_xdelay; + bool we_xdelay; +}; + +#define GPMC_BURST_4 4 /* 4 word burst */ +#define GPMC_BURST_8 8 /* 8 word burst */ +#define GPMC_BURST_16 16 /* 16 word burst */ +#define GPMC_DEVWIDTH_8BIT 1 /* 8-bit device width */ +#define GPMC_DEVWIDTH_16BIT 2 /* 16-bit device width */ +#define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */ +#define GPMC_MUX_AD 2 /* Addr-Data multiplex */ + +struct gpmc_settings { + bool burst_wrap; /* enables wrap bursting */ + bool burst_read; /* enables read page/burst mode */ + bool burst_write; /* enables write page/burst mode */ + bool device_nand; /* device is NAND */ + bool sync_read; /* enables synchronous reads */ + bool sync_write; /* enables synchronous writes */ + bool wait_on_read; /* monitor wait on reads */ + bool wait_on_write; /* monitor wait on writes */ + u32 burst_len; /* page/burst length */ + u32 device_width; /* device bus width (8 or 16 bit) */ + u32 mux_add_data; /* multiplex address & data */ + u32 wait_pin; /* wait-pin to be used */ +}; + +/* Data for each chip select */ +struct gpmc_omap_cs_data { + bool valid; /* data is valid */ + bool is_nand; /* device within this CS is NAND */ + struct gpmc_settings *settings; + struct gpmc_device_timings *device_timings; + struct gpmc_timings *gpmc_timings; + struct platform_device *pdev; /* device within this CS region */ + unsigned int pdata_size; +}; + +struct gpmc_omap_platform_data { + struct gpmc_omap_cs_data cs[GPMC_CS_NUM]; +}; + +#endif /* _GPMC_OMAP_H_ */ diff --git a/include/linux/platform_data/invensense_mpu6050.h b/include/linux/platform_data/invensense_mpu6050.h index ad3aa7b95f35..554b59801aa8 100644 --- a/include/linux/platform_data/invensense_mpu6050.h +++ b/include/linux/platform_data/invensense_mpu6050.h @@ -16,13 
+16,16 @@ /** * struct inv_mpu6050_platform_data - Platform data for the mpu driver - * @orientation: Orientation matrix of the chip + * @orientation: Orientation matrix of the chip (deprecated in favor of + * mounting matrix retrieved from device-tree) * * Contains platform specific information on how to configure the MPU6050 to * work on this platform. The orientation matrices are 3x3 rotation matrices * that are applied to the data to rotate from the mounting orientation to the * platform orientation. The values must be one of 0, 1, or -1 and each row and * column should have exactly 1 non-zero value. + * + * Deprecated in favor of mounting matrix retrieved from device-tree. */ struct inv_mpu6050_platform_data { __s8 orientation[9]; }; diff --git a/include/linux/platform_data/mailbox-omap.h b/include/linux/platform_data/mailbox-omap.h deleted file mode 100644 index 4631dbb4255e..000000000000 --- a/include/linux/platform_data/mailbox-omap.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * mailbox-omap.h - * - * Copyright (C) 2013 Texas Instruments, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef _PLAT_MAILBOX_H -#define _PLAT_MAILBOX_H - -/* Interrupt register configuration types */ -#define MBOX_INTR_CFG_TYPE1 (0) -#define MBOX_INTR_CFG_TYPE2 (1) - -/** - * struct omap_mbox_dev_info - OMAP mailbox device attribute info - * @name: name of the mailbox device - * @tx_id: mailbox queue id used for transmitting messages - * @rx_id: mailbox queue id on which messages are received - * @irq_id: irq identifier number to use from the hwmod data - * @usr_id: mailbox user id for identifying the interrupt into - * the MPU interrupt controller. 
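For illustration (not part of this patch), a legacy board file would fill the now-deprecated inv_mpu6050_platform_data above with a signed identity matrix when the chip axes match the board axes:

#include <linux/platform_data/invensense_mpu6050.h>

static struct inv_mpu6050_platform_data my_mpu_pdata = {
	/* each row and column has exactly one non-zero entry (0, 1 or -1) */
	.orientation = { 1, 0, 0,
			 0, 1, 0,
			 0, 0, 1 },
};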
- */ -struct omap_mbox_dev_info { - const char *name; - u32 tx_id; - u32 rx_id; - u32 irq_id; - u32 usr_id; -}; - -/** - * struct omap_mbox_pdata - OMAP mailbox platform data - * @intr_type: type of interrupt configuration registers used - while programming mailbox queue interrupts - * @num_users: number of users (processor devices) that the mailbox - * h/w block can interrupt - * @num_fifos: number of h/w fifos within the mailbox h/w block - * @info_cnt: number of mailbox devices for the platform - * @info: array of mailbox device attributes - */ -struct omap_mbox_pdata { - u32 intr_type; - u32 num_users; - u32 num_fifos; - u32 info_cnt; - struct omap_mbox_dev_info *info; -}; - -#endif /* _PLAT_MAILBOX_H */ diff --git a/include/linux/platform_data/media/camera-pxa.h b/include/linux/platform_data/media/camera-pxa.h index 6709b1cd7c77..ce5d90e1a6e4 100644 --- a/include/linux/platform_data/media/camera-pxa.h +++ b/include/linux/platform_data/media/camera-pxa.h @@ -37,6 +37,8 @@ struct pxacamera_platform_data { unsigned long flags; unsigned long mclk_10khz; + int sensor_i2c_adapter_id; + int sensor_i2c_address; }; extern void pxa_set_camera_info(struct pxacamera_platform_data *); diff --git a/include/linux/platform_data/media/ir-rx51.h b/include/linux/platform_data/media/ir-rx51.h index 104aa892f31b..812d87307877 100644 --- a/include/linux/platform_data/media/ir-rx51.h +++ b/include/linux/platform_data/media/ir-rx51.h @@ -2,8 +2,6 @@ #define _LIRC_RX51_H struct lirc_rx51_platform_data { - int pwm_timer; - int(*set_max_mpu_wakeup_lat)(struct device *dev, long t); }; diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h index 95ccab3f454a..7daa78a2f342 100644 --- a/include/linux/platform_data/mmc-esdhc-imx.h +++ b/include/linux/platform_data/mmc-esdhc-imx.h @@ -46,5 +46,6 @@ struct esdhc_platform_data { bool support_vsel; unsigned int delay_line; unsigned int tuning_step; /* The delay cell steps in tuning procedure */ + unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */ }; #endif /* __ASM_ARCH_IMX_ESDHC_H */ diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h index 090bbab0130a..17d57a18bac5 100644 --- a/include/linux/platform_data/mtd-nand-omap2.h +++ b/include/linux/platform_data/mtd-nand-omap2.h @@ -45,7 +45,6 @@ enum omap_ecc { }; struct gpmc_nand_regs { - void __iomem *gpmc_status; void __iomem *gpmc_nand_command; void __iomem *gpmc_nand_address; void __iomem *gpmc_nand_data; @@ -64,21 +63,24 @@ struct gpmc_nand_regs { void __iomem *gpmc_bch_result4[GPMC_BCH_NUM_REMAINDER]; void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER]; void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER]; + /* Deprecated. Do not use */ + void __iomem *gpmc_status; }; struct omap_nand_platform_data { int cs; struct mtd_partition *parts; int nr_parts; - bool dev_ready; bool flash_bbt; enum nand_io xfer_type; int devsize; enum omap_ecc ecc_opt; - struct gpmc_nand_regs reg; - /* for passing the partitions */ - struct device_node *of_node; struct device_node *elm_of_node; + + /* deprecated */ + struct gpmc_nand_regs reg; + struct device_node *of_node; + bool dev_ready; }; #endif diff --git a/include/linux/platform_data/omapdss.h b/include/linux/platform_data/omapdss.h new file mode 100644 index 000000000000..679177929045 --- /dev/null +++ b/include/linux/platform_data/omapdss.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2016 Texas Instruments, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __OMAPDSS_PDATA_H +#define __OMAPDSS_PDATA_H + +enum omapdss_version { + OMAPDSS_VER_UNKNOWN = 0, + OMAPDSS_VER_OMAP24xx, + OMAPDSS_VER_OMAP34xx_ES1, /* OMAP3430 ES1.0, 2.0 */ + OMAPDSS_VER_OMAP34xx_ES3, /* OMAP3430 ES3.0+ */ + OMAPDSS_VER_OMAP3630, + OMAPDSS_VER_AM35xx, + OMAPDSS_VER_OMAP4430_ES1, /* OMAP4430 ES1.0 */ + OMAPDSS_VER_OMAP4430_ES2, /* OMAP4430 ES2.0, 2.1, 2.2 */ + OMAPDSS_VER_OMAP4, /* All other OMAP4s */ + OMAPDSS_VER_OMAP5, + OMAPDSS_VER_AM43xx, + OMAPDSS_VER_DRA7xx, +}; + +/* Board specific data */ +struct omap_dss_board_info { + const char *default_display_name; + int (*dsi_enable_pads)(int dsi_id, unsigned int lane_mask); + void (*dsi_disable_pads)(int dsi_id, unsigned int lane_mask); + int (*set_min_bus_tput)(struct device *dev, unsigned long r); + enum omapdss_version version; +}; + +#endif /* __OMAPDSS_PDATA_H */ diff --git a/include/linux/platform_data/pwm_omap_dmtimer.h b/include/linux/platform_data/pwm_omap_dmtimer.h index 59384217208f..e7d521e48855 100644 --- a/include/linux/platform_data/pwm_omap_dmtimer.h +++ b/include/linux/platform_data/pwm_omap_dmtimer.h @@ -35,6 +35,16 @@ #ifndef __PWM_OMAP_DMTIMER_PDATA_H #define __PWM_OMAP_DMTIMER_PDATA_H +/* clock sources */ +#define PWM_OMAP_DMTIMER_SRC_SYS_CLK 0x00 +#define PWM_OMAP_DMTIMER_SRC_32_KHZ 0x01 +#define PWM_OMAP_DMTIMER_SRC_EXT_CLK 0x02 + +/* timer interrupt enable bits */ +#define PWM_OMAP_DMTIMER_INT_CAPTURE (1 << 2) +#define PWM_OMAP_DMTIMER_INT_OVERFLOW (1 << 1) +#define PWM_OMAP_DMTIMER_INT_MATCH (1 << 0) + /* trigger types */ #define PWM_OMAP_DMTIMER_TRIGGER_NONE 0x00 #define PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW 0x01 @@ -45,15 +55,23 @@ typedef struct omap_dm_timer pwm_omap_dmtimer; struct pwm_omap_dmtimer_pdata { pwm_omap_dmtimer *(*request_by_node)(struct device_node *np); + pwm_omap_dmtimer *(*request_specific)(int timer_id); + pwm_omap_dmtimer *(*request)(void); + int (*free)(pwm_omap_dmtimer *timer); void (*enable)(pwm_omap_dmtimer *timer); void (*disable)(pwm_omap_dmtimer *timer); + int (*get_irq)(pwm_omap_dmtimer *timer); + int (*set_int_enable)(pwm_omap_dmtimer *timer, unsigned int value); + int (*set_int_disable)(pwm_omap_dmtimer *timer, u32 mask); + struct clk *(*get_fclk)(pwm_omap_dmtimer *timer); int (*start)(pwm_omap_dmtimer *timer); int (*stop)(pwm_omap_dmtimer *timer); + int (*set_source)(pwm_omap_dmtimer *timer, int source); int (*set_load)(pwm_omap_dmtimer *timer, int autoreload, unsigned int value); @@ -63,7 +81,10 @@ struct pwm_omap_dmtimer_pdata { int toggle, int trigger); int (*set_prescaler)(pwm_omap_dmtimer *timer, int prescaler); + unsigned int (*read_counter)(pwm_omap_dmtimer *timer); int (*write_counter)(pwm_omap_dmtimer *timer, unsigned int value); + unsigned int (*read_status)(pwm_omap_dmtimer *timer); + int (*write_status)(pwm_omap_dmtimer *timer, unsigned int value); }; #endif /* __PWM_OMAP_DMTIMER_PDATA_H */ diff --git a/include/linux/rtc-ds2404.h b/include/linux/platform_data/rtc-ds2404.h index 22c53825528f..22c53825528f 100644 --- a/include/linux/rtc-ds2404.h +++ b/include/linux/platform_data/rtc-ds2404.h diff --git a/include/linux/m48t86.h b/include/linux/platform_data/rtc-m48t86.h index 915d6b4f0f89..915d6b4f0f89 100644 --- a/include/linux/m48t86.h +++ b/include/linux/platform_data/rtc-m48t86.h diff 
--git a/include/linux/rtc-v3020.h b/include/linux/platform_data/rtc-v3020.h index e55d82cebf80..e55d82cebf80 100644 --- a/include/linux/rtc-v3020.h +++ b/include/linux/platform_data/rtc-v3020.h diff --git a/include/linux/platform_data/sht3x.h b/include/linux/platform_data/sht3x.h new file mode 100644 index 000000000000..2e5eea358194 --- /dev/null +++ b/include/linux/platform_data/sht3x.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2016 Sensirion AG, Switzerland + * Author: David Frey <david.frey@sensirion.com> + * Author: Pascal Sachs <pascal.sachs@sensirion.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __SHT3X_H_ +#define __SHT3X_H_ + +struct sht3x_platform_data { + bool blocking_io; + bool high_precision; +}; +#endif /* __SHT3X_H_ */ diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h index fb5625bcca9a..5c1e21c87270 100644 --- a/include/linux/platform_data/spi-s3c64xx.h +++ b/include/linux/platform_data/spi-s3c64xx.h @@ -38,6 +38,7 @@ struct s3c64xx_spi_csinfo { struct s3c64xx_spi_info { int src_clk_nr; int num_cs; + bool no_cs; int (*cfg_gpio)(void); dma_filter_fn filter; void *dma_tx; diff --git a/include/linux/platform_data/st33zp24.h b/include/linux/platform_data/st33zp24.h index 817dfdb37885..6f0fb6ebd7db 100644 --- a/include/linux/platform_data/st33zp24.h +++ b/include/linux/platform_data/st33zp24.h @@ -1,6 +1,6 @@ /* * STMicroelectronics TPM Linux driver for TPM 1.2 ST33ZP24 - * Copyright (C) 2009 - 2015 STMicroelectronics + * Copyright (C) 2009 - 2016 STMicroelectronics * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h index 753839187ba0..79b0e4cdb814 100644 --- a/include/linux/platform_data/st_sensors_pdata.h +++ b/include/linux/platform_data/st_sensors_pdata.h @@ -16,9 +16,11 @@ * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). * Available only for accelerometer and pressure sensors. * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet). + * @open_drain: set the interrupt line to be open drain if possible. 
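A usage sketch, not from this patch: board code would request the open-drain behaviour added below simply by setting the new field in the platform data (values here are illustrative):

#include <linux/platform_data/st_sensors_pdata.h>

static struct st_sensors_platform_data my_accel_pdata = {
	.drdy_int_pin	= 1,	/* route DRDY to INT1 */
	.open_drain	= true,	/* new: wire the interrupt line as open drain */
};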
*/ struct st_sensors_platform_data { u8 drdy_int_pin; + bool open_drain; }; #endif /* ST_SENSORS_PDATA_H */ diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 03b755521fd9..98c2a7c7108e 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -18,7 +18,7 @@ #define PLATFORM_DEVID_AUTO (-2) struct mfd_cell; -struct property_set; +struct property_entry; struct platform_device { const char *name; @@ -73,7 +73,7 @@ struct platform_device_info { size_t size_data; u64 dma_mask; - const struct property_set *pset; + struct property_entry *properties; }; extern struct platform_device *platform_device_register_full( const struct platform_device_info *pdevinfo); @@ -172,7 +172,7 @@ extern int platform_device_add_resources(struct platform_device *pdev, extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size); extern int platform_device_add_properties(struct platform_device *pdev, - const struct property_set *pset); + struct property_entry *properties); extern int platform_device_add(struct platform_device *pdev); extern void platform_device_del(struct platform_device *pdev); extern void platform_device_put(struct platform_device *pdev); diff --git a/include/linux/pm.h b/include/linux/pm.h index 6a5d654f4447..06eb353182ab 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -563,7 +563,6 @@ struct dev_pm_info { bool is_suspended:1; /* Ditto */ bool is_noirq_suspended:1; bool is_late_suspended:1; - bool ignore_children:1; bool early_init:1; /* Owned by the PM core */ bool direct_complete:1; /* Owned by the PM core */ spinlock_t lock; @@ -591,6 +590,7 @@ struct dev_pm_info { unsigned int deferred_resume:1; unsigned int run_wake:1; unsigned int runtime_auto:1; + bool ignore_children:1; unsigned int no_callbacks:1; unsigned int irq_safe:1; unsigned int use_autosuspend:1; diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h index 308d6044f153..09779b0ae720 100644 --- a/include/linux/pm_clock.h +++ b/include/linux/pm_clock.h @@ -42,6 +42,7 @@ extern int pm_clk_create(struct device *dev); extern void pm_clk_destroy(struct device *dev); extern int pm_clk_add(struct device *dev, const char *con_id); extern int pm_clk_add_clk(struct device *dev, struct clk *clk); +extern int of_pm_clk_add_clk(struct device *dev, const char *name); extern int of_pm_clk_add_clks(struct device *dev); extern void pm_clk_remove(struct device *dev, const char *con_id); extern void pm_clk_remove_clk(struct device *dev, struct clk *clk); diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 49cd8890b873..31fec858088c 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -28,14 +28,12 @@ enum gpd_status { struct dev_power_governor { bool (*power_down_ok)(struct dev_pm_domain *domain); - bool (*stop_ok)(struct device *dev); + bool (*suspend_ok)(struct device *dev); }; struct gpd_dev_ops { int (*start)(struct device *dev); int (*stop)(struct device *dev); - int (*save_state)(struct device *dev); - int (*restore_state)(struct device *dev); bool (*active_wakeup)(struct device *dev); }; @@ -59,7 +57,6 @@ struct generic_pm_domain { unsigned int device_count; /* Number of devices */ unsigned int suspended_count; /* System suspend device counter */ unsigned int prepared_count; /* Suspend counter of prepared devices */ - bool suspend_power_off; /* Power status before system suspend */ int (*power_off)(struct generic_pm_domain *domain); int (*power_on)(struct generic_pm_domain *domain); 
struct gpd_dev_ops dev_ops; @@ -94,7 +91,7 @@ struct gpd_timing_data { s64 resume_latency_ns; s64 effective_constraint_ns; bool constraint_changed; - bool cached_stop_ok; + bool cached_suspend_ok; }; struct pm_domain_data { @@ -130,8 +127,8 @@ extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *new_subdomain); extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *target); -extern void pm_genpd_init(struct generic_pm_domain *genpd, - struct dev_power_governor *gov, bool is_off); +extern int pm_genpd_init(struct generic_pm_domain *genpd, + struct dev_power_governor *gov, bool is_off); extern struct dev_power_governor simple_qos_governor; extern struct dev_power_governor pm_domain_always_on_gov; @@ -166,9 +163,10 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, { return -ENOSYS; } -static inline void pm_genpd_init(struct generic_pm_domain *genpd, - struct dev_power_governor *gov, bool is_off) +static inline int pm_genpd_init(struct generic_pm_domain *genpd, + struct dev_power_governor *gov, bool is_off) { + return -ENOSYS; } #endif diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index cccaf4a29e9f..bca26157f5b6 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -65,6 +65,10 @@ void dev_pm_opp_put_prop_name(struct device *dev); int dev_pm_opp_set_regulator(struct device *dev, const char *name); void dev_pm_opp_put_regulator(struct device *dev); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); +int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); +int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); +void dev_pm_opp_remove_table(struct device *dev); +void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask); #else static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) { @@ -109,25 +113,25 @@ static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, unsigned long freq, bool available) { - return ERR_PTR(-EINVAL); + return ERR_PTR(-ENOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq) { - return ERR_PTR(-EINVAL); + return ERR_PTR(-ENOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, unsigned long *freq) { - return ERR_PTR(-EINVAL); + return ERR_PTR(-ENOTSUPP); } static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) { - return -EINVAL; + return -ENOTSUPP; } static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) @@ -147,73 +151,85 @@ static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq) static inline struct srcu_notifier_head *dev_pm_opp_get_notifier( struct device *dev) { - return ERR_PTR(-EINVAL); + return ERR_PTR(-ENOTSUPP); } static inline int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count) { - return -EINVAL; + return -ENOTSUPP; } static inline void dev_pm_opp_put_supported_hw(struct device *dev) {} static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) { - return -EINVAL; + return -ENOTSUPP; } static inline void dev_pm_opp_put_prop_name(struct device *dev) {} static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name) { - return -EINVAL; + return -ENOTSUPP; } static 
inline void dev_pm_opp_put_regulator(struct device *dev) {} static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { + return -ENOTSUPP; +} + +static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask) +{ + return -ENOTSUPP; +} + +static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) +{ return -EINVAL; } +static inline void dev_pm_opp_remove_table(struct device *dev) +{ +} + +static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask) +{ +} + #endif /* CONFIG_PM_OPP */ #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) int dev_pm_opp_of_add_table(struct device *dev); void dev_pm_opp_of_remove_table(struct device *dev); -int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask); -void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask); -int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask); -int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask); +int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask); +void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask); +int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); #else static inline int dev_pm_opp_of_add_table(struct device *dev) { - return -EINVAL; + return -ENOTSUPP; } static inline void dev_pm_opp_of_remove_table(struct device *dev) { } -static inline int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask) -{ - return -ENOSYS; -} - -static inline void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask) +static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) { + return -ENOTSUPP; } -static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) +static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask) { - return -ENOSYS; } -static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) +static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { - return -ENOSYS; + return -ENOTSUPP; } #endif diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 7af093d6a4dd..2e14d2667b6c 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -56,6 +56,11 @@ extern void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns); extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable); +static inline void pm_suspend_ignore_children(struct device *dev, bool enable) +{ + dev->power.ignore_children = enable; +} + static inline bool pm_children_suspended(struct device *dev) { return dev->power.ignore_children @@ -156,6 +161,7 @@ static inline void __pm_runtime_disable(struct device *dev, bool c) {} static inline void pm_runtime_allow(struct device *dev) {} static inline void pm_runtime_forbid(struct device *dev) {} +static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {} static inline bool pm_children_suspended(struct device *dev) { return false; } static inline void pm_runtime_get_noresume(struct device *dev) {} static inline void pm_runtime_put_noidle(struct device *dev) {} diff --git a/include/linux/pmem.h b/include/linux/pmem.h index 57d146fe44dd..e856c2cb0fe8 100644 --- a/include/linux/pmem.h +++ b/include/linux/pmem.h @@ -26,47 +26,35 @@ * calling these symbols with arch_has_pmem_api() and redirect to the * implementation in asm/pmem.h. 
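A hedged sketch of how a cpufreq-style caller might combine the OPP helpers above; cpu_dev and shared_cpus are assumed to be provided by the caller, and the error handling is illustrative:

#include <linux/pm_opp.h>

static int my_init_opp_table(struct device *cpu_dev,
			     const struct cpumask *shared_cpus)
{
	int ret;

	ret = dev_pm_opp_of_add_table(cpu_dev);
	if (ret)
		return ret;

	/* record that every CPU in @shared_cpus shares cpu_dev's OPP table */
	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, shared_cpus);
	if (ret)
		dev_pm_opp_of_remove_table(cpu_dev);

	return ret;
}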
*/ -static inline bool __arch_has_wmb_pmem(void) -{ - return false; -} - -static inline void arch_wmb_pmem(void) -{ - BUG(); -} - -static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src, - size_t n) +static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n) { BUG(); } -static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src, - size_t n) +static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n) { BUG(); return -EFAULT; } -static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes, +static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes, struct iov_iter *i) { BUG(); return 0; } -static inline void arch_clear_pmem(void __pmem *addr, size_t size) +static inline void arch_clear_pmem(void *addr, size_t size) { BUG(); } -static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size) +static inline void arch_wb_cache_pmem(void *addr, size_t size) { BUG(); } -static inline void arch_invalidate_pmem(void __pmem *addr, size_t size) +static inline void arch_invalidate_pmem(void *addr, size_t size) { BUG(); } @@ -77,13 +65,6 @@ static inline bool arch_has_pmem_api(void) return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API); } -static inline int default_memcpy_from_pmem(void *dst, void __pmem const *src, - size_t size) -{ - memcpy(dst, (void __force *) src, size); - return 0; -} - /* * memcpy_from_pmem - read from persistent memory with error handling * @dst: destination buffer @@ -92,54 +73,13 @@ static inline int default_memcpy_from_pmem(void *dst, void __pmem const *src, * * Returns 0 on success negative error code on failure. */ -static inline int memcpy_from_pmem(void *dst, void __pmem const *src, - size_t size) +static inline int memcpy_from_pmem(void *dst, void const *src, size_t size) { if (arch_has_pmem_api()) return arch_memcpy_from_pmem(dst, src, size); else - return default_memcpy_from_pmem(dst, src, size); -} - -/** - * arch_has_wmb_pmem - true if wmb_pmem() ensures durability - * - * For a given cpu implementation within an architecture it is possible - * that wmb_pmem() resolves to a nop. In the case this returns - * false, pmem api users are unable to ensure durability and may want to - * fall back to a different data consistency model, or otherwise notify - * the user. - */ -static inline bool arch_has_wmb_pmem(void) -{ - return arch_has_pmem_api() && __arch_has_wmb_pmem(); -} - -/* - * These defaults seek to offer decent performance and minimize the - * window between i/o completion and writes being durable on media. - * However, it is undefined / architecture specific whether - * ARCH_MEMREMAP_PMEM + default_memcpy_to_pmem is sufficient for - * making data durable relative to i/o completion. - */ -static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src, - size_t size) -{ - memcpy((void __force *) dst, src, size); -} - -static inline size_t default_copy_from_iter_pmem(void __pmem *addr, - size_t bytes, struct iov_iter *i) -{ - return copy_from_iter_nocache((void __force *)addr, bytes, i); -} - -static inline void default_clear_pmem(void __pmem *addr, size_t size) -{ - if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0) - clear_page((void __force *)addr); - else - memset((void __force *)addr, 0, size); + memcpy(dst, src, size); + return 0; } /** @@ -152,29 +92,14 @@ static inline void default_clear_pmem(void __pmem *addr, size_t size) * being effectively evicted from, or never written to, the processor * cache hierarchy after the copy completes. 
After memcpy_to_pmem() * data may still reside in cpu or platform buffers, so this operation - * must be followed by a wmb_pmem(). + * must be followed by a blkdev_issue_flush() on the pmem block device. */ -static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n) +static inline void memcpy_to_pmem(void *dst, const void *src, size_t n) { if (arch_has_pmem_api()) arch_memcpy_to_pmem(dst, src, n); else - default_memcpy_to_pmem(dst, src, n); -} - -/** - * wmb_pmem - synchronize writes to persistent memory - * - * After a series of memcpy_to_pmem() operations this drains data from - * cpu write buffers and any platform (memory controller) buffers to - * ensure that written data is durable on persistent memory media. - */ -static inline void wmb_pmem(void) -{ - if (arch_has_wmb_pmem()) - arch_wmb_pmem(); - else - wmb(); + memcpy(dst, src, n); } /** @@ -184,14 +109,14 @@ static inline void wmb_pmem(void) * @i: iterator with source data * * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'. - * This function requires explicit ordering with a wmb_pmem() call. + * See blkdev_issue_flush() note for memcpy_to_pmem(). */ -static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes, +static inline size_t copy_from_iter_pmem(void *addr, size_t bytes, struct iov_iter *i) { if (arch_has_pmem_api()) return arch_copy_from_iter_pmem(addr, bytes, i); - return default_copy_from_iter_pmem(addr, bytes, i); + return copy_from_iter_nocache(addr, bytes, i); } /** @@ -200,14 +125,14 @@ static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes, * @size: number of bytes to zero * * Write zeros into the memory range starting at 'addr' for 'size' bytes. - * This function requires explicit ordering with a wmb_pmem() call. + * See blkdev_issue_flush() note for memcpy_to_pmem(). */ -static inline void clear_pmem(void __pmem *addr, size_t size) +static inline void clear_pmem(void *addr, size_t size) { if (arch_has_pmem_api()) arch_clear_pmem(addr, size); else - default_clear_pmem(addr, size); + memset(addr, 0, size); } /** @@ -218,7 +143,7 @@ static inline void clear_pmem(void __pmem *addr, size_t size) * For platforms that support clearing poison this flushes any poisoned * ranges out of the cache */ -static inline void invalidate_pmem(void __pmem *addr, size_t size) +static inline void invalidate_pmem(void *addr, size_t size) { if (arch_has_pmem_api()) arch_invalidate_pmem(addr, size); @@ -230,9 +155,9 @@ static inline void invalidate_pmem(void __pmem *addr, size_t size) * @size: number of bytes to write back * * Write back the processor cache range starting at 'addr' for 'size' bytes. - * This function requires explicit ordering with a wmb_pmem() call. + * See blkdev_issue_flush() note for memcpy_to_pmem(). 
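Pulling the new pmem ordering model together, a minimal sketch (not from this patch; pmem_bdev is a hypothetical struct block_device backing the pmem range):

#include <linux/blkdev.h>
#include <linux/pmem.h>

static int my_pmem_write_durable(struct block_device *pmem_bdev,
				 void *dst, const void *src, size_t n)
{
	memcpy_to_pmem(dst, src, n);	/* may still sit in cpu/platform buffers */
	/* replaces the old wmb_pmem(): ask the block layer for durability */
	return blkdev_issue_flush(pmem_bdev, GFP_KERNEL, NULL);
}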
*/ -static inline void wb_cache_pmem(void __pmem *addr, size_t size) +static inline void wb_cache_pmem(void *addr, size_t size) { if (arch_has_pmem_api()) arch_wb_cache_pmem(addr, size); diff --git a/include/linux/pnp.h b/include/linux/pnp.h index 5df733b8f704..2588ca6a9028 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -337,9 +337,11 @@ extern struct mutex pnp_res_mutex; #ifdef CONFIG_PNPBIOS extern struct pnp_protocol pnpbios_protocol; +extern bool arch_pnpbios_disabled(void); #define pnp_device_is_pnpbios(dev) ((dev)->protocol == (&pnpbios_protocol)) #else #define pnp_device_is_pnpbios(dev) 0 +#define arch_pnpbios_disabled() false #endif #ifdef CONFIG_PNPACPI diff --git a/include/linux/poll.h b/include/linux/poll.h index 9fb4f40d9a26..37b057b63b46 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -96,7 +96,7 @@ extern void poll_initwait(struct poll_wqueues *pwq); extern void poll_freewait(struct poll_wqueues *pwq); extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state, ktime_t *expires, unsigned long slack); -extern u64 select_estimate_accuracy(struct timespec *tv); +extern u64 select_estimate_accuracy(struct timespec64 *tv); static inline int poll_schedule(struct poll_wqueues *pwq, int state) @@ -153,12 +153,13 @@ void zero_fd_set(unsigned long nr, unsigned long *fdset) #define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1) -extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time); +extern int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time); extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds, - struct timespec *end_time); + struct timespec64 *end_time); extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, - fd_set __user *exp, struct timespec *end_time); + fd_set __user *exp, struct timespec64 *end_time); -extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec); +extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec, + long nsec); #endif /* _LINUX_POLL_H */ diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index 3e96a6a76103..d5d3d741f028 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h @@ -43,10 +43,8 @@ struct posix_acl_entry { }; struct posix_acl { - union { - atomic_t a_refcount; - struct rcu_head a_rcu; - }; + atomic_t a_refcount; + struct rcu_head a_rcu; unsigned int a_count; struct posix_acl_entry a_entries[0]; }; @@ -81,7 +79,7 @@ posix_acl_release(struct posix_acl *acl) extern void posix_acl_init(struct posix_acl *, int); extern struct posix_acl *posix_acl_alloc(int, gfp_t); -extern int posix_acl_valid(const struct posix_acl *); +extern int posix_acl_valid(struct user_namespace *, const struct posix_acl *); extern int posix_acl_permission(struct inode *, const struct posix_acl *, int); extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t); extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *); @@ -99,7 +97,6 @@ extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **, extern int simple_set_acl(struct inode *, struct posix_acl *, int); extern int simple_acl_create(struct inode *, struct inode *); -struct posix_acl **acl_by_type(struct inode *inode, int type); struct posix_acl *get_cached_acl(struct inode *inode, int type); struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type); void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl); diff --git a/include/linux/power/max8903_charger.h 
b/include/linux/power/max8903_charger.h index 24f51db8a83f..89d3f1cb3433 100644 --- a/include/linux/power/max8903_charger.h +++ b/include/linux/power/max8903_charger.h @@ -26,8 +26,8 @@ struct max8903_pdata { /* * GPIOs - * cen, chg, flt, and usus are optional. - * dok, dcm, and uok are not optional depending on the status of + * cen, chg, flt, dcm and usus are optional. + * dok and uok are not optional depending on the status of * dc_valid and usb_valid. */ int cen; /* Charger Enable input */ @@ -41,7 +41,7 @@ struct max8903_pdata { /* * DC(Adapter/TA) is wired * When dc_valid is true, - * dok and dcm should be valid. + * dok should be valid. * * At least one of dc_valid or usb_valid should be true. */ diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 751061790626..3965503315ef 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -248,6 +248,7 @@ struct power_supply { struct delayed_work deferred_register_work; spinlock_t changed_lock; bool changed; + bool initialized; atomic_t use_cnt; #ifdef CONFIG_THERMAL struct thermal_zone_device *tzd; diff --git a/include/linux/printk.h b/include/linux/printk.h index 9ccbdf2c1453..696a56be7d3e 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -61,6 +61,11 @@ static inline void console_verbose(void) console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; } +/* strlen("ratelimit") + 1 */ +#define DEVKMSG_STR_MAX_SIZE 10 +extern char devkmsg_log_str[]; +struct ctl_table; + struct va_format { const char *fmt; va_list *va; @@ -108,11 +113,14 @@ struct va_format { * Dummy printk for disabled debugging statements to use whilst maintaining * gcc's format checking. */ -#define no_printk(fmt, ...) \ -do { \ - if (0) \ - printk(fmt, ##__VA_ARGS__); \ -} while (0) +#define no_printk(fmt, ...) \ +({ \ + do { \ + if (0) \ + printk(fmt, ##__VA_ARGS__); \ + } while (0); \ + 0; \ +}) #ifdef CONFIG_EARLY_PRINTK extern asmlinkage __printf(1, 2) @@ -122,7 +130,19 @@ static inline __printf(1, 2) __cold void early_printk(const char *s, ...) { } #endif -typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args); +#ifdef CONFIG_PRINTK_NMI +extern void printk_nmi_init(void); +extern void printk_nmi_enter(void); +extern void printk_nmi_exit(void); +extern void printk_nmi_flush(void); +extern void printk_nmi_flush_on_panic(void); +#else +static inline void printk_nmi_init(void) { } +static inline void printk_nmi_enter(void) { } +static inline void printk_nmi_exit(void) { } +static inline void printk_nmi_flush(void) { } +static inline void printk_nmi_flush_on_panic(void) { } +#endif /* PRINTK_NMI */ #ifdef CONFIG_PRINTK asmlinkage __printf(5, 0) @@ -160,6 +180,10 @@ extern int printk_delay_msec; extern int dmesg_restrict; extern int kptr_restrict; +extern int +devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void __user *buf, + size_t *lenp, loff_t *ppos); + extern void wake_up_klogd(void); char *log_buf_addr_get(void); @@ -274,10 +298,11 @@ extern asmlinkage void dump_stack(void) __cold; no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif -#include <linux/dynamic_debug.h> /* If you are writing a driver, please use dev_dbg instead */ #if defined(CONFIG_DYNAMIC_DEBUG) +#include <linux/dynamic_debug.h> + /* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ #define pr_debug(fmt, ...) \ dynamic_pr_debug(fmt, ##__VA_ARGS__) @@ -297,20 +322,24 @@ extern asmlinkage void dump_stack(void) __cold; #define printk_once(fmt, ...) 
\ ({ \ static bool __print_once __read_mostly; \ + bool __ret_print_once = !__print_once; \ \ if (!__print_once) { \ __print_once = true; \ printk(fmt, ##__VA_ARGS__); \ } \ + unlikely(__ret_print_once); \ }) #define printk_deferred_once(fmt, ...) \ ({ \ static bool __print_once __read_mostly; \ + bool __ret_print_once = !__print_once; \ \ if (!__print_once) { \ __print_once = true; \ printk_deferred(fmt, ##__VA_ARGS__); \ } \ + unlikely(__ret_print_once); \ }) #else #define printk_once(fmt, ...) \ diff --git a/include/linux/property.h b/include/linux/property.h index b51fcd36d892..856e50b2140c 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -77,6 +77,9 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev, for (child = device_get_next_child_node(dev, NULL); child; \ child = device_get_next_child_node(dev, child)) +struct fwnode_handle *device_get_named_child_node(struct device *dev, + const char *childname); + void fwnode_handle_put(struct fwnode_handle *fwnode); unsigned int device_get_child_node_count(struct device *dev); @@ -187,7 +190,7 @@ struct property_entry { .length = ARRAY_SIZE(_val_) * sizeof(_type_), \ .is_array = true, \ .is_string = false, \ - { .pointer = { _type_##_data = _val_ } }, \ + { .pointer = { ._type_##_data = _val_ } }, \ } #define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \ @@ -238,18 +241,9 @@ struct property_entry { .name = _name_, \ } -/** - * struct property_set - Collection of "built-in" device properties. - * @fwnode: Handle to be pointed to by the fwnode field of struct device. - * @properties: Array of properties terminated with a null entry. - */ -struct property_set { - struct fwnode_handle fwnode; - struct property_entry *properties; -}; - -int device_add_property_set(struct device *dev, const struct property_set *pset); -void device_remove_property_set(struct device *dev); +int device_add_properties(struct device *dev, + struct property_entry *properties); +void device_remove_properties(struct device *dev); bool device_dma_supported(struct device *dev); diff --git a/include/linux/proportions.h b/include/linux/proportions.h deleted file mode 100644 index 21221338ad18..000000000000 --- a/include/linux/proportions.h +++ /dev/null @@ -1,137 +0,0 @@ -/* - * FLoating proportions - * - * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra - * - * This file contains the public data structure and API definitions. - */ - -#ifndef _LINUX_PROPORTIONS_H -#define _LINUX_PROPORTIONS_H - -#include <linux/percpu_counter.h> -#include <linux/spinlock.h> -#include <linux/mutex.h> -#include <linux/gfp.h> - -struct prop_global { - /* - * The period over which we differentiate - * - * period = 2^shift - */ - int shift; - /* - * The total event counter aka 'time'. - * - * Treated as an unsigned long; the lower 'shift - 1' bits are the - * counter bits, the remaining upper bits the period counter. - */ - struct percpu_counter events; -}; - -/* - * global proportion descriptor - * - * this is needed to consistently flip prop_global structures. 
- */ -struct prop_descriptor { - int index; - struct prop_global pg[2]; - struct mutex mutex; /* serialize the prop_global switch */ -}; - -int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp); -void prop_change_shift(struct prop_descriptor *pd, int new_shift); - -/* - * ----- PERCPU ------ - */ - -struct prop_local_percpu { - /* - * the local events counter - */ - struct percpu_counter events; - - /* - * snapshot of the last seen global state - */ - int shift; - unsigned long period; - raw_spinlock_t lock; /* protect the snapshot state */ -}; - -int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp); -void prop_local_destroy_percpu(struct prop_local_percpu *pl); -void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl); -void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl, - long *numerator, long *denominator); - -static inline -void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl) -{ - unsigned long flags; - - local_irq_save(flags); - __prop_inc_percpu(pd, pl); - local_irq_restore(flags); -} - -/* - * Limit the time part in order to ensure there are some bits left for the - * cycle counter and fraction multiply. - */ -#if BITS_PER_LONG == 32 -#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4) -#else -#define PROP_MAX_SHIFT (BITS_PER_LONG/2) -#endif - -#define PROP_FRAC_SHIFT (BITS_PER_LONG - PROP_MAX_SHIFT - 1) -#define PROP_FRAC_BASE (1UL << PROP_FRAC_SHIFT) - -void __prop_inc_percpu_max(struct prop_descriptor *pd, - struct prop_local_percpu *pl, long frac); - - -/* - * ----- SINGLE ------ - */ - -struct prop_local_single { - /* - * the local events counter - */ - unsigned long events; - - /* - * snapshot of the last seen global state - * and a lock protecting this state - */ - unsigned long period; - int shift; - raw_spinlock_t lock; /* protect the snapshot state */ -}; - -#define INIT_PROP_LOCAL_SINGLE(name) \ -{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ -} - -int prop_local_init_single(struct prop_local_single *pl); -void prop_local_destroy_single(struct prop_local_single *pl); -void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl); -void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl, - long *numerator, long *denominator); - -static inline -void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl) -{ - unsigned long flags; - - local_irq_save(flags); - __prop_inc_single(pd, pl); - local_irq_restore(flags); -} - -#endif /* _LINUX_PROPORTIONS_H */ diff --git a/include/linux/psci.h b/include/linux/psci.h index 393efe2edf9a..bdea1cb5e1db 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h @@ -21,8 +21,6 @@ #define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 bool psci_tos_resident_on(int cpu); -bool psci_power_state_loses_context(u32 state); -bool psci_power_state_is_valid(u32 state); int psci_cpu_init_idle(unsigned int cpu); int psci_cpu_suspend_enter(unsigned long index); diff --git a/include/linux/pstore.h b/include/linux/pstore.h index 831479f8df8f..899e95e84400 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -58,7 +58,8 @@ struct pstore_info { int (*close)(struct pstore_info *psi); ssize_t (*read)(u64 *id, enum pstore_type_id *type, int *count, struct timespec *time, char **buf, - bool *compressed, struct pstore_info *psi); + bool *compressed, ssize_t *ecc_notice_size, + struct pstore_info *psi); int (*write)(enum pstore_type_id type, enum kmsg_dump_reason reason, u64 *id, 
unsigned int part, int count, bool compressed, diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h new file mode 100644 index 000000000000..2052011bf9fb --- /dev/null +++ b/include/linux/ptr_ring.h @@ -0,0 +1,448 @@ +/* + * Definitions for the 'struct ptr_ring' datastructure. + * + * Author: + * Michael S. Tsirkin <mst@redhat.com> + * + * Copyright (C) 2016 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This is a limited-size FIFO maintaining pointers in FIFO order, with + * one CPU producing entries and another consuming entries from a FIFO. + * + * This implementation tries to minimize cache-contention when there is a + * single producer and a single consumer CPU. + */ + +#ifndef _LINUX_PTR_RING_H +#define _LINUX_PTR_RING_H 1 + +#ifdef __KERNEL__ +#include <linux/spinlock.h> +#include <linux/cache.h> +#include <linux/types.h> +#include <linux/compiler.h> +#include <linux/cache.h> +#include <linux/slab.h> +#include <asm/errno.h> +#endif + +struct ptr_ring { + int producer ____cacheline_aligned_in_smp; + spinlock_t producer_lock; + int consumer ____cacheline_aligned_in_smp; + spinlock_t consumer_lock; + /* Shared consumer/producer data */ + /* Read-only by both the producer and the consumer */ + int size ____cacheline_aligned_in_smp; /* max entries in queue */ + void **queue; +}; + +/* Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). If ring is ever resized, callers must hold + * producer_lock - see e.g. ptr_ring_full. Otherwise, if callers don't hold + * producer_lock, the next call to __ptr_ring_produce may fail. + */ +static inline bool __ptr_ring_full(struct ptr_ring *r) +{ + return r->queue[r->producer]; +} + +static inline bool ptr_ring_full(struct ptr_ring *r) +{ + bool ret; + + spin_lock(&r->producer_lock); + ret = __ptr_ring_full(r); + spin_unlock(&r->producer_lock); + + return ret; +} + +static inline bool ptr_ring_full_irq(struct ptr_ring *r) +{ + bool ret; + + spin_lock_irq(&r->producer_lock); + ret = __ptr_ring_full(r); + spin_unlock_irq(&r->producer_lock); + + return ret; +} + +static inline bool ptr_ring_full_any(struct ptr_ring *r) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(&r->producer_lock, flags); + ret = __ptr_ring_full(r); + spin_unlock_irqrestore(&r->producer_lock, flags); + + return ret; +} + +static inline bool ptr_ring_full_bh(struct ptr_ring *r) +{ + bool ret; + + spin_lock_bh(&r->producer_lock); + ret = __ptr_ring_full(r); + spin_unlock_bh(&r->producer_lock); + + return ret; +} + +/* Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). Callers must hold producer_lock. 
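As an illustration of that locking rule, a blocking producer built on the helper defined just below might look like this sketch (demo_produce_blocking is not part of the header):

/*
 * producer_lock covers both the retry loop and the produce itself;
 * cpu_relax() doubles as the compiler barrier the note above requires.
 * The consumer only ever takes consumer_lock, so it can keep draining
 * the ring while we spin.
 */
static void demo_produce_blocking(struct ptr_ring *r, void *item)
{
	spin_lock(&r->producer_lock);
	while (__ptr_ring_produce(r, item) == -ENOSPC)
		cpu_relax();
	spin_unlock(&r->producer_lock);
}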
+ */ +static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) +{ + if (unlikely(!r->size) || r->queue[r->producer]) + return -ENOSPC; + + r->queue[r->producer++] = ptr; + if (unlikely(r->producer >= r->size)) + r->producer = 0; + return 0; +} + +static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr) +{ + int ret; + + spin_lock(&r->producer_lock); + ret = __ptr_ring_produce(r, ptr); + spin_unlock(&r->producer_lock); + + return ret; +} + +static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr) +{ + int ret; + + spin_lock_irq(&r->producer_lock); + ret = __ptr_ring_produce(r, ptr); + spin_unlock_irq(&r->producer_lock); + + return ret; +} + +static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&r->producer_lock, flags); + ret = __ptr_ring_produce(r, ptr); + spin_unlock_irqrestore(&r->producer_lock, flags); + + return ret; +} + +static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr) +{ + int ret; + + spin_lock_bh(&r->producer_lock); + ret = __ptr_ring_produce(r, ptr); + spin_unlock_bh(&r->producer_lock); + + return ret; +} + +/* Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). Callers must take consumer_lock + * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL. + * If ring is never resized, and if the pointer is merely + * tested, there's no need to take the lock - see e.g. __ptr_ring_empty. + */ +static inline void *__ptr_ring_peek(struct ptr_ring *r) +{ + if (likely(r->size)) + return r->queue[r->consumer]; + return NULL; +} + +/* Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). Callers must take consumer_lock + * if the ring is ever resized - see e.g. ptr_ring_empty. 
+ */ +static inline bool __ptr_ring_empty(struct ptr_ring *r) +{ + return !__ptr_ring_peek(r); +} + +static inline bool ptr_ring_empty(struct ptr_ring *r) +{ + bool ret; + + spin_lock(&r->consumer_lock); + ret = __ptr_ring_empty(r); + spin_unlock(&r->consumer_lock); + + return ret; +} + +static inline bool ptr_ring_empty_irq(struct ptr_ring *r) +{ + bool ret; + + spin_lock_irq(&r->consumer_lock); + ret = __ptr_ring_empty(r); + spin_unlock_irq(&r->consumer_lock); + + return ret; +} + +static inline bool ptr_ring_empty_any(struct ptr_ring *r) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(&r->consumer_lock, flags); + ret = __ptr_ring_empty(r); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ret; +} + +static inline bool ptr_ring_empty_bh(struct ptr_ring *r) +{ + bool ret; + + spin_lock_bh(&r->consumer_lock); + ret = __ptr_ring_empty(r); + spin_unlock_bh(&r->consumer_lock); + + return ret; +} + +/* Must only be called after __ptr_ring_peek returned !NULL */ +static inline void __ptr_ring_discard_one(struct ptr_ring *r) +{ + r->queue[r->consumer++] = NULL; + if (unlikely(r->consumer >= r->size)) + r->consumer = 0; +} + +static inline void *__ptr_ring_consume(struct ptr_ring *r) +{ + void *ptr; + + ptr = __ptr_ring_peek(r); + if (ptr) + __ptr_ring_discard_one(r); + + return ptr; +} + +static inline void *ptr_ring_consume(struct ptr_ring *r) +{ + void *ptr; + + spin_lock(&r->consumer_lock); + ptr = __ptr_ring_consume(r); + spin_unlock(&r->consumer_lock); + + return ptr; +} + +static inline void *ptr_ring_consume_irq(struct ptr_ring *r) +{ + void *ptr; + + spin_lock_irq(&r->consumer_lock); + ptr = __ptr_ring_consume(r); + spin_unlock_irq(&r->consumer_lock); + + return ptr; +} + +static inline void *ptr_ring_consume_any(struct ptr_ring *r) +{ + unsigned long flags; + void *ptr; + + spin_lock_irqsave(&r->consumer_lock, flags); + ptr = __ptr_ring_consume(r); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ptr; +} + +static inline void *ptr_ring_consume_bh(struct ptr_ring *r) +{ + void *ptr; + + spin_lock_bh(&r->consumer_lock); + ptr = __ptr_ring_consume(r); + spin_unlock_bh(&r->consumer_lock); + + return ptr; +} + +/* Cast to structure type and call a function without discarding from FIFO. + * Function must return a value. + * Callers must take consumer_lock. 
+ */ +#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r))) + +#define PTR_RING_PEEK_CALL(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + \ + spin_lock(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + \ + spin_lock_irq(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock_irq(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +#define PTR_RING_PEEK_CALL_BH(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + \ + spin_lock_bh(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock_bh(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + unsigned long __PTR_RING_PEEK_CALL_f;\ + \ + spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp) +{ + return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp); +} + +static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) +{ + r->queue = __ptr_ring_init_queue_alloc(size, gfp); + if (!r->queue) + return -ENOMEM; + + r->size = size; + r->producer = r->consumer = 0; + spin_lock_init(&r->producer_lock); + spin_lock_init(&r->consumer_lock); + + return 0; +} + +static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, + int size, gfp_t gfp, + void (*destroy)(void *)) +{ + int producer = 0; + void **old; + void *ptr; + + while ((ptr = ptr_ring_consume(r))) + if (producer < size) + queue[producer++] = ptr; + else if (destroy) + destroy(ptr); + + r->size = size; + r->producer = producer; + r->consumer = 0; + old = r->queue; + r->queue = queue; + + return old; +} + +static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, + void (*destroy)(void *)) +{ + unsigned long flags; + void **queue = __ptr_ring_init_queue_alloc(size, gfp); + void **old; + + if (!queue) + return -ENOMEM; + + spin_lock_irqsave(&(r)->producer_lock, flags); + + old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy); + + spin_unlock_irqrestore(&(r)->producer_lock, flags); + + kfree(old); + + return 0; +} + +static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings, + int size, + gfp_t gfp, void (*destroy)(void *)) +{ + unsigned long flags; + void ***queues; + int i; + + queues = kmalloc(nrings * sizeof *queues, gfp); + if (!queues) + goto noqueues; + + for (i = 0; i < nrings; ++i) { + queues[i] = __ptr_ring_init_queue_alloc(size, gfp); + if (!queues[i]) + goto nomem; + } + + for (i = 0; i < nrings; ++i) { + spin_lock_irqsave(&(rings[i])->producer_lock, flags); + queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], + size, gfp, destroy); + spin_unlock_irqrestore(&(rings[i])->producer_lock, flags); + } + + for (i = 0; i < nrings; ++i) + kfree(queues[i]); + + kfree(queues); + + return 0; + +nomem: + while (--i >= 0) + kfree(queues[i]); + + kfree(queues); + +noqueues: + return -ENOMEM; +} + +static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *)) +{ + void *ptr; + + if (destroy) + while ((ptr = ptr_ring_consume(r))) + destroy(ptr); + kfree(r->queue); +} 
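Taken together, a single-producer/single-consumer user of this header might look like the following sketch (do_work and the 64-entry size are arbitrary; error handling is trimmed):

static int demo_ptr_ring(void *obj)
{
	struct ptr_ring ring;
	void *entry;
	int err;

	err = ptr_ring_init(&ring, 64, GFP_KERNEL);	/* zeroed 64-slot FIFO */
	if (err)
		return err;

	/* Producer side, typically one CPU. */
	err = ptr_ring_produce(&ring, obj);		/* -ENOSPC when full */

	/* Consumer side, typically another CPU. */
	while ((entry = ptr_ring_consume(&ring)))
		do_work(entry);				/* hypothetical handler */

	ptr_ring_cleanup(&ring, NULL);			/* ring already drained */
	return err;
}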
+ +#endif /* _LINUX_PTR_RING_H */ diff --git a/include/linux/pwm.h b/include/linux/pwm.h index cfc3ed46cad2..f1bbae014889 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -5,59 +5,9 @@ #include <linux/mutex.h> #include <linux/of.h> -struct pwm_device; +struct pwm_capture; struct seq_file; -#if IS_ENABLED(CONFIG_PWM) -/* - * pwm_request - request a PWM device - */ -struct pwm_device *pwm_request(int pwm_id, const char *label); - -/* - * pwm_free - free a PWM device - */ -void pwm_free(struct pwm_device *pwm); - -/* - * pwm_config - change a PWM device configuration - */ -int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns); - -/* - * pwm_enable - start a PWM output toggling - */ -int pwm_enable(struct pwm_device *pwm); - -/* - * pwm_disable - stop a PWM output toggling - */ -void pwm_disable(struct pwm_device *pwm); -#else -static inline struct pwm_device *pwm_request(int pwm_id, const char *label) -{ - return ERR_PTR(-ENODEV); -} - -static inline void pwm_free(struct pwm_device *pwm) -{ -} - -static inline int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) -{ - return -EINVAL; -} - -static inline int pwm_enable(struct pwm_device *pwm) -{ - return -EINVAL; -} - -static inline void pwm_disable(struct pwm_device *pwm) -{ -} -#endif - struct pwm_chip; /** @@ -74,10 +24,41 @@ enum pwm_polarity { PWM_POLARITY_INVERSED, }; +/** + * struct pwm_args - board-dependent PWM arguments + * @period: reference period + * @polarity: reference polarity + * + * This structure describes board-dependent arguments attached to a PWM + * device. These arguments are usually retrieved from the PWM lookup table or + * device tree. + * + * Do not confuse this with the PWM state: PWM arguments represent the initial + * configuration that users want to use on this PWM device rather than the + * current PWM hardware state. 
+ */ +struct pwm_args { + unsigned int period; + enum pwm_polarity polarity; +}; + enum { PWMF_REQUESTED = 1 << 0, - PWMF_ENABLED = 1 << 1, - PWMF_EXPORTED = 1 << 2, + PWMF_EXPORTED = 1 << 1, +}; + +/* + * struct pwm_state - state of a PWM channel + * @period: PWM period (in nanoseconds) + * @duty_cycle: PWM duty cycle (in nanoseconds) + * @polarity: PWM polarity + * @enabled: PWM enabled status + */ +struct pwm_state { + unsigned int period; + unsigned int duty_cycle; + enum pwm_polarity polarity; + bool enabled; }; /** @@ -88,10 +69,8 @@ enum { * @pwm: global index of the PWM device * @chip: PWM chip providing this PWM device * @chip_data: chip-private data associated with the PWM device - * @lock: used to serialize accesses to the PWM device where necessary - * @period: period of the PWM signal (in nanoseconds) - * @duty_cycle: duty cycle of the PWM signal (in nanoseconds) - * @polarity: polarity of the PWM signal + * @args: PWM arguments + * @state: curent PWM channel state */ struct pwm_device { const char *label; @@ -100,48 +79,162 @@ struct pwm_device { unsigned int pwm; struct pwm_chip *chip; void *chip_data; - struct mutex lock; - unsigned int period; - unsigned int duty_cycle; - enum pwm_polarity polarity; + struct pwm_args args; + struct pwm_state state; }; +/** + * pwm_get_state() - retrieve the current PWM state + * @pwm: PWM device + * @state: state to fill with the current PWM state + */ +static inline void pwm_get_state(const struct pwm_device *pwm, + struct pwm_state *state) +{ + *state = pwm->state; +} + static inline bool pwm_is_enabled(const struct pwm_device *pwm) { - return test_bit(PWMF_ENABLED, &pwm->flags); + struct pwm_state state; + + pwm_get_state(pwm, &state); + + return state.enabled; } static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period) { if (pwm) - pwm->period = period; + pwm->state.period = period; } static inline unsigned int pwm_get_period(const struct pwm_device *pwm) { - return pwm ? pwm->period : 0; + struct pwm_state state; + + pwm_get_state(pwm, &state); + + return state.period; } static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty) { if (pwm) - pwm->duty_cycle = duty; + pwm->state.duty_cycle = duty; } static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm) { - return pwm ? pwm->duty_cycle : 0; + struct pwm_state state; + + pwm_get_state(pwm, &state); + + return state.duty_cycle; } -/* - * pwm_set_polarity - configure the polarity of a PWM signal +static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm) +{ + struct pwm_state state; + + pwm_get_state(pwm, &state); + + return state.polarity; +} + +static inline void pwm_get_args(const struct pwm_device *pwm, + struct pwm_args *args) +{ + *args = pwm->args; +} + +/** + * pwm_init_state() - prepare a new state to be applied with pwm_apply_state() + * @pwm: PWM device + * @state: state to fill with the prepared PWM state + * + * This functions prepares a state that can later be tweaked and applied + * to the PWM device with pwm_apply_state(). This is a convenient function + * that first retrieves the current PWM state and the replaces the period + * and polarity fields with the reference values defined in pwm->args. + * Once the function returns, you can adjust the ->enabled and ->duty_cycle + * fields according to your needs before calling pwm_apply_state(). 
+ * + * ->duty_cycle is initially set to zero to avoid cases where the current + * ->duty_cycle value exceed the pwm_args->period one, which would trigger + * an error if the user calls pwm_apply_state() without adjusting ->duty_cycle + * first. */ -int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity); +static inline void pwm_init_state(const struct pwm_device *pwm, + struct pwm_state *state) +{ + struct pwm_args args; -static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm) + /* First get the current state. */ + pwm_get_state(pwm, state); + + /* Then fill it with the reference config */ + pwm_get_args(pwm, &args); + + state->period = args.period; + state->polarity = args.polarity; + state->duty_cycle = 0; +} + +/** + * pwm_get_relative_duty_cycle() - Get a relative duty cycle value + * @state: PWM state to extract the duty cycle from + * @scale: target scale of the relative duty cycle + * + * This functions converts the absolute duty cycle stored in @state (expressed + * in nanosecond) into a value relative to the period. + * + * For example if you want to get the duty_cycle expressed in percent, call: + * + * pwm_get_state(pwm, &state); + * duty = pwm_get_relative_duty_cycle(&state, 100); + */ +static inline unsigned int +pwm_get_relative_duty_cycle(const struct pwm_state *state, unsigned int scale) { - return pwm ? pwm->polarity : PWM_POLARITY_NORMAL; + if (!state->period) + return 0; + + return DIV_ROUND_CLOSEST_ULL((u64)state->duty_cycle * scale, + state->period); +} + +/** + * pwm_set_relative_duty_cycle() - Set a relative duty cycle value + * @state: PWM state to fill + * @duty_cycle: relative duty cycle value + * @scale: scale in which @duty_cycle is expressed + * + * This functions converts a relative into an absolute duty cycle (expressed + * in nanoseconds), and puts the result in state->duty_cycle. + * + * For example if you want to configure a 50% duty cycle, call: + * + * pwm_init_state(pwm, &state); + * pwm_set_relative_duty_cycle(&state, 50, 100); + * pwm_apply_state(pwm, &state); + * + * This functions returns -EINVAL if @duty_cycle and/or @scale are + * inconsistent (@scale == 0 or @duty_cycle > @scale). + */ +static inline int +pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle, + unsigned int scale) +{ + if (!scale || duty_cycle > scale) + return -EINVAL; + + state->duty_cycle = DIV_ROUND_CLOSEST_ULL((u64)duty_cycle * + state->period, + scale); + + return 0; } /** @@ -150,8 +243,16 @@ static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm) * @free: optional hook for freeing a PWM * @config: configure duty cycles and period length for this PWM * @set_polarity: configure the polarity of this PWM + * @capture: capture and report PWM signal * @enable: enable PWM output toggling * @disable: disable PWM output toggling + * @apply: atomically apply a new PWM config. The state argument + * should be adjusted with the real hardware config (if the + * approximate the period or duty_cycle value, state should + * reflect it) + * @get_state: get the current PWM state. This function is only + * called once per PWM device when the PWM chip is + * registered. 
* @dbg_show: optional routine to show contents in debugfs * @owner: helps prevent removal of modules exporting active PWMs */ @@ -162,8 +263,14 @@ struct pwm_ops { int duty_ns, int period_ns); int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm, enum pwm_polarity polarity); + int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_capture *result, unsigned long timeout); int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm); void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm); + int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state); + void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state); #ifdef CONFIG_DEBUG_FS void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s); #endif @@ -198,7 +305,130 @@ struct pwm_chip { bool can_sleep; }; +/** + * struct pwm_capture - PWM capture data + * @period: period of the PWM signal (in nanoseconds) + * @duty_cycle: duty cycle of the PWM signal (in nanoseconds) + */ +struct pwm_capture { + unsigned int period; + unsigned int duty_cycle; +}; + #if IS_ENABLED(CONFIG_PWM) +/* PWM user APIs */ +struct pwm_device *pwm_request(int pwm_id, const char *label); +void pwm_free(struct pwm_device *pwm); +int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state); +int pwm_adjust_config(struct pwm_device *pwm); + +/** + * pwm_config() - change a PWM device configuration + * @pwm: PWM device + * @duty_ns: "on" time (in nanoseconds) + * @period_ns: duration (in nanoseconds) of one cycle + * + * Returns: 0 on success or a negative error code on failure. + */ +static inline int pwm_config(struct pwm_device *pwm, int duty_ns, + int period_ns) +{ + struct pwm_state state; + + if (!pwm) + return -EINVAL; + + if (duty_ns < 0 || period_ns < 0) + return -EINVAL; + + pwm_get_state(pwm, &state); + if (state.duty_cycle == duty_ns && state.period == period_ns) + return 0; + + state.duty_cycle = duty_ns; + state.period = period_ns; + return pwm_apply_state(pwm, &state); +} + +/** + * pwm_set_polarity() - configure the polarity of a PWM signal + * @pwm: PWM device + * @polarity: new polarity of the PWM signal + * + * Note that the polarity cannot be configured while the PWM device is + * enabled. + * + * Returns: 0 on success or a negative error code on failure. + */ +static inline int pwm_set_polarity(struct pwm_device *pwm, + enum pwm_polarity polarity) +{ + struct pwm_state state; + + if (!pwm) + return -EINVAL; + + pwm_get_state(pwm, &state); + if (state.polarity == polarity) + return 0; + + /* + * Changing the polarity of a running PWM without adjusting the + * dutycycle/period value is a bit risky (can introduce glitches). + * Return -EBUSY in this case. + * Note that this is allowed when using pwm_apply_state() because + * the user specifies all the parameters. + */ + if (state.enabled) + return -EBUSY; + + state.polarity = polarity; + return pwm_apply_state(pwm, &state); +} + +/** + * pwm_enable() - start a PWM output toggling + * @pwm: PWM device + * + * Returns: 0 on success or a negative error code on failure. 
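For comparison with these compatibility wrappers, a consumer using the atomic API directly might do (a sketch, assuming pwm was obtained via pwm_get(); the 50% duty value is arbitrary):

static int demo_pwm_half_duty(struct pwm_device *pwm)
{
	struct pwm_state state;
	int err;

	/* Current state, with period/polarity reset from pwm->args. */
	pwm_init_state(pwm, &state);

	err = pwm_set_relative_duty_cycle(&state, 50, 100);	/* 50% */
	if (err)
		return err;

	state.enabled = true;
	return pwm_apply_state(pwm, &state);	/* one atomic update */
}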
+ */ +static inline int pwm_enable(struct pwm_device *pwm) +{ + struct pwm_state state; + + if (!pwm) + return -EINVAL; + + pwm_get_state(pwm, &state); + if (state.enabled) + return 0; + + state.enabled = true; + return pwm_apply_state(pwm, &state); +} + +/** + * pwm_disable() - stop a PWM output toggling + * @pwm: PWM device + */ +static inline void pwm_disable(struct pwm_device *pwm) +{ + struct pwm_state state; + + if (!pwm) + return; + + pwm_get_state(pwm, &state); + if (!state.enabled) + return; + + state.enabled = false; + pwm_apply_state(pwm, &state); +} + +/* PWM provider APIs */ +int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result, + unsigned long timeout); int pwm_set_chip_data(struct pwm_device *pwm, void *data); void *pwm_get_chip_data(struct pwm_device *pwm); @@ -224,6 +454,54 @@ void devm_pwm_put(struct device *dev, struct pwm_device *pwm); bool pwm_can_sleep(struct pwm_device *pwm); #else +static inline struct pwm_device *pwm_request(int pwm_id, const char *label) +{ + return ERR_PTR(-ENODEV); +} + +static inline void pwm_free(struct pwm_device *pwm) +{ +} + +static inline int pwm_apply_state(struct pwm_device *pwm, + const struct pwm_state *state) +{ + return -ENOTSUPP; +} + +static inline int pwm_adjust_config(struct pwm_device *pwm) +{ + return -ENOTSUPP; +} + +static inline int pwm_config(struct pwm_device *pwm, int duty_ns, + int period_ns) +{ + return -EINVAL; +} + +static inline int pwm_capture(struct pwm_device *pwm, + struct pwm_capture *result, + unsigned long timeout) +{ + return -EINVAL; +} + +static inline int pwm_set_polarity(struct pwm_device *pwm, + enum pwm_polarity polarity) +{ + return -ENOTSUPP; +} + +static inline int pwm_enable(struct pwm_device *pwm) +{ + return -EINVAL; +} + +static inline void pwm_disable(struct pwm_device *pwm) +{ +} + static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data) { return -EINVAL; @@ -295,6 +573,38 @@ static inline bool pwm_can_sleep(struct pwm_device *pwm) } #endif +static inline void pwm_apply_args(struct pwm_device *pwm) +{ + struct pwm_state state = { }; + + /* + * PWM users calling pwm_apply_args() expect to have a fresh config + * where the polarity and period are set according to pwm_args info. + * The problem is, polarity can only be changed when the PWM is + * disabled. + * + * PWM drivers supporting hardware readout may declare the PWM device + * as enabled, and prevent polarity setting, which changes from the + * existing behavior, where all PWM devices are declared as disabled + * at startup (even if they are actually enabled), thus authorizing + * polarity setting. + * + * To fulfill this requirement, we apply a new state which disables + * the PWM device and set the reference period and polarity config. + * + * Note that PWM users requiring a smooth handover between the + * bootloader and the kernel (like critical regulators controlled by + * PWM devices) will have to switch to the atomic API and avoid calling + * pwm_apply_args(). 
+ */ + + state.enabled = false; + state.polarity = pwm->args.polarity; + state.period = pwm->args.period; + + pwm_apply_state(pwm, &state); +} + struct pwm_lookup { struct list_head list; const char *provider; diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 9e12000914b3..cc32ab852fbc 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -29,6 +29,14 @@ extern bool qcom_scm_hdcp_available(void); extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp); +extern bool qcom_scm_pas_supported(u32 peripheral); +extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, + size_t size); +extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, + phys_addr_t size); +extern int qcom_scm_pas_auth_and_reset(u32 peripheral); +extern int qcom_scm_pas_shutdown(u32 peripheral); + #define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0 #define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1 diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 53ecb37ae563..40c0ada01806 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -12,10 +12,21 @@ #define CORE_SPQE_PAGE_SIZE_BYTES 4096 #define X_FINAL_CLEANUP_AGG_INT 1 +#define NUM_OF_GLOBAL_QUEUES 128 + +/* Queue Zone sizes in bytes */ +#define TSTORM_QZONE_SIZE 8 +#define MSTORM_QZONE_SIZE 0 +#define USTORM_QZONE_SIZE 8 +#define XSTORM_QZONE_SIZE 8 +#define YSTORM_QZONE_SIZE 0 +#define PSTORM_QZONE_SIZE 0 + +#define ETH_MAX_NUM_RX_QUEUES_PER_VF 16 #define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 7 -#define FW_REVISION_VERSION 3 +#define FW_MINOR_VERSION 10 +#define FW_REVISION_VERSION 5 #define FW_ENGINEERING_VERSION 0 /***********************/ @@ -97,45 +108,86 @@ #define DQ_XCM_AGG_VAL_SEL_REG6 7 /* XCM agg val selection */ -#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \ - DQ_XCM_AGG_VAL_SEL_WORD2 -#define DQ_XCM_ETH_TX_BD_CONS_CMD \ - DQ_XCM_AGG_VAL_SEL_WORD3 -#define DQ_XCM_CORE_TX_BD_CONS_CMD \ - DQ_XCM_AGG_VAL_SEL_WORD3 -#define DQ_XCM_ETH_TX_BD_PROD_CMD \ - DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_CORE_TX_BD_PROD_CMD \ - DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_CORE_SPQ_PROD_CMD \ - DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 +#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2 +#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 + +/* UCM agg val selection (HW) */ +#define DQ_UCM_AGG_VAL_SEL_WORD0 0 +#define DQ_UCM_AGG_VAL_SEL_WORD1 1 +#define DQ_UCM_AGG_VAL_SEL_WORD2 2 +#define DQ_UCM_AGG_VAL_SEL_WORD3 3 +#define DQ_UCM_AGG_VAL_SEL_REG0 4 +#define DQ_UCM_AGG_VAL_SEL_REG1 5 +#define DQ_UCM_AGG_VAL_SEL_REG2 6 +#define DQ_UCM_AGG_VAL_SEL_REG3 7 + +/* UCM agg val selection (FW) */ +#define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2 +#define DQ_UCM_ETH_PMD_RX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD3 +#define DQ_UCM_ROCE_CQ_CONS_CMD DQ_UCM_AGG_VAL_SEL_REG0 +#define DQ_UCM_ROCE_CQ_PROD_CMD DQ_UCM_AGG_VAL_SEL_REG2 + +/* TCM agg val selection (HW) */ +#define DQ_TCM_AGG_VAL_SEL_WORD0 0 +#define DQ_TCM_AGG_VAL_SEL_WORD1 1 +#define DQ_TCM_AGG_VAL_SEL_WORD2 2 +#define DQ_TCM_AGG_VAL_SEL_WORD3 3 +#define DQ_TCM_AGG_VAL_SEL_REG1 4 +#define DQ_TCM_AGG_VAL_SEL_REG2 5 +#define 
DQ_TCM_AGG_VAL_SEL_REG6 6 +#define DQ_TCM_AGG_VAL_SEL_REG9 7 + +/* TCM agg val selection (FW) */ +#define DQ_TCM_L2B_BD_PROD_CMD \ + DQ_TCM_AGG_VAL_SEL_WORD1 +#define DQ_TCM_ROCE_RQ_PROD_CMD \ + DQ_TCM_AGG_VAL_SEL_WORD0 /* XCM agg counter flag selection */ -#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 -#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1 -#define DQ_XCM_AGG_FLG_SHIFT_CF12 2 -#define DQ_XCM_AGG_FLG_SHIFT_CF13 3 -#define DQ_XCM_AGG_FLG_SHIFT_CF18 4 -#define DQ_XCM_AGG_FLG_SHIFT_CF19 5 -#define DQ_XCM_AGG_FLG_SHIFT_CF22 6 -#define DQ_XCM_AGG_FLG_SHIFT_CF23 7 +#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 +#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1 +#define DQ_XCM_AGG_FLG_SHIFT_CF12 2 +#define DQ_XCM_AGG_FLG_SHIFT_CF13 3 +#define DQ_XCM_AGG_FLG_SHIFT_CF18 4 +#define DQ_XCM_AGG_FLG_SHIFT_CF19 5 +#define DQ_XCM_AGG_FLG_SHIFT_CF22 6 +#define DQ_XCM_AGG_FLG_SHIFT_CF23 7 /* XCM agg counter flag selection */ -#define DQ_XCM_ETH_DQ_CF_CMD (1 << \ - DQ_XCM_AGG_FLG_SHIFT_CF18) -#define DQ_XCM_CORE_DQ_CF_CMD (1 << \ - DQ_XCM_AGG_FLG_SHIFT_CF18) -#define DQ_XCM_ETH_TERMINATE_CMD (1 << \ - DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_CORE_TERMINATE_CMD (1 << \ - DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << \ - DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << \ - DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_ETH_TPH_EN_CMD (1 << \ - DQ_XCM_AGG_FLG_SHIFT_CF23) +#define DQ_XCM_CORE_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_CORE_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_ETH_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_TPH_EN_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF23) + +/* UCM agg counter flag selection (HW) */ +#define DQ_UCM_AGG_FLG_SHIFT_CF0 0 +#define DQ_UCM_AGG_FLG_SHIFT_CF1 1 +#define DQ_UCM_AGG_FLG_SHIFT_CF3 2 +#define DQ_UCM_AGG_FLG_SHIFT_CF4 3 +#define DQ_UCM_AGG_FLG_SHIFT_CF5 4 +#define DQ_UCM_AGG_FLG_SHIFT_CF6 5 +#define DQ_UCM_AGG_FLG_SHIFT_RULE0EN 6 +#define DQ_UCM_AGG_FLG_SHIFT_RULE1EN 7 + +/* UCM agg counter flag selection (FW) */ +#define DQ_UCM_ETH_PMD_TX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4) +#define DQ_UCM_ETH_PMD_RX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5) + +#define DQ_REGION_SHIFT (12) + +/* DPM */ +#define DQ_DPM_WQE_BUFF_SIZE (320) + +/* Conn type ranges */ +#define DQ_CONN_TYPE_RANGE_SHIFT (4) /*****************/ /* QM CONSTANTS */ @@ -282,8 +334,66 @@ (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \ PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1) -#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 -#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 + +#define PXP_VF_BAR0_START_IGU 0 +#define PXP_VF_BAR0_IGU_LENGTH 0x3000 +#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \ + PXP_VF_BAR0_IGU_LENGTH - 1) + +#define PXP_VF_BAR0_START_DQ 0x3000 +#define PXP_VF_BAR0_DQ_LENGTH 0x200 +#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0 +#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET) +#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \ + + 4) +#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \ + PXP_VF_BAR0_DQ_LENGTH - 1) + +#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200 +#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200 +#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) + +#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400 +#define 
PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) + +#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600 +#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) + +#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800 +#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) + +#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00 +#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) + +#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00 +#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) + +#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000 +#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000 + +#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32 + +#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 +#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 /* ILT Records */ #define PXP_NUM_ILT_RECORDS_BB 7600 @@ -322,15 +432,64 @@ struct async_data { u8 fw_debug_param; }; +struct coalescing_timeset { + u8 value; +#define COALESCING_TIMESET_TIMESET_MASK 0x7F +#define COALESCING_TIMESET_TIMESET_SHIFT 0 +#define COALESCING_TIMESET_VALID_MASK 0x1 +#define COALESCING_TIMESET_VALID_SHIFT 7 +}; + +struct common_prs_pf_msg_info { + __le32 value; +#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_MASK 0x1 +#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_SHIFT 0 +#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_MASK 0x1 +#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_SHIFT 1 +#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_MASK 0x1 +#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_SHIFT 2 +#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_MASK 0x1 +#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_SHIFT 3 +#define COMMON_PRS_PF_MSG_INFO_RESERVED_MASK 0xFFFFFFF +#define COMMON_PRS_PF_MSG_INFO_RESERVED_SHIFT 4 +}; + +struct common_queue_zone { + __le16 ring_drv_data_consumer; + __le16 reserved; +}; + +struct eth_rx_prod_data { + __le16 bd_prod; + __le16 cqe_prod; +}; + struct regpair { __le32 lo; __le32 hi; }; +struct vf_pf_channel_eqe_data { + struct regpair msg_addr; +}; + +struct malicious_vf_eqe_data { + u8 vf_id; + u8 err_id; + __le16 reserved[3]; +}; + +struct initial_cleanup_eqe_data { + u8 vf_id; + u8 reserved[7]; +}; + /* Event Data Union */ union event_ring_data { - u8 bytes[8]; - struct async_data async_info; + u8 bytes[8]; + struct vf_pf_channel_eqe_data vf_pf_channel; + struct malicious_vf_eqe_data malicious_vf; + struct initial_cleanup_eqe_data vf_init_cleanup; }; /* Event Ring Entry */ @@ -358,9 +517,9 @@ enum mf_mode { /* Per-protocol connection types */ enum protocol_type { - PROTOCOLID_RESERVED1, + PROTOCOLID_ISCSI, PROTOCOLID_RESERVED2, - PROTOCOLID_RESERVED3, + PROTOCOLID_ROCE, PROTOCOLID_CORE, PROTOCOLID_ETH, PROTOCOLID_RESERVED4, @@ -371,6 +530,16 @@ enum protocol_type { MAX_PROTOCOL_TYPE }; +struct ustorm_eth_queue_zone { + struct coalescing_timeset int_coalescing_timeset; + u8 reserved[3]; +}; + +struct ustorm_queue_zone { + struct ustorm_eth_queue_zone eth; + struct common_queue_zone common; +}; + /* status block structure */ struct cau_pi_entry { u32 prod; @@ -526,7 +695,10 @@ struct parsing_and_err_flags { #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 }; -/* Concrete Function ID. 
*/ +struct pb_context { + __le32 crc[4]; +}; + struct pxp_concrete_fid { __le16 fid; #define PXP_CONCRETE_FID_PFID_MASK 0xF @@ -593,6 +765,72 @@ struct pxp_ptt_entry { }; /* RSS hash type */ +struct rdif_task_context { + __le32 initial_ref_tag; + __le16 app_tag_value; + __le16 app_tag_mask; + u8 flags0; +#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0 +#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1 +#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1 +#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 +#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2 +#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1 +#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3 +#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 +#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4 +#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 +#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 +#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 +#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7 + u8 partial_dif_data[7]; + __le16 partial_crc_value; + __le16 partial_checksum_value; + __le32 offset_in_io; + __le16 flags1; +#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0 +#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1 +#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2 +#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3 +#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4 +#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5 +#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 +#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6 +#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 +#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9 +#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 +#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11 +#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1 +#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12 +#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 +#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13 +#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14 +#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15 + __le16 state; +#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF +#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0 +#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF +#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4 +#define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1 +#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8 +#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1 +#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9 +#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF +#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10 +#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3 +#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14 + __le32 reserved2; +}; + enum rss_hash_type { RSS_HASH_TYPE_DEFAULT = 0, RSS_HASH_TYPE_IPV4 = 1, @@ -621,19 +859,122 @@ struct status_block { #define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 }; -struct tunnel_parsing_flags { - u8 flags; -#define TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 -#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0 -#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1 -#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2 -#define 
TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3 -#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3 -#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1 -#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5 -#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1 -#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6 -#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1 -#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7 +struct tdif_task_context { + __le32 initial_ref_tag; + __le16 app_tag_value; + __le16 app_tag_mask; + __le16 partial_crc_valueB; + __le16 partial_checksum_valueB; + __le16 stateB; +#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF +#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0 +#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF +#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4 +#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1 +#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8 +#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1 +#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9 +#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F +#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10 + u8 reserved1; + u8 flags0; +#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0 +#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1 +#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1 +#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 +#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2 +#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1 +#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3 +#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 +#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4 +#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 +#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 +#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7 + __le32 flags1; +#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0 +#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1 +#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2 +#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3 +#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4 +#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5 +#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 +#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6 +#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 +#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9 +#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 +#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11 +#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12 +#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 +#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13 +#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF +#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14 +#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF +#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18 +#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1 +#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22 +#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1 +#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23 +#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF +#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24 +#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 +#define 
TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28 +#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29 +#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 +#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30 +#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31 + __le32 offset_in_iob; + __le16 partial_crc_value_a; + __le16 partial_checksum_valuea_; + __le32 offset_in_ioa; + u8 partial_dif_data_a[8]; + u8 partial_dif_data_b[8]; +}; + +struct timers_context { + __le32 logical_client0; +#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0 +#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED0_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED0_SHIFT 30 + __le32 logical_client1; +#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0 +#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED1_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED1_SHIFT 30 + __le32 logical_client2; +#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0 +#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED2_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED2_SHIFT 30 + __le32 host_expiration_fields; +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0xFFFFFFF +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28 +#define TIMERS_CONTEXT_RESERVED3_MASK 0x7 +#define TIMERS_CONTEXT_RESERVED3_SHIFT 29 }; #endif /* __COMMON_HSI__ */ diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index 092cb0c1afcb..b5ebc697d05f 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -12,6 +12,8 @@ /********************/ /* ETH FW CONSTANTS */ /********************/ +#define ETH_HSI_VER_MAJOR 3 +#define ETH_HSI_VER_MINOR 0 #define ETH_CACHE_LINE_SIZE 64 #define ETH_MAX_RAMROD_PER_CON 8 @@ -57,19 +59,6 @@ #define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 #define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 -/* Queue Zone sizes */ -#define TSTORM_QZONE_SIZE 0 -#define MSTORM_QZONE_SIZE sizeof(struct mstorm_eth_queue_zone) -#define USTORM_QZONE_SIZE sizeof(struct ustorm_eth_queue_zone) -#define XSTORM_QZONE_SIZE 0 -#define YSTORM_QZONE_SIZE sizeof(struct ystorm_eth_queue_zone) -#define PSTORM_QZONE_SIZE 0 - -/* Interrupt coalescing TimeSet */ -struct coalescing_timeset { - u8 timeset; - u8 valid; -}; struct eth_tx_1st_bd_flags { u8 bitfields; @@ -97,12 +86,12 @@ struct eth_tx_data_1st_bd { u8 nbds; struct eth_tx_1st_bd_flags bd_flags; __le16 bitfields; -#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_MASK 0x1 -#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT 0 +#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0 #define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 #define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1 -#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_MASK 0x3FFF -#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_SHIFT 2 
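These _MASK/_SHIFT pairs are meant to be applied to the containing little-endian word; a sketch of the idiom with a locally defined helper (the qed driver carries equivalent field-access macros):

/* Local helper for illustration only. */
#define DEMO_GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

/* Example: test the tunnel flag in the first tx BD's bitfields. */
static bool demo_tx_bd_is_tunneled(const struct eth_tx_data_1st_bd *bd)
{
	return DEMO_GET_FIELD(le16_to_cpu(bd->bitfields),
			      ETH_TX_DATA_1ST_BD_TUNN_FLAG);
}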
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF +#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2 }; /* The parsing information data for the second tx bd of a given packet. */ @@ -136,28 +125,51 @@ struct eth_tx_data_2nd_bd { #define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 }; +struct eth_fast_path_cqe_fw_debug { + u8 reserved0; + u8 reserved1; + __le16 reserved2; +}; + +/* tunneling parsing flags */ +struct eth_tunnel_parsing_flags { + u8 flags; +#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 +#define ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0 +#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1 +#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2 +#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3 +#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3 +#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1 +#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5 +#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1 +#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6 +#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1 +#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7 +}; + /* Regular ETH Rx FP CQE. */ struct eth_fast_path_rx_reg_cqe { - u8 type; - u8 bitfields; + u8 type; + u8 bitfields; #define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 #define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 #define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF #define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 #define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 #define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 - __le16 pkt_len; - struct parsing_and_err_flags pars_flags; - __le16 vlan_tag; - __le32 rss_hash; - __le16 len_on_first_bd; - u8 placement_offset; - struct tunnel_parsing_flags tunnel_pars_flags; - u8 bd_num; - u8 reserved[7]; - u32 fw_debug; - u8 reserved1[3]; - u8 flags; + __le16 pkt_len; + struct parsing_and_err_flags pars_flags; + __le16 vlan_tag; + __le32 rss_hash; + __le16 len_on_first_bd; + u8 placement_offset; + struct eth_tunnel_parsing_flags tunnel_pars_flags; + u8 bd_num; + u8 reserved[7]; + struct eth_fast_path_cqe_fw_debug fw_debug; + u8 reserved1[3]; + u8 flags; #define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1 #define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0 #define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1 @@ -207,11 +219,11 @@ struct eth_fast_path_rx_tpa_start_cqe { __le32 rss_hash; __le16 len_on_first_bd; u8 placement_offset; - struct tunnel_parsing_flags tunnel_pars_flags; + struct eth_tunnel_parsing_flags tunnel_pars_flags; u8 tpa_agg_index; u8 header_len; __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]; - u32 fw_debug; + struct eth_fast_path_cqe_fw_debug fw_debug; }; /* The L4 pseudo checksum mode for Ethernet */ @@ -264,12 +276,25 @@ enum eth_rx_cqe_type { MAX_ETH_RX_CQE_TYPE }; -/* ETH Rx producers data */ -struct eth_rx_prod_data { - __le16 bd_prod; - __le16 cqe_prod; - __le16 reserved; - __le16 reserved1; +enum eth_rx_tunn_type { + ETH_RX_NO_TUNN, + ETH_RX_TUNN_GENEVE, + ETH_RX_TUNN_GRE, + ETH_RX_TUNN_VXLAN, + MAX_ETH_RX_TUNN_TYPE +}; + +/* Aggregation end reason. 
*/ +enum eth_tpa_end_reason { + ETH_AGG_END_UNUSED, + ETH_AGG_END_SP_UPDATE, + ETH_AGG_END_MAX_LEN, + ETH_AGG_END_LAST_SEG, + ETH_AGG_END_TIMEOUT, + ETH_AGG_END_NOT_CONSISTENT, + ETH_AGG_END_OUT_OF_ORDER, + ETH_AGG_END_NON_TPA_SEG, + MAX_ETH_TPA_END_REASON }; /* The first tx bd of a given packet */ @@ -337,21 +362,18 @@ union eth_tx_bd_types { }; /* Mstorm Queue Zone */ -struct mstorm_eth_queue_zone { - struct eth_rx_prod_data rx_producers; - __le32 reserved[2]; -}; - -/* Ustorm Queue Zone */ -struct ustorm_eth_queue_zone { - struct coalescing_timeset int_coalescing_timeset; - __le16 reserved[3]; +enum eth_tx_tunn_type { + ETH_TX_TUNN_GENEVE, + ETH_TX_TUNN_TTAG, + ETH_TX_TUNN_GRE, + ETH_TX_TUNN_VXLAN, + MAX_ETH_TX_TUNN_TYPE }; /* Ystorm Queue Zone */ -struct ystorm_eth_queue_zone { - struct coalescing_timeset int_coalescing_timeset; - __le16 reserved[3]; +struct xstorm_eth_queue_zone { + struct coalescing_timeset int_coalescing_timeset; + u8 reserved[7]; }; /* ETH doorbell data */ diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h new file mode 100644 index 000000000000..b3c0feb15ae9 --- /dev/null +++ b/include/linux/qed/iscsi_common.h @@ -0,0 +1,1439 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef __ISCSI_COMMON__ +#define __ISCSI_COMMON__ +/**********************/ +/* ISCSI FW CONSTANTS */ +/**********************/ + +/* iSCSI HSI constants */ +#define ISCSI_DEFAULT_MTU (1500) + +/* Current iSCSI HSI version number composed of two fields (16 bit) */ +#define ISCSI_HSI_MAJOR_VERSION (0) +#define ISCSI_HSI_MINOR_VERSION (0) + +/* KWQ (kernel work queue) layer codes */ +#define ISCSI_SLOW_PATH_LAYER_CODE (6) + +/* CQE completion status */ +#define ISCSI_EQE_COMPLETION_SUCCESS (0x0) +#define ISCSI_EQE_RST_CONN_RCVD (0x1) + +/* iSCSI parameter defaults */ +#define ISCSI_DEFAULT_HEADER_DIGEST (0) +#define ISCSI_DEFAULT_DATA_DIGEST (0) +#define ISCSI_DEFAULT_INITIAL_R2T (1) +#define ISCSI_DEFAULT_IMMEDIATE_DATA (1) +#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000) +#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000) +#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000) +#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1) + +/* iSCSI parameter limits */ +#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200) +#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff) +#define ISCSI_MIN_VAL_BURST_LENGTH (0x200) +#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff) +#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1) +#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff) + +/* iSCSI reserved params */ +#define ISCSI_ITT_ALL_ONES (0xffffffff) +#define ISCSI_TTT_ALL_ONES (0xffffffff) + +#define ISCSI_OPTION_1_OFF_CHIP_TCP 1 +#define ISCSI_OPTION_2_ON_CHIP_TCP 2 + +#define ISCSI_INITIATOR_MODE 0 +#define ISCSI_TARGET_MODE 1 + +/* iSCSI request op codes */ +#define ISCSI_OPCODE_NOP_OUT_NO_IMM (0) +#define ISCSI_OPCODE_NOP_OUT ( \ + ISCSI_OPCODE_NOP_OUT_NO_IMM | 0x40) +#define ISCSI_OPCODE_SCSI_CMD_NO_IMM (1) +#define ISCSI_OPCODE_SCSI_CMD ( \ + ISCSI_OPCODE_SCSI_CMD_NO_IMM | 0x40) +#define ISCSI_OPCODE_TMF_REQUEST_NO_IMM (2) +#define ISCSI_OPCODE_TMF_REQUEST ( \ + ISCSI_OPCODE_TMF_REQUEST_NO_IMM | 0x40) +#define ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM (3) +#define ISCSI_OPCODE_LOGIN_REQUEST ( \ + ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM | 0x40) +#define ISCSI_OPCODE_TEXT_REQUEST_NO_IMM (4) +#define 
ISCSI_OPCODE_TEXT_REQUEST ( \ + ISCSI_OPCODE_TEXT_REQUEST_NO_IMM | 0x40) +#define ISCSI_OPCODE_DATA_OUT (5) +#define ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM (6) +#define ISCSI_OPCODE_LOGOUT_REQUEST ( \ + ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM | 0x40) + +/* iSCSI response/messages op codes */ +#define ISCSI_OPCODE_NOP_IN (0x20) +#define ISCSI_OPCODE_SCSI_RESPONSE (0x21) +#define ISCSI_OPCODE_TMF_RESPONSE (0x22) +#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23) +#define ISCSI_OPCODE_TEXT_RESPONSE (0x24) +#define ISCSI_OPCODE_DATA_IN (0x25) +#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26) +#define ISCSI_OPCODE_R2T (0x31) +#define ISCSI_OPCODE_ASYNC_MSG (0x32) +#define ISCSI_OPCODE_REJECT (0x3f) + +/* iSCSI stages */ +#define ISCSI_STAGE_SECURITY_NEGOTIATION (0) +#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1) +#define ISCSI_STAGE_FULL_FEATURE_PHASE (3) + +/* iSCSI CQE errors */ +#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08) +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10) +#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20) + +struct cqe_error_bitmap { + u8 cqe_error_status_bits; +#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7 +#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0 +#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3 +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1 +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4 +#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5 +#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6 +#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1 +#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7 +}; + +union cqe_error_status { + u8 error_status; + struct cqe_error_bitmap error_bits; +}; + +struct data_hdr { + __le32 data[12]; +}; + +struct iscsi_async_msg_hdr { + __le16 reserved0; + u8 flags_attr; +#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F +#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0 +#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1 +#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 all_ones; + __le32 reserved1; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le16 param1_rsrv; + u8 async_vcode; + u8 async_event; + __le16 param3_rsrv; + __le16 param2_rsrv; + __le32 reserved7; +}; + +struct iscsi_sge { + struct regpair sge_addr; + __le16 sge_len; + __le16 reserved0; + __le32 reserved1; +}; + +struct iscsi_cached_sge_ctx { + struct iscsi_sge sge; + struct regpair reserved; + __le32 dsgl_curr_offset[2]; +}; + +struct iscsi_cmd_hdr { + __le16 reserved1; + u8 flags_attr; +#define ISCSI_CMD_HDR_ATTR_MASK 0x7 +#define ISCSI_CMD_HDR_ATTR_SHIFT 0 +#define ISCSI_CMD_HDR_RSRV_MASK 0x3 +#define ISCSI_CMD_HDR_RSRV_SHIFT 3 +#define ISCSI_CMD_HDR_WRITE_MASK 0x1 +#define ISCSI_CMD_HDR_WRITE_SHIFT 5 +#define ISCSI_CMD_HDR_READ_MASK 0x1 +#define ISCSI_CMD_HDR_READ_SHIFT 6 +#define ISCSI_CMD_HDR_FINAL_MASK 0x1 +#define ISCSI_CMD_HDR_FINAL_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 expected_transfer_length; + __le32 cmd_sn; + __le32 exp_stat_sn; + 
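The *_NO_IMM request opcodes and their `| 0x40` variants above encode the iSCSI immediate-delivery ("I") bit: in the first byte of the basic header segment, the opcode proper occupies the low six bits and bit 6 is the I flag, which is exactly what the ISCSI_COMMON_HDR_OPCODE/IMM mask-shift pairs further down describe. A small, self-contained decoder, illustrative only:

#include <stdint.h>
#include <stdio.h>

#define ISCSI_OPCODE_SCSI_CMD_NO_IMM	(1)
#define ISCSI_OPCODE_SCSI_CMD		(ISCSI_OPCODE_SCSI_CMD_NO_IMM | 0x40)

#define ISCSI_COMMON_HDR_OPCODE_MASK	0x3F
#define ISCSI_COMMON_HDR_OPCODE_SHIFT	0
#define ISCSI_COMMON_HDR_IMM_MASK	0x1
#define ISCSI_COMMON_HDR_IMM_SHIFT	6

/* Split the first BHS byte into opcode and immediate flag. */
static void decode_first_byte(uint8_t b)
{
	unsigned opcode = (b >> ISCSI_COMMON_HDR_OPCODE_SHIFT) &
			  ISCSI_COMMON_HDR_OPCODE_MASK;
	unsigned imm = (b >> ISCSI_COMMON_HDR_IMM_SHIFT) &
		       ISCSI_COMMON_HDR_IMM_MASK;

	printf("opcode=0x%02x immediate=%u\n", opcode, imm);
}

int main(void)
{
	decode_first_byte(ISCSI_OPCODE_SCSI_CMD_NO_IMM); /* opcode=0x01 imm=0 */
	decode_first_byte(ISCSI_OPCODE_SCSI_CMD);        /* opcode=0x01 imm=1 */
	return 0;
}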
__le32 cdb[4]; +}; + +struct iscsi_common_hdr { + u8 hdr_status; + u8 hdr_response; + u8 hdr_flags; + u8 hdr_first_byte; +#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F +#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0 +#define ISCSI_COMMON_HDR_IMM_MASK 0x1 +#define ISCSI_COMMON_HDR_IMM_SHIFT 6 +#define ISCSI_COMMON_HDR_RSRV_MASK 0x1 +#define ISCSI_COMMON_HDR_RSRV_SHIFT 7 + __le32 hdr_second_dword; +#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24 + __le32 lun_reserved[4]; + __le32 data[6]; +}; + +struct iscsi_conn_offload_params { + struct regpair sq_pbl_addr; + struct regpair r2tq_pbl_addr; + struct regpair xhq_pbl_addr; + struct regpair uhq_pbl_addr; + __le32 initial_ack; + __le16 physical_q0; + __le16 physical_q1; + u8 flags; +#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1 +#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0 +#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1 +#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1 +#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x3F +#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 2 + u8 pbl_page_size_log; + u8 pbe_page_size_log; + u8 default_cq; + __le32 stat_sn; +}; + +struct iscsi_slow_path_hdr { + u8 op_code; + u8 flags; +#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF +#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0 +#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7 +#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4 +#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1 +#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7 +}; + +struct iscsi_conn_update_ramrod_params { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + u8 flags; +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0xF +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 4 + u8 reserved0[3]; + __le32 max_seq_size; + __le32 max_send_pdu_length; + __le32 max_recv_pdu_length; + __le32 first_seq_length; + __le32 exp_stat_sn; +}; + +struct iscsi_ext_cdb_cmd_hdr { + __le16 reserved1; + u8 flags_attr; +#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7 +#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0 +#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3 +#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3 +#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5 +#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6 +#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF +#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 expected_transfer_length; + __le32 cmd_sn; + __le32 exp_stat_sn; + struct iscsi_sge cdb_sge; +}; + +struct iscsi_login_req_hdr { + u8 version_min; + u8 version_max; + u8 flags_attr; +#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 
0x3 +#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3 +#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2 +#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3 +#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4 +#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1 +#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6 +#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1 +#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24 + __le32 isid_TABC; + __le16 tsih; + __le16 isid_d; + __le32 itt; + __le16 reserved1; + __le16 cid; + __le32 cmd_sn; + __le32 exp_stat_sn; + __le32 reserved2[4]; +}; + +struct iscsi_logout_req_hdr { + __le16 reserved0; + u8 reason_code; + u8 opcode; + __le32 reserved1; + __le32 reserved2[2]; + __le32 itt; + __le16 reserved3; + __le16 cid; + __le32 cmd_sn; + __le32 exp_stat_sn; + __le32 reserved4[4]; +}; + +struct iscsi_data_out_hdr { + __le16 reserved1; + u8 flags_attr; +#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F +#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0 +#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1 +#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7 + u8 opcode; + __le32 reserved2; + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 reserved3; + __le32 exp_stat_sn; + __le32 reserved4; + __le32 data_sn; + __le32 buffer_offset; + __le32 reserved5; +}; + +struct iscsi_data_in_hdr { + u8 status_rsvd; + u8 reserved1; + u8 flags; +#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1 +#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0 +#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1 +#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1 +#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1 +#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2 +#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7 +#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3 +#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1 +#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6 +#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1 +#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7 + u8 opcode; + __le32 reserved2; + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 data_sn; + __le32 buffer_offset; + __le32 residual_count; +}; + +struct iscsi_r2t_hdr { + u8 reserved0[3]; + u8 opcode; + __le32 reserved2; + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 r2t_sn; + __le32 buffer_offset; + __le32 desired_data_trns_len; +}; + +struct iscsi_nop_out_hdr { + __le16 reserved1; + u8 flags_attr; +#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F +#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0 +#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1 +#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7 + u8 opcode; + __le32 reserved2; + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 cmd_sn; + __le32 exp_stat_sn; + __le32 reserved3; + __le32 reserved4; + __le32 reserved5; + __le32 reserved6; +}; + +struct iscsi_nop_in_hdr { + __le16 reserved0; + u8 flags_attr; +#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F +#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0 +#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1 +#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 
max_cmd_sn; + __le32 reserved5; + __le32 reserved6; + __le32 reserved7; +}; + +struct iscsi_login_response_hdr { + u8 version_active; + u8 version_max; + u8 flags_attr; +#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2 +#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4 +#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1 +#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6 +#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1 +#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 + __le32 isid_TABC; + __le16 tsih; + __le16 isid_d; + __le32 itt; + __le32 reserved1; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le16 reserved2; + u8 status_detail; + u8 status_class; + __le32 reserved4[2]; +}; + +struct iscsi_logout_response_hdr { + u8 reserved1; + u8 response; + u8 flags; + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 + __le32 reserved2[2]; + __le32 itt; + __le32 reserved3; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 reserved4; + __le16 time2retain; + __le16 time2wait; + __le32 reserved5[1]; +}; + +struct iscsi_text_request_hdr { + __le16 reserved0; + u8 flags_attr; +#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F +#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0 +#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1 +#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6 +#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1 +#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 cmd_sn; + __le32 exp_stat_sn; + __le32 reserved4[4]; +}; + +struct iscsi_text_response_hdr { + __le16 reserved1; + u8 flags; +#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F +#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1 +#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6 +#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1 +#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 reserved4[3]; +}; + +struct iscsi_tmf_request_hdr { + __le16 reserved0; + u8 function; + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 rtt; + __le32 cmd_sn; + __le32 
exp_stat_sn; + __le32 ref_cmd_sn; + __le32 exp_data_sn; + __le32 reserved4[2]; +}; + +struct iscsi_tmf_response_hdr { + u8 reserved2; + u8 hdr_response; + u8 hdr_flags; + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair reserved0; + __le32 itt; + __le32 rtt; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 reserved4[3]; +}; + +struct iscsi_response_hdr { + u8 hdr_status; + u8 hdr_response; + u8 hdr_flags; + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 snack_tag; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 exp_data_sn; + __le32 bi_residual_count; + __le32 residual_count; +}; + +struct iscsi_reject_hdr { + u8 reserved4; + u8 hdr_reason; + u8 hdr_flags; + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair reserved0; + __le32 reserved1; + __le32 reserved2; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 data_sn; + __le32 reserved3[2]; +}; + +union iscsi_task_hdr { + struct iscsi_common_hdr common; + struct data_hdr data; + struct iscsi_cmd_hdr cmd; + struct iscsi_ext_cdb_cmd_hdr ext_cdb_cmd; + struct iscsi_login_req_hdr login_req; + struct iscsi_logout_req_hdr logout_req; + struct iscsi_data_out_hdr data_out; + struct iscsi_data_in_hdr data_in; + struct iscsi_r2t_hdr r2t; + struct iscsi_nop_out_hdr nop_out; + struct iscsi_nop_in_hdr nop_in; + struct iscsi_login_response_hdr login_response; + struct iscsi_logout_response_hdr logout_response; + struct iscsi_text_request_hdr text_request; + struct iscsi_text_response_hdr text_response; + struct iscsi_tmf_request_hdr tmf_request; + struct iscsi_tmf_response_hdr tmf_response; + struct iscsi_response_hdr response; + struct iscsi_reject_hdr reject; + struct iscsi_async_msg_hdr async_msg; +}; + +struct iscsi_cqe_common { + __le16 conn_id; + u8 cqe_type; + union cqe_error_status error_bitmap; + __le32 reserved[3]; + union iscsi_task_hdr iscsi_hdr; +}; + +struct iscsi_cqe_solicited { + __le16 conn_id; + u8 cqe_type; + union cqe_error_status error_bitmap; + __le16 itid; + u8 task_type; + u8 fw_dbg_field; + __le32 reserved1[2]; + union iscsi_task_hdr iscsi_hdr; +}; + +struct iscsi_cqe_unsolicited { + __le16 conn_id; + u8 cqe_type; + union cqe_error_status error_bitmap; + __le16 reserved0; + u8 reserved1; + u8 unsol_cqe_type; + struct regpair rqe_opaque; + union iscsi_task_hdr iscsi_hdr; +}; + +union iscsi_cqe { + struct iscsi_cqe_common cqe_common; + struct iscsi_cqe_solicited cqe_solicited; + struct iscsi_cqe_unsolicited cqe_unsolicited; +}; + +enum iscsi_cqes_type { + ISCSI_CQE_TYPE_SOLICITED = 1, + ISCSI_CQE_TYPE_UNSOLICITED, + ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE + , + ISCSI_CQE_TYPE_TASK_CLEANUP, + ISCSI_CQE_TYPE_DUMMY, + MAX_ISCSI_CQES_TYPE +}; + +enum iscsi_cqe_unsolicited_type { + ISCSI_CQE_UNSOLICITED_NONE, + ISCSI_CQE_UNSOLICITED_SINGLE, + ISCSI_CQE_UNSOLICITED_FIRST, + ISCSI_CQE_UNSOLICITED_MIDDLE, + 
ISCSI_CQE_UNSOLICITED_LAST, + MAX_ISCSI_CQE_UNSOLICITED_TYPE +}; + +struct iscsi_virt_sgl_ctx { + struct regpair sgl_base; + struct regpair dsgl_base; + __le32 sgl_initial_offset; + __le32 dsgl_initial_offset; + __le32 dsgl_curr_offset[2]; +}; + +struct iscsi_sgl_var_params { + u8 sgl_ptr; + u8 dsgl_ptr; + __le16 sge_offset; + __le16 dsge_offset; +}; + +struct iscsi_phys_sgl_ctx { + struct regpair sgl_base; + struct regpair dsgl_base; + u8 sgl_size; + u8 dsgl_size; + __le16 reserved; + struct iscsi_sgl_var_params var_params[2]; +}; + +union iscsi_data_desc_ctx { + struct iscsi_virt_sgl_ctx virt_sgl; + struct iscsi_phys_sgl_ctx phys_sgl; + struct iscsi_cached_sge_ctx cached_sge; +}; + +struct iscsi_debug_modes { + u8 flags; +#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5 +#define ISCSI_DEBUG_MODES_RESERVED0_MASK 0x3 +#define ISCSI_DEBUG_MODES_RESERVED0_SHIFT 6 +}; + +struct iscsi_dif_flags { + u8 flags; +#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF +#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 +#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1 +#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4 +#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7 +#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5 +}; + +enum iscsi_eqe_opcode { + ISCSI_EVENT_TYPE_INIT_FUNC = 0, + ISCSI_EVENT_TYPE_DESTROY_FUNC, + ISCSI_EVENT_TYPE_OFFLOAD_CONN, + ISCSI_EVENT_TYPE_UPDATE_CONN, + ISCSI_EVENT_TYPE_CLEAR_SQ, + ISCSI_EVENT_TYPE_TERMINATE_CONN, + ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE, + ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE, + RESERVED8, + RESERVED9, + ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10, + ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD, + ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD, + ISCSI_EVENT_TYPE_ASYN_SYN_RCVD, + ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME, + ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT, + ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT, + ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2, + ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR, + ISCSI_EVENT_TYPE_TCP_CONN_ERROR, + ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES, + MAX_ISCSI_EQE_OPCODE +}; + +enum iscsi_error_types { + ISCSI_STATUS_NONE = 0, + ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1, + ISCSI_CONN_ERROR_TASK_CID_MISMATCH, + ISCSI_CONN_ERROR_TASK_NOT_VALID, + ISCSI_CONN_ERROR_RQ_RING_IS_FULL, + ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL, + ISCSI_CONN_ERROR_HQE_CACHING_FAILED, + ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR, + ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR, + ISCSI_CONN_ERROR_DATA_OVERRUN, + ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR, + ISCSI_CONN_ERROR_TCP_SEG_PROC_URG_ERROR, + ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR, + ISCSI_CONN_ERROR_TCP_SEG_PROC_CONNECT_INVALID_WS_OPTION, + ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR, + ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN, + ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_TYPE, + ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE, + ISCSI_CONN_ERROR_PROTOCOL_ERR_TTT_OUT_OF_RANGE, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE, + 
ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE, + ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE, + ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL, + ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_OUT_ITT, + ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT, + ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET, + ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO, + ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2, + ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN, + ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO, + ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO_S_BIT_ONE, + ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO, + ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG, + ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX, + ISCSI_CONN_ERROR_SENSE_DATA_LENGTH, + ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR, + ISCSI_ERROR_UNKNOWN, + MAX_ISCSI_ERROR_TYPES +}; + +struct iscsi_mflags { + u8 mflags; +#define ISCSI_MFLAGS_SLOW_IO_MASK 0x1 +#define ISCSI_MFLAGS_SLOW_IO_SHIFT 0 +#define ISCSI_MFLAGS_SINGLE_SGE_MASK 0x1 +#define ISCSI_MFLAGS_SINGLE_SGE_SHIFT 1 +#define ISCSI_MFLAGS_RESERVED_MASK 0x3F +#define ISCSI_MFLAGS_RESERVED_SHIFT 2 +}; + +struct iscsi_sgl { + struct regpair sgl_addr; + __le16 updated_sge_size; + __le16 updated_sge_offset; + __le32 byte_offset; +}; + +union iscsi_mstorm_sgl { + struct iscsi_sgl sgl_struct; + struct iscsi_sge single_sge; +}; + +enum iscsi_ramrod_cmd_id { + ISCSI_RAMROD_CMD_ID_UNUSED = 0, + ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1, + ISCSI_RAMROD_CMD_ID_DESTROY_FUNC = 2, + ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN = 3, + ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4, + ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5, + ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6, + MAX_ISCSI_RAMROD_CMD_ID +}; + +struct iscsi_reg1 { + __le32 reg1_map; +#define ISCSI_REG1_NUM_FAST_SGES_MASK 0x7 +#define ISCSI_REG1_NUM_FAST_SGES_SHIFT 0 +#define ISCSI_REG1_RESERVED1_MASK 0x1FFFFFFF +#define ISCSI_REG1_RESERVED1_SHIFT 3 +}; + +union iscsi_seq_num { + __le16 data_sn; + __le16 r2t_sn; +}; + +struct iscsi_spe_conn_offload { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + struct iscsi_conn_offload_params iscsi; + struct tcp_offload_params tcp; +}; + +struct iscsi_spe_conn_offload_option2 { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + struct iscsi_conn_offload_params iscsi; + struct tcp_offload_params_opt2 tcp; +}; + +struct iscsi_spe_conn_termination { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + u8 abortive; + u8 reserved0[7]; + struct regpair queue_cnts_addr; + struct regpair query_params_addr; +}; + +struct iscsi_spe_func_dstry { + struct iscsi_slow_path_hdr hdr; + __le16 reserved0; + __le32 reserved1; +}; + +struct iscsi_spe_func_init { + struct iscsi_slow_path_hdr hdr; + __le16 half_way_close_timeout; + u8 num_sq_pages_in_ring; + u8 num_r2tq_pages_in_ring; + u8 num_uhq_pages_in_ring; + u8 ll2_rx_queue_id; + u8 ooo_enable; + struct iscsi_debug_modes debug_mode; + __le16 reserved1; + __le32 reserved2; + __le32 reserved3; + __le32 reserved4; + struct scsi_init_func_params func_params; + struct scsi_init_func_queues q_params; +}; + +struct ystorm_iscsi_task_state { + union iscsi_data_desc_ctx 
sgl_ctx_union; + __le32 buffer_offset[2]; + __le16 bytes_nxt_dif; + __le16 rxmit_bytes_nxt_dif; + union iscsi_seq_num seq_num_union; + u8 dif_bytes_leftover; + u8 rxmit_dif_bytes_leftover; + __le16 reuse_count; + struct iscsi_dif_flags dif_flags; + u8 local_comp; + __le32 exp_r2t_sn; + __le32 sgl_offset[2]; +}; + +struct ystorm_iscsi_task_st_ctx { + struct ystorm_iscsi_task_state state; + union iscsi_task_hdr pdu_hdr; +}; + +struct ystorm_iscsi_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 word0; + u8 flags0; +#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0 +#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 +#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6 +#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 byte2; + __le32 TTT; + u8 byte3; + u8 byte4; + __le16 word1; +}; + +struct mstorm_iscsi_task_ag_ctx { + u8 cdu_validation; + u8 byte1; + __le16 task_cid; + u8 flags0; +#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 + u8 flags1; +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 +#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 +#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 +#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define 
MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 byte2; + __le32 reg0; + u8 byte3; + u8 byte4; + __le16 word1; +}; + +struct ustorm_iscsi_task_ag_ctx { + u8 reserved; + u8 state; + __le16 icid; + u8 flags0; +#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 + u8 flags1; +#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0 +#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2 +#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 + u8 flags2; +#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 +#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 +#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 +#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 +#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 + u8 flags3; +#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 + __le32 dif_err_intervals; + __le32 dif_error_1st_interval; + __le32 rcv_cont_len; + __le32 exp_cont_len; + __le32 total_data_acked; + __le32 exp_data_acked; + u8 next_tid_valid; + u8 byte3; + __le16 word1; + 
__le16 next_tid; + __le16 word3; + __le32 hdr_residual_count; + __le32 exp_r2t_sn; +}; + +struct mstorm_iscsi_task_st_ctx { + union iscsi_mstorm_sgl sgl_union; + struct iscsi_dif_flags dif_flags; + struct iscsi_mflags flags; + u8 sgl_size; + u8 host_sge_index; + __le16 dix_cur_sge_offset; + __le16 dix_cur_sge_size; + __le32 data_offset_rtid; + u8 dif_offset; + u8 dix_sgl_size; + u8 dix_sge_index; + u8 task_type; + struct regpair sense_db; + struct regpair dix_sgl_cur_sge; + __le32 rem_task_size; + __le16 reuse_count; + __le16 dif_data_residue; + u8 reserved0[4]; + __le32 reserved1[1]; +}; + +struct ustorm_iscsi_task_st_ctx { + __le32 rem_rcv_len; + __le32 exp_data_transfer_len; + __le32 exp_data_sn; + struct regpair lun; + struct iscsi_reg1 reg1; + u8 flags2; +#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1 + u8 reserved2; + __le16 reserved3; + __le32 reserved4; + __le32 reserved5; + __le32 reserved6; + __le32 reserved7; + u8 task_type; + u8 error_flags; +#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1 +#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3 + u8 flags; +#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3 +#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2 +#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3 +#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_SHIFT 4 +#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_SHIFT 5 +#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7 + u8 cq_rss_number; +}; + +struct iscsi_task_context { + struct ystorm_iscsi_task_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2]; + struct ystorm_iscsi_task_ag_ctx ystorm_ag_context; + struct regpair ystorm_ag_padding[2]; + struct tdif_task_context tdif_context; + struct mstorm_iscsi_task_ag_ctx mstorm_ag_context; + struct regpair mstorm_ag_padding[2]; + struct ustorm_iscsi_task_ag_ctx ustorm_ag_context; + struct mstorm_iscsi_task_st_ctx mstorm_st_context; + struct ustorm_iscsi_task_st_ctx ustorm_st_context; + struct rdif_task_context rdif_context; +}; + +enum iscsi_task_type { + ISCSI_TASK_TYPE_INITIATOR_WRITE, + ISCSI_TASK_TYPE_INITIATOR_READ, + ISCSI_TASK_TYPE_MIDPATH, + ISCSI_TASK_TYPE_UNSOLIC, + ISCSI_TASK_TYPE_EXCHCLEANUP, + ISCSI_TASK_TYPE_IRRELEVANT, + ISCSI_TASK_TYPE_TARGET_WRITE, + ISCSI_TASK_TYPE_TARGET_READ, + ISCSI_TASK_TYPE_TARGET_RESPONSE, + ISCSI_TASK_TYPE_LOGIN_RESPONSE, + MAX_ISCSI_TASK_TYPE +}; + +union iscsi_ttt_txlen_union { + __le32 desired_tx_len; + __le32 ttt; +}; + +struct iscsi_uhqe { + __le32 reg1; +#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF +#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0 
+#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1 +#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20 +#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1 +#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21 +#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1 +#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22 +#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1 +#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23 +#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF +#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24 + __le32 reg2; +#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF +#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0 +#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF +#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24 +}; + +struct iscsi_wqe_field { + __le32 contlen_cdbsize_field; +#define ISCSI_WQE_FIELD_CONT_LEN_MASK 0xFFFFFF +#define ISCSI_WQE_FIELD_CONT_LEN_SHIFT 0 +#define ISCSI_WQE_FIELD_CDB_SIZE_MASK 0xFF +#define ISCSI_WQE_FIELD_CDB_SIZE_SHIFT 24 +}; + +union iscsi_wqe_field_union { + struct iscsi_wqe_field cont_field; + __le32 prev_tid; +}; + +struct iscsi_wqe { + __le16 task_id; + u8 flags; +#define ISCSI_WQE_WQE_TYPE_MASK 0x7 +#define ISCSI_WQE_WQE_TYPE_SHIFT 0 +#define ISCSI_WQE_NUM_FAST_SGES_MASK 0x7 +#define ISCSI_WQE_NUM_FAST_SGES_SHIFT 3 +#define ISCSI_WQE_PTU_INVALIDATE_MASK 0x1 +#define ISCSI_WQE_PTU_INVALIDATE_SHIFT 6 +#define ISCSI_WQE_RESPONSE_MASK 0x1 +#define ISCSI_WQE_RESPONSE_SHIFT 7 + struct iscsi_dif_flags prot_flags; + union iscsi_wqe_field_union cont_prevtid_union; +}; + +enum iscsi_wqe_type { + ISCSI_WQE_TYPE_NORMAL, + ISCSI_WQE_TYPE_TASK_CLEANUP, + ISCSI_WQE_TYPE_MIDDLE_PATH, + ISCSI_WQE_TYPE_LOGIN, + ISCSI_WQE_TYPE_FIRST_R2T_CONT, + ISCSI_WQE_TYPE_NONFIRST_R2T_CONT, + ISCSI_WQE_TYPE_RESPONSE, + MAX_ISCSI_WQE_TYPE +}; + +struct iscsi_xhqe { + union iscsi_ttt_txlen_union ttt_or_txlen; + __le32 exp_stat_sn; + struct iscsi_dif_flags prot_flags; + u8 total_ahs_length; + u8 opcode; + u8 flags; +#define ISCSI_XHQE_NUM_FAST_SGES_MASK 0x7 +#define ISCSI_XHQE_NUM_FAST_SGES_SHIFT 0 +#define ISCSI_XHQE_FINAL_MASK 0x1 +#define ISCSI_XHQE_FINAL_SHIFT 3 +#define ISCSI_XHQE_SUPER_IO_MASK 0x1 +#define ISCSI_XHQE_SUPER_IO_SHIFT 4 +#define ISCSI_XHQE_STATUS_BIT_MASK 0x1 +#define ISCSI_XHQE_STATUS_BIT_SHIFT 5 +#define ISCSI_XHQE_RESERVED_MASK 0x3 +#define ISCSI_XHQE_RESERVED_SHIFT 6 + union iscsi_seq_num seq_num_union; + __le16 reserved1; +}; + +struct mstorm_iscsi_stats_drv { + struct regpair iscsi_rx_dropped_pdus_task_not_valid; +}; + +struct ooo_opaque { + __le32 cid; + u8 drop_isle; + u8 drop_size; + u8 ooo_opcode; + u8 ooo_isle; +}; + +struct pstorm_iscsi_stats_drv { + struct regpair iscsi_tx_bytes_cnt; + struct regpair iscsi_tx_packet_cnt; +}; + +struct tstorm_iscsi_stats_drv { + struct regpair iscsi_rx_bytes_cnt; + struct regpair iscsi_rx_packet_cnt; + struct regpair iscsi_rx_new_ooo_isle_events_cnt; + __le32 iscsi_cmdq_threshold_cnt; + __le32 iscsi_rq_threshold_cnt; + __le32 iscsi_immq_threshold_cnt; +}; + +struct ustorm_iscsi_stats_drv { + struct regpair iscsi_rx_data_pdu_cnt; + struct regpair iscsi_rx_r2t_pdu_cnt; + struct regpair iscsi_rx_total_pdu_cnt; +}; + +struct xstorm_iscsi_stats_drv { + struct regpair iscsi_tx_go_to_slow_start_event_cnt; + struct regpair iscsi_tx_fast_retransmit_event_cnt; +}; + +struct ystorm_iscsi_stats_drv { + struct regpair iscsi_tx_data_pdu_cnt; + struct regpair iscsi_tx_r2t_pdu_cnt; + struct regpair iscsi_tx_total_pdu_cnt; +}; + +struct iscsi_db_data { + u8 params; +#define ISCSI_DB_DATA_DEST_MASK 0x3 +#define ISCSI_DB_DATA_DEST_SHIFT 0 +#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 +#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 +#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 
+#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 +#define ISCSI_DB_DATA_RESERVED_MASK 0x1 +#define ISCSI_DB_DATA_RESERVED_SHIFT 5 +#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 sq_prod; +}; + +struct tstorm_iscsi_task_ag_ctx { + u8 byte0; + u8 byte1; + __le16 word0; + u8 flags0; +#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6 + u8 flags2; +#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6 + u8 flags3; +#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7 + u8 flags4; +#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7 + u8 byte2; + __le16 word1; + __le32 reg0; + u8 byte3; + u8 byte4; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg1; + __le32 reg2; +}; + +#endif /* __ISCSI_COMMON__ */ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 5f8fcaaa6504..7e441bdeabdc 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h 
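The qed_chain.h hunk that follows reworks HILO_DMA_REGPAIR(): firmware returns 64-bit addresses as a struct regpair of two little-endian 32-bit halves, and the macro now reassembles them in u64 via HILO_64_REGPAIR() and only then casts to dma_addr_t, presumably so the shift-by-32 never happens in a possibly 32-bit dma_addr_t type, where it would be undefined. A standalone model of the computation (le32_to_cpu reduced to an identity, assuming a little-endian host):

#include <stdint.h>
#include <stdio.h>

struct regpair {
	uint32_t lo;	/* __le32 in the kernel header */
	uint32_t hi;	/* __le32 in the kernel header */
};

#define le32_to_cpu(x)	(x)	/* identity on a little-endian host */

#define HILO_GEN(hi, lo, type)	((((type)(hi)) << 32) + (lo))
#define HILO_64(hi, lo)		HILO_GEN(le32_to_cpu(hi), le32_to_cpu(lo), uint64_t)
#define HILO_64_REGPAIR(regpair) (HILO_64((regpair).hi, (regpair).lo))

int main(void)
{
	struct regpair addr = { .lo = 0x89abcdef, .hi = 0x01234567 };

	/* Prints 0x0123456789abcdef */
	printf("0x%016llx\n", (unsigned long long)HILO_64_REGPAIR(addr));
	return 0;
}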
@@ -25,10 +25,9 @@ } while (0) #define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) -#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t) #define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64) -#define HILO_DMA_REGPAIR(regpair) (HILO_DMA(regpair.hi, regpair.lo)) #define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo)) +#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair)) enum qed_chain_mode { /* Each Page contains a next pointer at its end */ @@ -47,16 +46,56 @@ enum qed_chain_use_mode { QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */ }; +enum qed_chain_cnt_type { + /* The chain's size/prod/cons are kept in 16-bit variables */ + QED_CHAIN_CNT_TYPE_U16, + + /* The chain's size/prod/cons are kept in 32-bit variables */ + QED_CHAIN_CNT_TYPE_U32, +}; + struct qed_chain_next { struct regpair next_phys; void *next_virt; }; +struct qed_chain_pbl_u16 { + u16 prod_page_idx; + u16 cons_page_idx; +}; + +struct qed_chain_pbl_u32 { + u32 prod_page_idx; + u32 cons_page_idx; +}; + struct qed_chain_pbl { + /* Base address of a pre-allocated buffer for pbl */ dma_addr_t p_phys_table; void *p_virt_table; - u16 prod_page_idx; - u16 cons_page_idx; + + /* Table for keeping the virtual addresses of the chain pages, + * corresponding to the physical addresses in the pbl table. + */ + void **pp_virt_addr_tbl; + + /* Index of the page currently used by the producer/consumer */ + union { + struct qed_chain_pbl_u16 pbl16; + struct qed_chain_pbl_u32 pbl32; + } u; +}; + +struct qed_chain_u16 { + /* Cyclic index of next element to produce/consume */ + u16 prod_idx; + u16 cons_idx; +}; + +struct qed_chain_u32 { + /* Cyclic index of next element to produce/consume */ + u32 prod_idx; + u32 cons_idx; }; struct qed_chain { @@ -64,13 +103,25 @@ struct qed_chain { dma_addr_t p_phys_addr; void *p_prod_elem; void *p_cons_elem; - u16 page_cnt; + enum qed_chain_mode mode; enum qed_chain_use_mode intended_use; /* used to produce/consume */ - u16 capacity; /*< number of _usable_ elements */ - u16 size; /* number of elements */ - u16 prod_idx; - u16 cons_idx; + enum qed_chain_cnt_type cnt_type; + + union { + struct qed_chain_u16 chain16; + struct qed_chain_u32 chain32; + } u; + + u32 page_cnt; + + /* Number of elements - capacity is for usable elements only, + * while size will contain total number of elements [for entire chain].
+ */ + u32 capacity; + u32 size; + + /* Elements information for fast calculations */ u16 elem_per_page; u16 elem_per_page_mask; u16 elem_unusable; @@ -96,66 +147,69 @@ struct qed_chain { #define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \ DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode)) +#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16) +#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32) + /* Accessors */ static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain) { - return p_chain->prod_idx; + return p_chain->u.chain16.prod_idx; } static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain) { - return p_chain->cons_idx; + return p_chain->u.chain16.cons_idx; +} + +static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain) +{ + return p_chain->u.chain32.cons_idx; } static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) { u16 used; - /* we don't need to trancate upon assignmet, as we assign u32->u16 */ - used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) - - (u32)p_chain->cons_idx; + used = (u16) (((u32)0x10000 + + (u32)p_chain->u.chain16.prod_idx) - + (u32)p_chain->u.chain16.cons_idx); if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= p_chain->prod_idx / p_chain->elem_per_page - - p_chain->cons_idx / p_chain->elem_per_page; + used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page - + p_chain->u.chain16.cons_idx / p_chain->elem_per_page; - return p_chain->capacity - used; + return (u16)(p_chain->capacity - used); } -static inline u8 qed_chain_is_full(struct qed_chain *p_chain) +static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain) { - return qed_chain_get_elem_left(p_chain) == p_chain->capacity; -} + u32 used; -static inline u8 qed_chain_is_empty(struct qed_chain *p_chain) -{ - return qed_chain_get_elem_left(p_chain) == 0; -} + used = (u32) (((u64)0x100000000ULL + + (u64)p_chain->u.chain32.prod_idx) - + (u64)p_chain->u.chain32.cons_idx); + if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) + used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page - + p_chain->u.chain32.cons_idx / p_chain->elem_per_page; -static inline u16 qed_chain_get_elem_per_page( - struct qed_chain *p_chain) -{ - return p_chain->elem_per_page; + return p_chain->capacity - used; } -static inline u16 qed_chain_get_usable_per_page( - struct qed_chain *p_chain) +static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain) { return p_chain->usable_per_page; } -static inline u16 qed_chain_get_unusable_per_page( - struct qed_chain *p_chain) +static inline u16 qed_chain_get_unusable_per_page(struct qed_chain *p_chain) { return p_chain->elem_unusable; } -static inline u16 qed_chain_get_size(struct qed_chain *p_chain) +static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain) { - return p_chain->size; + return p_chain->page_cnt; } -static inline dma_addr_t -qed_chain_get_pbl_phys(struct qed_chain *p_chain) +static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain) { return p_chain->pbl.p_phys_table; } @@ -172,65 +226,63 @@ qed_chain_get_pbl_phys(struct qed_chain *p_chain) */ static inline void qed_chain_advance_page(struct qed_chain *p_chain, - void **p_next_elem, - u16 *idx_to_inc, - u16 *page_to_inc) + void **p_next_elem, void *idx_to_inc, void *page_to_inc) { + struct qed_chain_next *p_next = NULL; + u32 page_index = 0; switch (p_chain->mode) { case QED_CHAIN_MODE_NEXT_PTR: - { - struct qed_chain_next *p_next = *p_next_elem; + p_next = *p_next_elem; *p_next_elem = 
p_next->next_virt; - *idx_to_inc += p_chain->elem_unusable; + if (is_chain_u16(p_chain)) + *(u16 *)idx_to_inc += p_chain->elem_unusable; + else + *(u32 *)idx_to_inc += p_chain->elem_unusable; break; - } case QED_CHAIN_MODE_SINGLE: *p_next_elem = p_chain->p_virt_addr; break; case QED_CHAIN_MODE_PBL: - /* It is assumed pages are sequential, next element needs - * to change only when passing going back to first from last. - */ - if (++(*page_to_inc) == p_chain->page_cnt) { - *page_to_inc = 0; - *p_next_elem = p_chain->p_virt_addr; + if (is_chain_u16(p_chain)) { + if (++(*(u16 *)page_to_inc) == p_chain->page_cnt) + *(u16 *)page_to_inc = 0; + page_index = *(u16 *)page_to_inc; + } else { + if (++(*(u32 *)page_to_inc) == p_chain->page_cnt) + *(u32 *)page_to_inc = 0; + page_index = *(u32 *)page_to_inc; } + *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index]; } } #define is_unusable_idx(p, idx) \ - (((p)->idx & (p)->elem_per_page_mask) == (p)->usable_per_page) + (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page) + +#define is_unusable_idx_u32(p, idx) \ + (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page) +#define is_unusable_next_idx(p, idx) \ + ((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \ + (p)->usable_per_page) -#define is_unusable_next_idx(p, idx) \ - ((((p)->idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page) +#define is_unusable_next_idx_u32(p, idx) \ + ((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \ + (p)->usable_per_page) -#define test_ans_skip(p, idx) \ +#define test_and_skip(p, idx) \ do { \ - if (is_unusable_idx(p, idx)) { \ - (p)->idx += (p)->elem_unusable; \ + if (is_chain_u16(p)) { \ + if (is_unusable_idx(p, idx)) \ + (p)->u.chain16.idx += (p)->elem_unusable; \ + } else { \ + if (is_unusable_idx_u32(p, idx)) \ + (p)->u.chain32.idx += (p)->elem_unusable; \ } \ } while (0) /** - * @brief qed_chain_return_multi_produced - - * - * A chain in which the driver "Produces" elements should use this API - * to indicate previous produced elements are now consumed. 
- * - * @param p_chain - * @param num - */ -static inline void -qed_chain_return_multi_produced(struct qed_chain *p_chain, - u16 num) -{ - p_chain->cons_idx += num; - test_ans_skip(p_chain, cons_idx); -} - -/** * @brief qed_chain_return_produced - * * A chain in which the driver "Produces" elements should use this API @@ -240,8 +292,11 @@ qed_chain_return_multi_produced(struct qed_chain *p_chain, */ static inline void qed_chain_return_produced(struct qed_chain *p_chain) { - p_chain->cons_idx++; - test_ans_skip(p_chain, cons_idx); + if (is_chain_u16(p_chain)) + p_chain->u.chain16.cons_idx++; + else + p_chain->u.chain32.cons_idx++; + test_and_skip(p_chain, cons_idx); } /** @@ -257,21 +312,33 @@ static inline void qed_chain_return_produced(struct qed_chain *p_chain) */ static inline void *qed_chain_produce(struct qed_chain *p_chain) { - void *ret = NULL; - - if ((p_chain->prod_idx & p_chain->elem_per_page_mask) == - p_chain->next_page_mask) { - qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, - &p_chain->prod_idx, - &p_chain->pbl.prod_page_idx); + void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx; + + if (is_chain_u16(p_chain)) { + if ((p_chain->u.chain16.prod_idx & + p_chain->elem_per_page_mask) == p_chain->next_page_mask) { + p_prod_idx = &p_chain->u.chain16.prod_idx; + p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx; + qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, + p_prod_idx, p_prod_page_idx); + } + p_chain->u.chain16.prod_idx++; + } else { + if ((p_chain->u.chain32.prod_idx & + p_chain->elem_per_page_mask) == p_chain->next_page_mask) { + p_prod_idx = &p_chain->u.chain32.prod_idx; + p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx; + qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, + p_prod_idx, p_prod_page_idx); + } + p_chain->u.chain32.prod_idx++; } - ret = p_chain->p_prod_elem; - p_chain->prod_idx++; + p_ret = p_chain->p_prod_elem; p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) + p_chain->elem_size); - return ret; + return p_ret; } /** @@ -282,9 +349,9 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain) * @param p_chain * @param num * - * @return u16, number of unusable BDs + * @return number of unusable BDs */ -static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain) +static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain) { return p_chain->capacity; } @@ -297,11 +364,13 @@ static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain) * * @param p_chain */ -static inline void -qed_chain_recycle_consumed(struct qed_chain *p_chain) +static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain) { - test_ans_skip(p_chain, prod_idx); - p_chain->prod_idx++; + test_and_skip(p_chain, prod_idx); + if (is_chain_u16(p_chain)) + p_chain->u.chain16.prod_idx++; + else + p_chain->u.chain32.prod_idx++; } /** @@ -316,21 +385,33 @@ qed_chain_recycle_consumed(struct qed_chain *p_chain) */ static inline void *qed_chain_consume(struct qed_chain *p_chain) { - void *ret = NULL; - - if ((p_chain->cons_idx & p_chain->elem_per_page_mask) == - p_chain->next_page_mask) { + void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx; + + if (is_chain_u16(p_chain)) { + if ((p_chain->u.chain16.cons_idx & + p_chain->elem_per_page_mask) == p_chain->next_page_mask) { + p_cons_idx = &p_chain->u.chain16.cons_idx; + p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx; + qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, + p_cons_idx, p_cons_page_idx); + } + p_chain->u.chain16.cons_idx++; + } else { + if 
((p_chain->u.chain32.cons_idx & + p_chain->elem_per_page_mask) == p_chain->next_page_mask) { + p_cons_idx = &p_chain->u.chain32.cons_idx; + p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx; qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, - &p_chain->cons_idx, - &p_chain->pbl.cons_page_idx); + p_cons_idx, p_cons_page_idx); + } + p_chain->u.chain32.cons_idx++; } - ret = p_chain->p_cons_elem; - p_chain->cons_idx++; + p_ret = p_chain->p_cons_elem; p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) + p_chain->elem_size); - return ret; + return p_ret; } /** @@ -340,16 +421,33 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain) */ static inline void qed_chain_reset(struct qed_chain *p_chain) { - int i; - - p_chain->prod_idx = 0; - p_chain->cons_idx = 0; - p_chain->p_cons_elem = p_chain->p_virt_addr; - p_chain->p_prod_elem = p_chain->p_virt_addr; + u32 i; + + if (is_chain_u16(p_chain)) { + p_chain->u.chain16.prod_idx = 0; + p_chain->u.chain16.cons_idx = 0; + } else { + p_chain->u.chain32.prod_idx = 0; + p_chain->u.chain32.cons_idx = 0; + } + p_chain->p_cons_elem = p_chain->p_virt_addr; + p_chain->p_prod_elem = p_chain->p_virt_addr; if (p_chain->mode == QED_CHAIN_MODE_PBL) { - p_chain->pbl.prod_page_idx = p_chain->page_cnt - 1; - p_chain->pbl.cons_page_idx = p_chain->page_cnt - 1; + /* Use (page_cnt - 1) as a reset value for the prod/cons page's + * indices, to avoid unnecessary page advancing on the first + * call to qed_chain_produce/consume. Instead, the indices + * will be advanced to page_cnt and then will be wrapped to 0. + */ + u32 reset_val = p_chain->page_cnt - 1; + + if (is_chain_u16(p_chain)) { + p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val; + p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val; + } else { + p_chain->pbl.u.pbl32.prod_page_idx = reset_val; + p_chain->pbl.u.pbl32.cons_page_idx = reset_val; + } } switch (p_chain->intended_use) { @@ -377,168 +475,184 @@ static inline void qed_chain_reset(struct qed_chain *p_chain) * @param intended_use * @param mode */ -static inline void qed_chain_init(struct qed_chain *p_chain, - void *p_virt_addr, - dma_addr_t p_phys_addr, - u16 page_cnt, - u8 elem_size, - enum qed_chain_use_mode intended_use, - enum qed_chain_mode mode) +static inline void qed_chain_init_params(struct qed_chain *p_chain, + u32 page_cnt, + u8 elem_size, + enum qed_chain_use_mode intended_use, + enum qed_chain_mode mode, + enum qed_chain_cnt_type cnt_type) { /* chain fixed parameters */ - p_chain->p_virt_addr = p_virt_addr; - p_chain->p_phys_addr = p_phys_addr; + p_chain->p_virt_addr = NULL; + p_chain->p_phys_addr = 0; p_chain->elem_size = elem_size; - p_chain->page_cnt = page_cnt; + p_chain->intended_use = intended_use; p_chain->mode = mode; + p_chain->cnt_type = cnt_type; - p_chain->intended_use = intended_use; p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size); - p_chain->usable_per_page = - USABLE_ELEMS_PER_PAGE(elem_size, mode); - p_chain->capacity = p_chain->usable_per_page * page_cnt; - p_chain->size = p_chain->elem_per_page * page_cnt; + p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode); p_chain->elem_per_page_mask = p_chain->elem_per_page - 1; - p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); - p_chain->next_page_mask = (p_chain->usable_per_page & p_chain->elem_per_page_mask); - if (mode == QED_CHAIN_MODE_NEXT_PTR) { - struct qed_chain_next *p_next; - u16 i; - - for (i = 0; i < page_cnt - 1; i++) { - /* Increment mem_phy to the next page. 
*/ - p_phys_addr += QED_CHAIN_PAGE_SIZE; - - /* Initialize the physical address of the next page. */ - p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + - elem_size * - p_chain-> - usable_per_page); - - p_next->next_phys.lo = DMA_LO_LE(p_phys_addr); - p_next->next_phys.hi = DMA_HI_LE(p_phys_addr); - - /* Initialize the virtual address of the next page. */ - p_next->next_virt = (void *)((u8 *)p_virt_addr + - QED_CHAIN_PAGE_SIZE); - - /* Move to the next page. */ - p_virt_addr = p_next->next_virt; - } - - /* Last page's next should point to beginning of the chain */ - p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + - elem_size * - p_chain->usable_per_page); + p_chain->page_cnt = page_cnt; + p_chain->capacity = p_chain->usable_per_page * page_cnt; + p_chain->size = p_chain->elem_per_page * page_cnt; - p_next->next_phys.lo = DMA_LO_LE(p_chain->p_phys_addr); - p_next->next_phys.hi = DMA_HI_LE(p_chain->p_phys_addr); - p_next->next_virt = p_chain->p_virt_addr; - } - qed_chain_reset(p_chain); + p_chain->pbl.p_phys_table = 0; + p_chain->pbl.p_virt_table = NULL; + p_chain->pbl.pp_virt_addr_tbl = NULL; } /** - * @brief qed_chain_pbl_init - Initializes a basic pbl chain - * struct + * @brief qed_chain_init_mem - + * + * Initializes a basic chain struct with its chain buffers + * * @param p_chain * @param p_virt_addr virtual address of allocated buffer's beginning * @param p_phys_addr physical address of allocated buffer's beginning - * @param page_cnt number of pages in the allocated buffer - * @param elem_size size of each element in the chain - * @param use_mode - * @param p_phys_pbl pointer to a pre-allocated side table - * which will hold physical page addresses. - * @param p_virt_pbl pointer to a pre-allocated side table - * which will hold virtual page addresses. + * */ -static inline void -qed_chain_pbl_init(struct qed_chain *p_chain, - void *p_virt_addr, - dma_addr_t p_phys_addr, - u16 page_cnt, - u8 elem_size, - enum qed_chain_use_mode use_mode, - dma_addr_t p_phys_pbl, - dma_addr_t *p_virt_pbl) +static inline void qed_chain_init_mem(struct qed_chain *p_chain, + void *p_virt_addr, dma_addr_t p_phys_addr) { - dma_addr_t *p_pbl_dma = p_virt_pbl; - int i; - - qed_chain_init(p_chain, p_virt_addr, p_phys_addr, page_cnt, - elem_size, use_mode, QED_CHAIN_MODE_PBL); + p_chain->p_virt_addr = p_virt_addr; + p_chain->p_phys_addr = p_phys_addr; +} +/** + * @brief qed_chain_init_pbl_mem - + * + * Initializes a basic chain struct with its pbl buffers + * + * @param p_chain + * @param p_virt_pbl pointer to a pre-allocated side table which will hold + * virtual page addresses. + * @param p_phys_pbl pointer to a pre-allocated side table which will hold + * physical page addresses. + * @param pp_virt_addr_tbl + * pointer to a pre-allocated side table which will hold + * the virtual addresses of the chain pages.
+ * + */ +static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain, + void *p_virt_pbl, + dma_addr_t p_phys_pbl, + void **pp_virt_addr_tbl) +{ p_chain->pbl.p_phys_table = p_phys_pbl; p_chain->pbl.p_virt_table = p_virt_pbl; - - /* Fill the PBL with physical addresses*/ - for (i = 0; i < page_cnt; i++) { - *p_pbl_dma = p_phys_addr; - p_phys_addr += QED_CHAIN_PAGE_SIZE; - p_pbl_dma++; - } + p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl; } /** - * @brief qed_chain_set_prod - sets the prod to the given - * value + * @brief qed_chain_init_next_ptr_elem - + * + * Initializes a next pointer element + * + * @param p_chain + * @param p_virt_curr virtual address of a chain page of which the next + * pointer element is initialized + * @param p_virt_next virtual address of the next chain page + * @param p_phys_next physical address of the next chain page * - * @param prod_idx - * @param p_prod_elem */ -static inline void qed_chain_set_prod(struct qed_chain *p_chain, - u16 prod_idx, - void *p_prod_elem) +static inline void +qed_chain_init_next_ptr_elem(struct qed_chain *p_chain, + void *p_virt_curr, + void *p_virt_next, dma_addr_t p_phys_next) { - p_chain->prod_idx = prod_idx; - p_chain->p_prod_elem = p_prod_elem; + struct qed_chain_next *p_next; + u32 size; + + size = p_chain->elem_size * p_chain->usable_per_page; + p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size); + + DMA_REGPAIR_LE(p_next->next_phys, p_phys_next); + + p_next->next_virt = p_virt_next; } /** - * @brief qed_chain_get_elem - + * @brief qed_chain_get_last_elem - * - * get a pointer to an element represented by absolute idx + * Returns a pointer to the last element of the chain * * @param p_chain - * @assumption p_chain->size is a power of 2 * - * @return void*, a pointer to next element + * @return void* */ -static inline void *qed_chain_sge_get_elem(struct qed_chain *p_chain, - u16 idx) +static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain) { - void *ret = NULL; - - if (idx >= p_chain->size) - return NULL; + struct qed_chain_next *p_next = NULL; + void *p_virt_addr = NULL; + u32 size, last_page_idx; - ret = (u8 *)p_chain->p_virt_addr + p_chain->elem_size * idx; + if (!p_chain->p_virt_addr) + goto out; - return ret; + switch (p_chain->mode) { + case QED_CHAIN_MODE_NEXT_PTR: + size = p_chain->elem_size * p_chain->usable_per_page; + p_virt_addr = p_chain->p_virt_addr; + p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size); + while (p_next->next_virt != p_chain->p_virt_addr) { + p_virt_addr = p_next->next_virt; + p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + + size); + } + break; + case QED_CHAIN_MODE_SINGLE: + p_virt_addr = p_chain->p_virt_addr; + break; + case QED_CHAIN_MODE_PBL: + last_page_idx = p_chain->page_cnt - 1; + p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx]; + break; + } + /* At this stage, p_virt_addr points to the last page of the chain */ + size = p_chain->elem_size * (p_chain->usable_per_page - 1); + p_virt_addr = (u8 *)p_virt_addr + size; +out: + return p_virt_addr; } /** - * @brief qed_chain_sge_inc_cons_prod + * @brief qed_chain_set_prod - sets the prod to the given value * - * for sge chains, producer isn't increased serially, the ring - * is expected to be full at all times. Once elements are - * consumed, they are immediately produced.
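Taken together, the hunks above split the old monolithic qed_chain_pbl_init() into three steps: qed_chain_init_params() for the fixed geometry, qed_chain_init_mem() once the chain pages exist, and qed_chain_init_pbl_mem() for the PBL side tables. A hedged sketch of a caller stringing them together; the function name and the page/element sizes are illustrative, and QED_CHAIN_USE_TO_CONSUME_PRODUCE / QED_CHAIN_CNT_TYPE_U16 are assumed from the chain enums not shown in this hunk:

static void example_pbl_chain_setup(struct qed_chain *p_chain,
				    void *p_virt, dma_addr_t p_phys,
				    void *p_pbl_virt, dma_addr_t p_pbl_phys,
				    void **pp_virt_tbl)
{
	/* 1) Fixed parameters, before any memory is attached. */
	qed_chain_init_params(p_chain, 4 /* pages */, 64 /* elem size */,
			      QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			      QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16);

	/* 2) The chain pages themselves; allocation is the caller's job. */
	qed_chain_init_mem(p_chain, p_virt, p_phys);

	/* 3) PBL side tables, including the virtual-address table that
	 * qed_chain_advance_page() now indexes in PBL mode.
	 */
	qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_virt_tbl);

	/* Prod/cons page indices start at page_cnt - 1, as explained in
	 * qed_chain_reset() above.
	 */
	qed_chain_reset(p_chain);
}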
+ * @param prod_idx + * @param p_prod_elem + */ +static inline void qed_chain_set_prod(struct qed_chain *p_chain, + u32 prod_idx, void *p_prod_elem) +{ + if (is_chain_u16(p_chain)) + p_chain->u.chain16.prod_idx = (u16) prod_idx; + else + p_chain->u.chain32.prod_idx = prod_idx; + p_chain->p_prod_elem = p_prod_elem; +} + +/** + * @brief qed_chain_pbl_zero_mem - set chain memory to 0 * * @param p_chain - * @param cnt - * - * @return inline void */ -static inline void -qed_chain_sge_inc_cons_prod(struct qed_chain *p_chain, - u16 cnt) +static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain) { - p_chain->prod_idx += cnt; - p_chain->cons_idx += cnt; + u32 i, page_cnt; + + if (p_chain->mode != QED_CHAIN_MODE_PBL) + return; + + page_cnt = qed_chain_get_page_cnt(p_chain); + + for (i = 0; i < page_cnt; i++) + memset(p_chain->pbl.pp_virt_addr_tbl[i], 0, + QED_CHAIN_PAGE_SIZE); } #endif diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index e1d69834a11f..4475a9d8ae15 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -13,6 +13,7 @@ #include <linux/if_link.h> #include <linux/qed/eth_common.h> #include <linux/qed/qed_if.h> +#include <linux/qed/qed_iov_if.h> struct qed_dev_eth_info { struct qed_dev_info common; @@ -27,12 +28,15 @@ struct qed_dev_eth_info { struct qed_update_vport_rss_params { u16 rss_ind_table[128]; u32 rss_key[10]; + u8 rss_caps; }; struct qed_update_vport_params { u8 vport_id; u8 update_vport_active_flg; u8 vport_active_flg; + u8 update_tx_switching_flg; + u8 tx_switching_flg; u8 update_accept_any_vlan_flg; u8 accept_any_vlan; u8 update_rss_flg; @@ -45,6 +49,7 @@ struct qed_start_vport_params { bool drop_ttl0; u8 vport_id; u16 mtu; + bool clear_stats; }; struct qed_stop_rxq_params { @@ -109,14 +114,88 @@ struct qed_queue_start_common_params { u8 vport_id; u16 sb; u16 sb_idx; + u16 vf_qid; +}; + +struct qed_tunn_params { + u16 vxlan_port; + u8 update_vxlan_port; + u16 geneve_port; + u8 update_geneve_port; }; struct qed_eth_cb_ops { struct qed_common_cb_ops common; + void (*force_mac) (void *dev, u8 *mac); +}; + +#ifdef CONFIG_DCB +/* Prototype declaration of qed_eth_dcbnl_ops should match the declaration + * of the dcbnl_rtnl_ops structure.
+ */ +struct qed_eth_dcbnl_ops { + /* IEEE 802.1Qaz std */ + int (*ieee_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc); + int (*ieee_setpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc); + int (*ieee_getets)(struct qed_dev *cdev, struct ieee_ets *ets); + int (*ieee_setets)(struct qed_dev *cdev, struct ieee_ets *ets); + int (*ieee_peer_getets)(struct qed_dev *cdev, struct ieee_ets *ets); + int (*ieee_peer_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc); + int (*ieee_getapp)(struct qed_dev *cdev, struct dcb_app *app); + int (*ieee_setapp)(struct qed_dev *cdev, struct dcb_app *app); + + /* CEE std */ + u8 (*getstate)(struct qed_dev *cdev); + u8 (*setstate)(struct qed_dev *cdev, u8 state); + void (*getpgtccfgtx)(struct qed_dev *cdev, int prio, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map); + void (*getpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 *bw_pct); + void (*getpgtccfgrx)(struct qed_dev *cdev, int prio, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map); + void (*getpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 *bw_pct); + void (*getpfccfg)(struct qed_dev *cdev, int prio, u8 *setting); + void (*setpfccfg)(struct qed_dev *cdev, int prio, u8 setting); + u8 (*getcap)(struct qed_dev *cdev, int capid, u8 *cap); + int (*getnumtcs)(struct qed_dev *cdev, int tcid, u8 *num); + u8 (*getpfcstate)(struct qed_dev *cdev); + int (*getapp)(struct qed_dev *cdev, u8 idtype, u16 id); + u8 (*getfeatcfg)(struct qed_dev *cdev, int featid, u8 *flags); + + /* DCBX configuration */ + u8 (*getdcbx)(struct qed_dev *cdev); + void (*setpgtccfgtx)(struct qed_dev *cdev, int prio, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map); + void (*setpgtccfgrx)(struct qed_dev *cdev, int prio, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map); + void (*setpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 bw_pct); + void (*setpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 bw_pct); + u8 (*setall)(struct qed_dev *cdev); + int (*setnumtcs)(struct qed_dev *cdev, int tcid, u8 num); + void (*setpfcstate)(struct qed_dev *cdev, u8 state); + int (*setapp)(struct qed_dev *cdev, u8 idtype, u16 idval, u8 up); + u8 (*setdcbx)(struct qed_dev *cdev, u8 state); + u8 (*setfeatcfg)(struct qed_dev *cdev, int featid, u8 flags); + + /* Peer apps */ + int (*peer_getappinfo)(struct qed_dev *cdev, + struct dcb_peer_app_info *info, + u16 *app_count); + int (*peer_getapptable)(struct qed_dev *cdev, struct dcb_app *table); + + /* CEE peer */ + int (*cee_peer_getpfc)(struct qed_dev *cdev, struct cee_pfc *pfc); + int (*cee_peer_getpg)(struct qed_dev *cdev, struct cee_pg *pg); }; +#endif struct qed_eth_ops { const struct qed_common_ops *common; +#ifdef CONFIG_QED_SRIOV + const struct qed_iov_hv_ops *iov; +#endif +#ifdef CONFIG_DCB + const struct qed_eth_dcbnl_ops *dcb; +#endif int (*fill_dev_info)(struct qed_dev *cdev, struct qed_dev_eth_info *info); @@ -125,6 +204,8 @@ struct qed_eth_ops { struct qed_eth_cb_ops *ops, void *cookie); + bool(*check_mac) (struct qed_dev *cdev, u8 *mac); + int (*vport_start)(struct qed_dev *cdev, struct qed_start_vport_params *params); @@ -165,9 +246,12 @@ struct qed_eth_ops { void (*get_vport_stats)(struct qed_dev *cdev, struct qed_eth_stats *stats); + + int (*tunn_config)(struct qed_dev *cdev, + struct qed_tunn_params *params); }; -const struct qed_eth_ops *qed_get_eth_ops(u32 version); +const struct qed_eth_ops *qed_get_eth_ops(void); void qed_put_eth_ops(void); #endif diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 1f7599c77cd4..d6c4177df7cb 100644 --- a/include/linux/qed/qed_if.h +++ 
b/include/linux/qed/qed_if.h @@ -25,6 +25,113 @@ #include <linux/qed/common_hsi.h> #include <linux/qed/qed_chain.h> +enum dcbx_protocol_type { + DCBX_PROTOCOL_ISCSI, + DCBX_PROTOCOL_FCOE, + DCBX_PROTOCOL_ROCE, + DCBX_PROTOCOL_ROCE_V2, + DCBX_PROTOCOL_ETH, + DCBX_MAX_PROTOCOL_TYPE +}; + +#ifdef CONFIG_DCB +#define QED_LLDP_CHASSIS_ID_STAT_LEN 4 +#define QED_LLDP_PORT_ID_STAT_LEN 4 +#define QED_DCBX_MAX_APP_PROTOCOL 32 +#define QED_MAX_PFC_PRIORITIES 8 +#define QED_DCBX_DSCP_SIZE 64 + +struct qed_dcbx_lldp_remote { + u32 peer_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN]; + u32 peer_port_id[QED_LLDP_PORT_ID_STAT_LEN]; + bool enable_rx; + bool enable_tx; + u32 tx_interval; + u32 max_credit; +}; + +struct qed_dcbx_lldp_local { + u32 local_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN]; + u32 local_port_id[QED_LLDP_PORT_ID_STAT_LEN]; +}; + +struct qed_dcbx_app_prio { + u8 roce; + u8 roce_v2; + u8 fcoe; + u8 iscsi; + u8 eth; +}; + +struct qed_dbcx_pfc_params { + bool willing; + bool enabled; + u8 prio[QED_MAX_PFC_PRIORITIES]; + u8 max_tc; +}; + +enum qed_dcbx_sf_ieee_type { + QED_DCBX_SF_IEEE_ETHTYPE, + QED_DCBX_SF_IEEE_TCP_PORT, + QED_DCBX_SF_IEEE_UDP_PORT, + QED_DCBX_SF_IEEE_TCP_UDP_PORT +}; + +struct qed_app_entry { + bool ethtype; + enum qed_dcbx_sf_ieee_type sf_ieee; + bool enabled; + u8 prio; + u16 proto_id; + enum dcbx_protocol_type proto_type; +}; + +struct qed_dcbx_params { + struct qed_app_entry app_entry[QED_DCBX_MAX_APP_PROTOCOL]; + u16 num_app_entries; + bool app_willing; + bool app_valid; + bool app_error; + bool ets_willing; + bool ets_enabled; + bool ets_cbs; + bool valid; + u8 ets_pri_tc_tbl[QED_MAX_PFC_PRIORITIES]; + u8 ets_tc_bw_tbl[QED_MAX_PFC_PRIORITIES]; + u8 ets_tc_tsa_tbl[QED_MAX_PFC_PRIORITIES]; + struct qed_dbcx_pfc_params pfc; + u8 max_ets_tc; +}; + +struct qed_dcbx_admin_params { + struct qed_dcbx_params params; + bool valid; +}; + +struct qed_dcbx_remote_params { + struct qed_dcbx_params params; + bool valid; +}; + +struct qed_dcbx_operational_params { + struct qed_dcbx_app_prio app_prio; + struct qed_dcbx_params params; + bool valid; + bool enabled; + bool ieee; + bool cee; + u32 err; +}; + +struct qed_dcbx_get { + struct qed_dcbx_operational_params operational; + struct qed_dcbx_lldp_remote lldp_remote; + struct qed_dcbx_lldp_local lldp_local; + struct qed_dcbx_remote_params remote; + struct qed_dcbx_admin_params local; +}; +#endif + enum qed_led_mode { QED_LED_MODE_OFF, QED_LED_MODE_ON, @@ -49,8 +156,70 @@ struct qed_eth_pf_params { u16 num_cons; }; +/* Most of the parameters below are described in the FW iSCSI / TCP HSI */ +struct qed_iscsi_pf_params { + u64 glbl_q_params_addr; + u64 bdq_pbl_base_addr[2]; + u32 max_cwnd; + u16 cq_num_entries; + u16 cmdq_num_entries; + u16 dup_ack_threshold; + u16 tx_sws_timer; + u16 min_rto; + u16 min_rto_rt; + u16 max_rto; + + /* The following parameters are used during HW-init + * and these parameters need to be passed as arguments + * to update_pf_params routine invoked before slowpath start + */ + u16 num_cons; + u16 num_tasks; + + /* The following parameters are used during protocol-init */ + u16 half_way_close_timeout; + u16 bdq_xoff_threshold[2]; + u16 bdq_xon_threshold[2]; + u16 cmdq_xoff_threshold; + u16 cmdq_xon_threshold; + u16 rq_buffer_size; + + u8 num_sq_pages_in_ring; + u8 num_r2tq_pages_in_ring; + u8 num_uhq_pages_in_ring; + u8 num_queues; + u8 log_page_size; + u8 rqe_log_size; + u8 max_fin_rt; + u8 gl_rq_pi; + u8 gl_cmd_pi; + u8 debug_mode; + u8 ll2_ooo_queue_id; + u8 ooo_enable; + + u8 is_target; + u8
bdq_pbl_num_entries[2]; +}; + +struct qed_rdma_pf_params { + /* Supplied to QED during resource allocation (may affect the ILT and + * the doorbell BAR). + */ + u32 min_dpis; /* number of requested DPIs */ + u32 num_mrs; /* number of requested memory regions */ + u32 num_qps; /* number of requested Queue Pairs */ + u32 num_srqs; /* number of requested SRQ */ + u8 roce_edpm_mode; /* see QED_ROCE_EDPM_MODE_ENABLE */ + u8 gl_pi; /* protocol index */ + + /* Will allocate rate limiters to be used with QPs */ + u8 enable_dcqcn; +}; + struct qed_pf_params { struct qed_eth_pf_params eth_pf_params; + struct qed_iscsi_pf_params iscsi_pf_params; + struct qed_rdma_pf_params rdma_pf_params; }; enum qed_int_mode { @@ -91,8 +260,11 @@ struct qed_dev_info { /* MFW version */ u32 mfw_rev; + bool rdma_supported; + u32 flash_size; u8 mf_mode; + bool tx_switching; }; enum qed_sb_type { @@ -101,6 +273,7 @@ enum qed_sb_type { enum qed_protocol { QED_PROTOCOL_ETH, + QED_PROTOCOL_ISCSI, }; struct qed_link_params { @@ -110,6 +283,7 @@ struct qed_link_params { #define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1) #define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2) #define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3) +#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4) u32 override_flags; bool autoneg; u32 adv_speeds; @@ -118,6 +292,12 @@ struct qed_link_params { #define QED_LINK_PAUSE_RX_ENABLE BIT(1) #define QED_LINK_PAUSE_TX_ENABLE BIT(2) u32 pause_config; +#define QED_LINK_LOOPBACK_NONE BIT(0) +#define QED_LINK_LOOPBACK_INT_PHY BIT(1) +#define QED_LINK_LOOPBACK_EXT_PHY BIT(2) +#define QED_LINK_LOOPBACK_EXT BIT(3) +#define QED_LINK_LOOPBACK_MAC BIT(4) + u32 loopback_mode; }; struct qed_link_output { @@ -133,6 +313,13 @@ struct qed_link_output { u32 pause_config; }; +struct qed_probe_params { + enum qed_protocol protocol; + u32 dp_module; + u8 dp_level; + bool is_vf; +}; + #define QED_DRV_VER_STR_SIZE 12 struct qed_slowpath_params { u32 int_mode; @@ -158,10 +345,49 @@ struct qed_common_cb_ops { struct qed_link_output *link); }; +struct qed_selftest_ops { +/** + * @brief selftest_interrupt - Perform interrupt test + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*selftest_interrupt)(struct qed_dev *cdev); + +/** + * @brief selftest_memory - Perform memory test + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*selftest_memory)(struct qed_dev *cdev); + +/** + * @brief selftest_register - Perform register test + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*selftest_register)(struct qed_dev *cdev); + +/** + * @brief selftest_clock - Perform clock test + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*selftest_clock)(struct qed_dev *cdev); +}; + struct qed_common_ops { + struct qed_selftest_ops *selftest; + struct qed_dev* (*probe)(struct pci_dev *dev, - enum qed_protocol protocol, - u32 dp_module, u8 dp_level); + struct qed_probe_params *params); void (*remove)(struct qed_dev *cdev); @@ -211,6 +437,16 @@ struct qed_common_ops { void (*simd_handler_clean)(struct qed_dev *cdev, int index); + +/** + * @brief can_link_change - can the instance change the link or not + * + * @param cdev + * + * @return true if link-change is allowed, false otherwise. 
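Since probe() now takes a parameter block instead of a positional argument list, a protocol driver builds a qed_probe_params first. A sketch under assumed names (example_probe and the zeroed debug fields are illustrative; the struct fields come from qed_probe_params above):

static struct qed_dev *example_probe(struct pci_dev *pdev,
				     const struct qed_common_ops *common)
{
	struct qed_probe_params params = {
		.protocol = QED_PROTOCOL_ETH,
		.dp_module = 0,		/* no debug prints */
		.dp_level = 0,
		.is_vf = false,
	};

	return common->probe(pdev, &params);
}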
+ */ + bool (*can_link_change)(struct qed_dev *cdev); + /** * @brief set_link - set links according to params * @@ -252,7 +488,8 @@ struct qed_common_ops { int (*chain_alloc)(struct qed_dev *cdev, enum qed_chain_use_mode intended_use, enum qed_chain_mode mode, - u16 num_elems, + enum qed_chain_cnt_type cnt_type, + u32 num_elems, size_t elem_size, struct qed_chain *p_chain); @@ -260,25 +497,40 @@ struct qed_common_ops { struct qed_chain *p_chain); /** - * @brief set_led - Configure LED mode + * @brief get_coalesce - Get coalesce parameters in usec * * @param cdev - * @param mode - LED mode + * @param rx_coal - Rx coalesce value in usec + * @param tx_coal - Tx coalesce value in usec + * + */ + void (*get_coalesce)(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal); + +/** + * @brief set_coalesce - Configure Rx coalesce value in usec + * + * @param cdev + * @param rx_coal - Rx coalesce value in usec + * @param tx_coal - Tx coalesce value in usec + * @param qid - Queue index + * @param sb_id - Status Block Id * * @return 0 on success, error otherwise. */ - int (*set_led)(struct qed_dev *cdev, - enum qed_led_mode mode); -}; + int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, + u8 qid, u16 sb_id); /** - * @brief qed_get_protocol_version + * @brief set_led - Configure LED mode * - * @param protocol + * @param cdev + * @param mode - LED mode * - * @return version supported by qed for given protocol driver + * @return 0 on success, error otherwise. */ -u32 qed_get_protocol_version(enum qed_protocol protocol); + int (*set_led)(struct qed_dev *cdev, + enum qed_led_mode mode); +}; #define MASK_FIELD(_name, _value) \ ((_value) &= (_name ## _MASK)) @@ -393,16 +645,16 @@ struct qed_eth_stats { /* port */ u64 rx_64_byte_packets; - u64 rx_127_byte_packets; - u64 rx_255_byte_packets; - u64 rx_511_byte_packets; - u64 rx_1023_byte_packets; - u64 rx_1518_byte_packets; - u64 rx_1522_byte_packets; - u64 rx_2047_byte_packets; - u64 rx_4095_byte_packets; - u64 rx_9216_byte_packets; - u64 rx_16383_byte_packets; + u64 rx_65_to_127_byte_packets; + u64 rx_128_to_255_byte_packets; + u64 rx_256_to_511_byte_packets; + u64 rx_512_to_1023_byte_packets; + u64 rx_1024_to_1518_byte_packets; + u64 rx_1519_to_1522_byte_packets; + u64 rx_1519_to_2047_byte_packets; + u64 rx_2048_to_4095_byte_packets; + u64 rx_4096_to_9216_byte_packets; + u64 rx_9217_to_16383_byte_packets; u64 rx_crc_errors; u64 rx_mac_crtl_frames; u64 rx_pause_frames; @@ -524,4 +776,15 @@ static inline void internal_ram_wr(void __iomem *addr, __internal_ram_wr(NULL, addr, size, data); } +enum qed_rss_caps { + QED_RSS_IPV4 = 0x1, + QED_RSS_IPV6 = 0x2, + QED_RSS_IPV4_TCP = 0x4, + QED_RSS_IPV6_TCP = 0x8, + QED_RSS_IPV4_UDP = 0x10, + QED_RSS_IPV6_UDP = 0x20, +}; + +#define QED_RSS_IND_TABLE_SIZE 128 +#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */ #endif diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h new file mode 100644 index 000000000000..5a4f8d0899e9 --- /dev/null +++ b/include/linux/qed/qed_iov_if.h @@ -0,0 +1,34 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#ifndef _QED_IOV_IF_H +#define _QED_IOV_IF_H + +#include <linux/qed/qed_if.h> + +/* Structs used by PF to control and manipulate child VFs */ +struct qed_iov_hv_ops { + int (*configure)(struct qed_dev *cdev, int num_vfs_param); + + int (*set_mac) (struct qed_dev *cdev, u8 *mac, int vfid); + + int (*set_vlan) (struct qed_dev *cdev, u16 vid, int vfid); + + int (*get_config) (struct qed_dev *cdev, int vf_id, + struct ifla_vf_info *ivi); + + int (*set_link_state) (struct qed_dev *cdev, int vf_id, + int link_state); + + int (*set_spoof) (struct qed_dev *cdev, int vfid, bool val); + + int (*set_rate) (struct qed_dev *cdev, int vfid, + u32 min_rate, u32 max_rate); +}; + +#endif diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h new file mode 100644 index 000000000000..187991c1f439 --- /dev/null +++ b/include/linux/qed/rdma_common.h @@ -0,0 +1,44 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef __RDMA_COMMON__ +#define __RDMA_COMMON__ +/************************/ +/* RDMA FW CONSTANTS */ +/************************/ + +#define RDMA_RESERVED_LKEY (0) +#define RDMA_RING_PAGE_SIZE (0x1000) + +#define RDMA_MAX_SGE_PER_SQ_WQE (4) +#define RDMA_MAX_SGE_PER_RQ_WQE (4) + +#define RDMA_MAX_DATA_SIZE_IN_WQE (0x7FFFFFFF) + +#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50) +#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20) + +#define RDMA_MAX_CQS (64 * 1024) +#define RDMA_MAX_TIDS (128 * 1024 - 1) +#define RDMA_MAX_PDS (64 * 1024) + +#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS + +#define RDMA_TASK_TYPE (PROTOCOLID_ROCE) + +struct rdma_srq_id { + __le16 srq_idx; + __le16 opaque_fid; +}; + +struct rdma_srq_producers { + __le32 sge_prod; + __le32 wqe_prod; +}; + +#endif /* __RDMA_COMMON__ */ diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h new file mode 100644 index 000000000000..2eeaf3dc6646 --- /dev/null +++ b/include/linux/qed/roce_common.h @@ -0,0 +1,17 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef __ROCE_COMMON__ +#define __ROCE_COMMON__ + +#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256) +#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) + +#define ROCE_MAX_QPS (32 * 1024) + +#endif /* __ROCE_COMMON__ */ diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h new file mode 100644 index 000000000000..3b8e1efd9bc2 --- /dev/null +++ b/include/linux/qed/storage_common.h @@ -0,0 +1,91 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
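Looping back to qed_iov_if.h above: qed_iov_hv_ops is the table an Ethernet driver would bridge its SR-IOV ndo callbacks onto. A sketch, assuming a private structure holding the handles (struct example_dev and its fields are invented for illustration, and the iov member only exists when CONFIG_QED_SRIOV is set):

struct example_dev {
	struct qed_dev *cdev;
	const struct qed_eth_ops *ops;
};

static int example_ndo_set_vf_mac(struct net_device *ndev, int vfid, u8 *mac)
{
	struct example_dev *edev = netdev_priv(ndev);

	if (!edev->ops->iov)
		return -EOPNOTSUPP;

	/* Signature from qed_iov_if.h: (cdev, mac, vfid). */
	return edev->ops->iov->set_mac(edev->cdev, mac, vfid);
}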
+ */ + +#ifndef __STORAGE_COMMON__ +#define __STORAGE_COMMON__ + +#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2) +#define BDQ_NUM_RESOURCES (4) + +#define BDQ_ID_RQ (0) +#define BDQ_ID_IMM_DATA (1) +#define BDQ_NUM_IDS (2) + +#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15) + +struct scsi_bd { + struct regpair address; + struct regpair opaque; +}; + +struct scsi_bdq_ram_drv_data { + __le16 external_producer; + __le16 reserved0[3]; +}; + +struct scsi_drv_cmdq { + __le16 cmdq_cons; + __le16 reserved0; + __le32 reserved1; +}; + +struct scsi_init_func_params { + __le16 num_tasks; + u8 log_page_size; + u8 debug_mode; + u8 reserved2[12]; +}; + +struct scsi_init_func_queues { + struct regpair glbl_q_params_addr; + __le16 rq_buffer_size; + __le16 cq_num_entries; + __le16 cmdq_num_entries; + u8 bdq_resource_id; + u8 q_validity; +#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0 +#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1 +#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2 +#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F +#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3 + u8 num_queues; + u8 queue_relative_offset; + u8 cq_sb_pi; + u8 cmdq_sb_pi; + __le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS]; + __le16 reserved0; + u8 bdq_pbl_num_entries[BDQ_NUM_IDS]; + struct regpair bdq_pbl_base_address[BDQ_NUM_IDS]; + __le16 bdq_xoff_threshold[BDQ_NUM_IDS]; + __le16 bdq_xon_threshold[BDQ_NUM_IDS]; + __le16 cmdq_xoff_threshold; + __le16 cmdq_xon_threshold; + __le32 reserved1; +}; + +struct scsi_ram_per_bdq_resource_drv_data { + struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS]; +}; + +struct scsi_sge { + struct regpair sge_addr; + __le16 sge_len; + __le16 reserved0; + __le32 reserved1; +}; + +struct scsi_terminate_extra_params { + __le16 unsolicited_cq_count; + __le16 cmdq_count; + u8 reserved[4]; +}; + +#endif /* __STORAGE_COMMON__ */ diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h new file mode 100644 index 000000000000..accba0e6b704 --- /dev/null +++ b/include/linux/qed/tcp_common.h @@ -0,0 +1,226 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
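A note on the q_validity flags in struct scsi_init_func_queues above: the MASK/SHIFT pairs follow the usual qed HSI convention, which is normally driven through the SET_FIELD()/GET_FIELD() helpers; those are assumed here from common_hsi.h rather than shown in this diff. A sketch of marking the RQ and command queue rings valid:

static void example_mark_queues_valid(struct scsi_init_func_queues *p_queues)
{
	/* SET_FIELD(value, name, flag) masks and shifts 'flag' into
	 * place using name##_MASK / name##_SHIFT (assumed helper).
	 */
	SET_FIELD(p_queues->q_validity, SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
	SET_FIELD(p_queues->q_validity, SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
}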
+ */ + +#ifndef __TCP_COMMON__ +#define __TCP_COMMON__ + +#define TCP_INVALID_TIMEOUT_VAL -1 + +enum tcp_connect_mode { + TCP_CONNECT_ACTIVE, + TCP_CONNECT_PASSIVE, + MAX_TCP_CONNECT_MODE +}; + +struct tcp_init_params { + __le32 max_cwnd; + __le16 dup_ack_threshold; + __le16 tx_sws_timer; + __le16 min_rto; + __le16 min_rto_rt; + __le16 max_rto; + u8 maxfinrt; + u8 reserved[1]; +}; + +enum tcp_ip_version { + TCP_IPV4, + TCP_IPV6, + MAX_TCP_IP_VERSION +}; + +struct tcp_offload_params { + __le16 local_mac_addr_lo; + __le16 local_mac_addr_mid; + __le16 local_mac_addr_hi; + __le16 remote_mac_addr_lo; + __le16 remote_mac_addr_mid; + __le16 remote_mac_addr_hi; + __le16 vlan_id; + u8 flags; +#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0 +#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1 +#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2 +#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3 +#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 4 +#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5 +#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6 +#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7 + u8 ip_version; + __le32 remote_ip[4]; + __le32 local_ip[4]; + __le32 flow_label; + u8 ttl; + u8 tos_or_tc; + __le16 remote_port; + __le16 local_port; + __le16 mss; + u8 rcv_wnd_scale; + u8 connect_mode; + __le16 srtt; + __le32 cwnd; + __le32 ss_thresh; + __le16 reserved1; + u8 ka_max_probe_cnt; + u8 dup_ack_theshold; + __le32 rcv_next; + __le32 snd_una; + __le32 snd_next; + __le32 snd_max; + __le32 snd_wnd; + __le32 rcv_wnd; + __le32 snd_wl1; + __le32 ts_time; + __le32 ts_recent; + __le32 ts_recent_age; + __le32 total_rt; + __le32 ka_timeout_delta; + __le32 rt_timeout_delta; + u8 dup_ack_cnt; + u8 snd_wnd_probe_cnt; + u8 ka_probe_cnt; + u8 rt_cnt; + __le16 rtt_var; + __le16 reserved2; + __le32 ka_timeout; + __le32 ka_interval; + __le32 max_rt_time; + __le32 initial_rcv_wnd; + u8 snd_wnd_scale; + u8 ack_frequency; + __le16 da_timeout_value; + __le32 ts_ticks_per_second; +}; + +struct tcp_offload_params_opt2 { + __le16 local_mac_addr_lo; + __le16 local_mac_addr_mid; + __le16 local_mac_addr_hi; + __le16 remote_mac_addr_lo; + __le16 remote_mac_addr_mid; + __le16 remote_mac_addr_hi; + __le16 vlan_id; + u8 flags; +#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0 +#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1 +#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2 +#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F +#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3 + u8 ip_version; + __le32 remote_ip[4]; + __le32 local_ip[4]; + __le32 flow_label; + u8 ttl; + u8 tos_or_tc; + __le16 remote_port; + __le16 local_port; + __le16 mss; + u8 rcv_wnd_scale; + u8 connect_mode; + __le16 syn_ip_payload_length; + __le32 syn_phy_addr_lo; + __le32 syn_phy_addr_hi; + __le32 reserved1[22]; +}; + +enum tcp_seg_placement_event { + TCP_EVENT_ADD_PEN, + TCP_EVENT_ADD_NEW_ISLE, + TCP_EVENT_ADD_ISLE_RIGHT, + TCP_EVENT_ADD_ISLE_LEFT, + TCP_EVENT_JOIN, + TCP_EVENT_NOP, + MAX_TCP_SEG_PLACEMENT_EVENT +}; + +struct tcp_update_params { + __le16 flags; +#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 
0x1 +#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0 +#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1 +#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2 +#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3 +#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4 +#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5 +#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6 +#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7 +#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8 +#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9 +#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10 +#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11 +#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12 +#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1 +#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13 +#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14 +#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1 +#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15 + __le16 remote_mac_addr_lo; + __le16 remote_mac_addr_mid; + __le16 remote_mac_addr_hi; + __le16 mss; + u8 ttl; + u8 tos_or_tc; + __le32 ka_timeout; + __le32 ka_interval; + __le32 max_rt_time; + __le32 flow_label; + __le32 initial_rcv_wnd; + u8 ka_max_probe_cnt; + u8 reserved1[7]; +}; + +struct tcp_upload_params { + __le32 rcv_next; + __le32 snd_una; + __le32 snd_next; + __le32 snd_max; + __le32 snd_wnd; + __le32 rcv_wnd; + __le32 snd_wl1; + __le32 cwnd; + __le32 ss_thresh; + __le16 srtt; + __le16 rtt_var; + __le32 ts_time; + __le32 ts_recent; + __le32 ts_recent_age; + __le32 total_rt; + __le32 ka_timeout_delta; + __le32 rt_timeout_delta; + u8 dup_ack_cnt; + u8 snd_wnd_probe_cnt; + u8 ka_probe_cnt; + u8 rt_cnt; + __le32 reserved; +}; + +#endif /* __TCP_COMMON__ */ diff --git a/include/linux/quota.h b/include/linux/quota.h index 9dfb6bce8c9e..55107a8ff887 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -179,6 +179,16 @@ static inline struct kqid make_kqid_projid(kprojid_t projid) return kqid; } +/** + * qid_has_mapping - Report if a qid maps into a user namespace. + * @ns: The user namespace to see if a value maps into. + * @qid: The kernel internal quota identifier to test. 
+ */ +static inline bool qid_has_mapping(struct user_namespace *ns, struct kqid qid) +{ + return from_kqid(ns, qid) != (qid_t) -1; +} + extern spinlock_t dq_data_lock; @@ -200,8 +210,8 @@ struct mem_dqblk { qsize_t dqb_ihardlimit; /* absolute limit on allocated inodes */ qsize_t dqb_isoftlimit; /* preferred inode limit */ qsize_t dqb_curinodes; /* current # allocated inodes */ - time_t dqb_btime; /* time limit for excessive disk use */ - time_t dqb_itime; /* time limit for excessive inode use */ + time64_t dqb_btime; /* time limit for excessive disk use */ + time64_t dqb_itime; /* time limit for excessive inode use */ }; /* diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 51a97ac8bfbf..4c45105dece3 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -29,51 +29,45 @@ #include <linux/rcupdate.h> /* - * An indirect pointer (root->rnode pointing to a radix_tree_node, rather - * than a data item) is signalled by the low bit set in the root->rnode - * pointer. - * - * In this case root->height is > 0, but the indirect pointer tests are - * needed for RCU lookups (because root->height is unreliable). The only - * time callers need worry about this is when doing a lookup_slot under - * RCU. - * - * Indirect pointer in fact is also used to tag the last pointer of a node - * when it is shrunk, before we rcu free the node. See shrink code for - * details. + * The bottom two bits of the slot determine how the remaining bits in the + * slot are interpreted: + * + * 00 - data pointer + * 01 - internal entry + * 10 - exceptional entry + * 11 - this bit combination is currently unused/reserved + * + * The internal entry may be a pointer to the next level in the tree, a + * sibling entry, or an indicator that the entry in this slot has been moved + * to another location in the tree and the lookup should be restarted. While + * NULL fits the 'data pointer' pattern, it means that there is no entry in + * the tree for this index (no matter what level of the tree it is found at). + * This means that you cannot store NULL in the tree as a value for the index. */ -#define RADIX_TREE_INDIRECT_PTR 1 +#define RADIX_TREE_ENTRY_MASK 3UL +#define RADIX_TREE_INTERNAL_NODE 1UL + /* - * A common use of the radix tree is to store pointers to struct pages; - * but shmem/tmpfs needs also to store swap entries in the same tree: - * those are marked as exceptional entries to distinguish them. + * Most users of the radix tree store pointers but shmem/tmpfs stores swap + * entries in the same tree. They are marked as exceptional entries to + * distinguish them from pointers to struct page. * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it. */ #define RADIX_TREE_EXCEPTIONAL_ENTRY 2 #define RADIX_TREE_EXCEPTIONAL_SHIFT 2 -#define RADIX_DAX_MASK 0xf -#define RADIX_DAX_SHIFT 4 -#define RADIX_DAX_PTE (0x4 | RADIX_TREE_EXCEPTIONAL_ENTRY) -#define RADIX_DAX_PMD (0x8 | RADIX_TREE_EXCEPTIONAL_ENTRY) -#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_MASK) -#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT)) -#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \ - RADIX_DAX_SHIFT | (pmd ? 
RADIX_DAX_PMD : RADIX_DAX_PTE))) - -static inline int radix_tree_is_indirect_ptr(void *ptr) +static inline bool radix_tree_is_internal_node(void *ptr) { - return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR); + return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) == + RADIX_TREE_INTERNAL_NODE; } /*** radix-tree API starts here ***/ #define RADIX_TREE_MAX_TAGS 3 -#ifdef __KERNEL__ +#ifndef RADIX_TREE_MAP_SHIFT #define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6) -#else -#define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */ #endif #define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) @@ -86,16 +80,13 @@ static inline int radix_tree_is_indirect_ptr(void *ptr) #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ RADIX_TREE_MAP_SHIFT)) -/* Height component in node->path */ -#define RADIX_TREE_HEIGHT_SHIFT (RADIX_TREE_MAX_PATH + 1) -#define RADIX_TREE_HEIGHT_MASK ((1UL << RADIX_TREE_HEIGHT_SHIFT) - 1) - /* Internally used bits of node->count */ #define RADIX_TREE_COUNT_SHIFT (RADIX_TREE_MAP_SHIFT + 1) #define RADIX_TREE_COUNT_MASK ((1UL << RADIX_TREE_COUNT_SHIFT) - 1) struct radix_tree_node { - unsigned int path; /* Offset in parent & height from the bottom */ + unsigned char shift; /* Bits remaining in each slot */ + unsigned char offset; /* Slot offset in parent */ unsigned int count; union { struct { @@ -115,13 +106,11 @@ struct radix_tree_node { /* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */ struct radix_tree_root { - unsigned int height; gfp_t gfp_mask; struct radix_tree_node __rcu *rnode; }; #define RADIX_TREE_INIT(mask) { \ - .height = 0, \ .gfp_mask = (mask), \ .rnode = NULL, \ } @@ -131,11 +120,15 @@ struct radix_tree_root { #define INIT_RADIX_TREE(root, mask) \ do { \ - (root)->height = 0; \ (root)->gfp_mask = (mask); \ (root)->rnode = NULL; \ } while (0) +static inline bool radix_tree_empty(struct radix_tree_root *root) +{ + return root->rnode == NULL; +} + /** * Radix-tree synchronization * @@ -231,7 +224,7 @@ static inline void *radix_tree_deref_slot_protected(void **pslot, */ static inline int radix_tree_deref_retry(void *arg) { - return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR); + return unlikely(radix_tree_is_internal_node(arg)); } /** @@ -252,8 +245,7 @@ static inline int radix_tree_exceptional_entry(void *arg) */ static inline int radix_tree_exception(void *arg) { - return unlikely((unsigned long)arg & - (RADIX_TREE_INDIRECT_PTR | RADIX_TREE_EXCEPTIONAL_ENTRY)); + return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK); } /** @@ -266,7 +258,7 @@ static inline int radix_tree_exception(void *arg) */ static inline void radix_tree_replace_slot(void **pslot, void *item) { - BUG_ON(radix_tree_is_indirect_ptr(item)); + BUG_ON(radix_tree_is_internal_node(item)); rcu_assign_pointer(*pslot, item); } @@ -288,14 +280,18 @@ bool __radix_tree_delete_node(struct radix_tree_root *root, struct radix_tree_node *node); void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); void *radix_tree_delete(struct radix_tree_root *, unsigned long); -unsigned int -radix_tree_gang_lookup(struct radix_tree_root *root, void **results, - unsigned long first_index, unsigned int max_items); +struct radix_tree_node *radix_tree_replace_clear_tags( + struct radix_tree_root *root, + unsigned long index, void *entry); +unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, + void **results, unsigned long first_index, + unsigned int max_items); unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root 
*root, void ***results, unsigned long *indices, unsigned long first_index, unsigned int max_items); int radix_tree_preload(gfp_t gfp_mask); int radix_tree_maybe_preload(gfp_t gfp_mask); +int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); void radix_tree_init(void); void *radix_tree_tag_set(struct radix_tree_root *root, unsigned long index, unsigned int tag); @@ -327,8 +323,9 @@ static inline void radix_tree_preload_end(void) * struct radix_tree_iter - radix tree iterator state * * @index: index of current slot - * @next_index: next-to-last index for this chunk + * @next_index: one beyond the last index for this chunk * @tags: bit-mask for tag-iterating + * @shift: shift for the node that holds our slots * * This radix tree iterator works in terms of "chunks" of slots. A chunk is a * subinterval of slots contained within one radix tree leaf node. It is @@ -341,8 +338,20 @@ struct radix_tree_iter { unsigned long index; unsigned long next_index; unsigned long tags; +#ifdef CONFIG_RADIX_TREE_MULTIORDER + unsigned int shift; +#endif }; +static inline unsigned int iter_shift(struct radix_tree_iter *iter) +{ +#ifdef CONFIG_RADIX_TREE_MULTIORDER + return iter->shift; +#else + return 0; +#endif +} + #define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */ #define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */ #define RADIX_TREE_ITER_CONTIG 0x0200 /* stop at first hole */ @@ -399,9 +408,16 @@ static inline __must_check void **radix_tree_iter_retry(struct radix_tree_iter *iter) { iter->next_index = iter->index; + iter->tags = 0; return NULL; } +static inline unsigned long +__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots) +{ + return iter->index + (slots << iter_shift(iter)); +} + /** * radix_tree_iter_next - resume iterating when the chunk may be invalid * @iter: iterator state @@ -413,7 +429,7 @@ void **radix_tree_iter_retry(struct radix_tree_iter *iter) static inline __must_check void **radix_tree_iter_next(struct radix_tree_iter *iter) { - iter->next_index = iter->index + 1; + iter->next_index = __radix_tree_iter_add(iter, 1); iter->tags = 0; return NULL; } @@ -427,7 +443,12 @@ void **radix_tree_iter_next(struct radix_tree_iter *iter) static __always_inline long radix_tree_chunk_size(struct radix_tree_iter *iter) { - return iter->next_index - iter->index; + return (iter->next_index - iter->index) >> iter_shift(iter); +} + +static inline struct radix_tree_node *entry_to_node(void *ptr) +{ + return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE); } /** @@ -445,24 +466,49 @@ static __always_inline void ** radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) { if (flags & RADIX_TREE_ITER_TAGGED) { + void *canon = slot; + iter->tags >>= 1; + if (unlikely(!iter->tags)) + return NULL; + while (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) && + radix_tree_is_internal_node(slot[1])) { + if (entry_to_node(slot[1]) == canon) { + iter->tags >>= 1; + iter->index = __radix_tree_iter_add(iter, 1); + slot++; + continue; + } + iter->next_index = __radix_tree_iter_add(iter, 1); + return NULL; + } if (likely(iter->tags & 1ul)) { - iter->index++; + iter->index = __radix_tree_iter_add(iter, 1); return slot + 1; } - if (!(flags & RADIX_TREE_ITER_CONTIG) && likely(iter->tags)) { + if (!(flags & RADIX_TREE_ITER_CONTIG)) { unsigned offset = __ffs(iter->tags); iter->tags >>= offset; - iter->index += offset + 1; + iter->index = __radix_tree_iter_add(iter, offset + 1); return slot + offset + 1; } } else { - long size = 
radix_tree_chunk_size(iter); + long count = radix_tree_chunk_size(iter); + void *canon = slot; - while (--size > 0) { + while (--count > 0) { slot++; - iter->index++; + iter->index = __radix_tree_iter_add(iter, 1); + + if (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) && + radix_tree_is_internal_node(*slot)) { + if (entry_to_node(*slot) == canon) + continue; + iter->next_index = iter->index; + break; + } + if (likely(*slot)) return slot; if (flags & RADIX_TREE_ITER_CONTIG) { @@ -476,34 +522,6 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) } /** - * radix_tree_for_each_chunk - iterate over chunks - * - * @slot: the void** variable for pointer to chunk first slot - * @root: the struct radix_tree_root pointer - * @iter: the struct radix_tree_iter pointer - * @start: iteration starting index - * @flags: RADIX_TREE_ITER_* and tag index - * - * Locks can be released and reacquired between iterations. - */ -#define radix_tree_for_each_chunk(slot, root, iter, start, flags) \ - for (slot = radix_tree_iter_init(iter, start) ; \ - (slot = radix_tree_next_chunk(root, iter, flags)) ;) - -/** - * radix_tree_for_each_chunk_slot - iterate over slots in one chunk - * - * @slot: the void** variable, at the beginning points to chunk first slot - * @iter: the struct radix_tree_iter pointer - * @flags: RADIX_TREE_ITER_*, should be constant - * - * This macro is designed to be nested inside radix_tree_for_each_chunk(). - * @slot points to the radix tree slot, @iter->index contains its index. - */ -#define radix_tree_for_each_chunk_slot(slot, iter, flags) \ - for (; slot ; slot = radix_tree_next_slot(slot, iter, flags)) - -/** * radix_tree_for_each_slot - iterate over non-empty slots * * @slot: the void** variable for pointer to slot diff --git a/include/linux/random.h b/include/linux/random.h index 9c29122037f9..3d6e9815cd85 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -26,7 +26,6 @@ extern void get_random_bytes(void *buf, int nbytes); extern int add_random_ready_callback(struct random_ready_callback *rdy); extern void del_random_ready_callback(struct random_ready_callback *rdy); extern void get_random_bytes_arch(void *buf, int nbytes); -void generate_random_uuid(unsigned char uuid_out[16]); extern int random_int_secret_init(void); #ifndef MODULE @@ -96,27 +95,27 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed) #ifdef CONFIG_ARCH_RANDOM # include <asm/archrandom.h> #else -static inline int arch_get_random_long(unsigned long *v) +static inline bool arch_get_random_long(unsigned long *v) { return 0; } -static inline int arch_get_random_int(unsigned int *v) +static inline bool arch_get_random_int(unsigned int *v) { return 0; } -static inline int arch_has_random(void) +static inline bool arch_has_random(void) { return 0; } -static inline int arch_get_random_seed_long(unsigned long *v) +static inline bool arch_get_random_seed_long(unsigned long *v) { return 0; } -static inline int arch_get_random_seed_int(unsigned int *v) +static inline bool arch_get_random_seed_int(unsigned int *v) { return 0; } -static inline int arch_has_random_seed(void) +static inline bool arch_has_random_seed(void) { return 0; } diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h index 18102529254e..57c9e0622a38 100644 --- a/include/linux/ratelimit.h +++ b/include/linux/ratelimit.h @@ -2,11 +2,15 @@ #define _LINUX_RATELIMIT_H #include <linux/param.h> +#include <linux/sched.h> #include <linux/spinlock.h> #define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) 
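Stepping back to the radix-tree iterator rework above: the shift-aware arithmetic in __radix_tree_iter_add() and radix_tree_next_slot() exists so the standard consumer loop keeps its shape under CONFIG_RADIX_TREE_MULTIORDER. A minimal sketch of that consumer pattern (example_dump is illustrative; the macro and helpers are the header's own):

static void example_dump(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	void **slot;

	/* iter.index now advances by 1 << iter_shift(&iter) per slot,
	 * so multiorder entries are stepped over transparently.
	 */
	radix_tree_for_each_slot(slot, root, &iter, 0)
		pr_info("index %lu -> %p\n", iter.index,
			radix_tree_deref_slot(slot));
}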
#define DEFAULT_RATELIMIT_BURST 10 +/* issue num suppressed message on exit */ +#define RATELIMIT_MSG_ON_RELEASE BIT(0) + struct ratelimit_state { raw_spinlock_t lock; /* protect the state */ @@ -15,6 +19,7 @@ struct ratelimit_state { int printed; int missed; unsigned long begin; + unsigned long flags; }; #define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \ @@ -34,12 +39,35 @@ struct ratelimit_state { static inline void ratelimit_state_init(struct ratelimit_state *rs, int interval, int burst) { + memset(rs, 0, sizeof(*rs)); + raw_spin_lock_init(&rs->lock); - rs->interval = interval; - rs->burst = burst; - rs->printed = 0; - rs->missed = 0; - rs->begin = 0; + rs->interval = interval; + rs->burst = burst; +} + +static inline void ratelimit_default_init(struct ratelimit_state *rs) +{ + return ratelimit_state_init(rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); +} + +static inline void ratelimit_state_exit(struct ratelimit_state *rs) +{ + if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) + return; + + if (rs->missed) { + pr_warn("%s: %d output lines suppressed due to ratelimiting\n", + current->comm, rs->missed); + rs->missed = 0; + } +} + +static inline void +ratelimit_set_flags(struct ratelimit_state *rs, unsigned long flags) +{ + rs->flags = flags; } extern struct ratelimit_state printk_ratelimit_state; diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index b6900099ea81..e585018498d5 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -76,6 +76,8 @@ extern struct rb_node *rb_next_postorder(const struct rb_node *); /* Fast replacement of a single node without remove/rebalance/add/rebalance */ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root); +extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, + struct rb_root *root); static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, struct rb_node **rb_link) diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index 14d7b831b63a..d076183e49be 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -130,6 +130,19 @@ __rb_change_child(struct rb_node *old, struct rb_node *new, WRITE_ONCE(root->rb_node, new); } +static inline void +__rb_change_child_rcu(struct rb_node *old, struct rb_node *new, + struct rb_node *parent, struct rb_root *root) +{ + if (parent) { + if (parent->rb_left == old) + rcu_assign_pointer(parent->rb_left, new); + else + rcu_assign_pointer(parent->rb_right, new); + } else + rcu_assign_pointer(root->rb_node, new); +} + extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 17d4f849c65e..8beb98dcf14f 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -488,6 +488,42 @@ static inline void hlist_add_head_rcu(struct hlist_node *n, } /** + * hlist_add_tail_rcu + * @n: the element to add to the hash list. + * @h: the list to add to. + * + * Description: + * Adds the specified element to the specified hlist, + * while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_add_head_rcu() + * or hlist_del_rcu(), running on this same list. 
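On the ratelimit.h additions above: RATELIMIT_MSG_ON_RELEASE turns ratelimit_state_exit() into a summary point for whatever __ratelimit() suppressed during the state's lifetime. A sketch of the intended lifecycle (example_lifecycle is illustrative; __ratelimit() is the existing gate from this header):

static void example_lifecycle(void)
{
	struct ratelimit_state rs;
	int i;

	ratelimit_default_init(&rs);	/* 10 lines per 5 s window */
	ratelimit_set_flags(&rs, RATELIMIT_MSG_ON_RELEASE);

	for (i = 0; i < 100; i++)
		if (__ratelimit(&rs))
			pr_info("noisy event %d\n", i);

	/* With RATELIMIT_MSG_ON_RELEASE set, this prints how many of
	 * the 100 attempts were suppressed, then clears rs.missed.
	 */
	ratelimit_state_exit(&rs);
}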
+ * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. Regardless of the type of CPU, the + * list-traversal primitive must be guarded by rcu_read_lock(). + */ +static inline void hlist_add_tail_rcu(struct hlist_node *n, + struct hlist_head *h) +{ + struct hlist_node *i, *last = NULL; + + for (i = hlist_first_rcu(h); i; i = hlist_next_rcu(i)) + last = i; + + if (last) { + n->next = last->next; + n->pprev = &last->next; + rcu_assign_pointer(hlist_next_rcu(last), n); + } else { + hlist_add_head_rcu(n, h); + } +} + +/** * hlist_add_before_rcu * @n: the new element to add to the hash list. * @next: the existing element to add the new element before. diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 2657aff2725b..1aa62e1a761b 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -45,6 +45,7 @@ #include <linux/bug.h> #include <linux/compiler.h> #include <linux/ktime.h> +#include <linux/irqflags.h> #include <asm/barrier.h> @@ -379,12 +380,13 @@ static inline void rcu_init_nohz(void) * in the inner idle loop. * * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU()) - * will tell RCU that it needs to pay attending, invoke its argument - * (in this example, a call to the do_something_with_RCU() function), + * will tell RCU that it needs to pay attention, invoke its argument + * (in this example, calling the do_something_with_RCU() function), * and then tell RCU to go back to ignoring this CPU. It is permissible - * to nest RCU_NONIDLE() wrappers, but the nesting level is currently - * quite limited. If deeper nesting is required, it will be necessary - * to adjust DYNTICK_TASK_NESTING_VALUE accordingly. + * to nest RCU_NONIDLE() wrappers, but not indefinitely (but the limit is + * on the order of a million or so, even on 32-bit systems). It is + * not legal to block within RCU_NONIDLE(), nor is it permissible to + * transfer control either into or out of RCU_NONIDLE()'s statement. */ #define RCU_NONIDLE(a) \ do { \ @@ -508,14 +510,7 @@ int rcu_read_lock_bh_held(void); * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side * critical section unless it can prove otherwise. */ -#ifdef CONFIG_PREEMPT_COUNT int rcu_read_lock_sched_held(void); -#else /* #ifdef CONFIG_PREEMPT_COUNT */ -static inline int rcu_read_lock_sched_held(void) -{ - return 1; -} -#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */ #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ @@ -532,18 +527,10 @@ static inline int rcu_read_lock_bh_held(void) return 1; } -#ifdef CONFIG_PREEMPT_COUNT static inline int rcu_read_lock_sched_held(void) { - return preempt_count() != 0 || irqs_disabled(); + return !preemptible(); } -#else /* #ifdef CONFIG_PREEMPT_COUNT */ -static inline int rcu_read_lock_sched_held(void) -{ - return 1; -} -#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */ - #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ #ifdef CONFIG_PROVE_RCU @@ -626,6 +613,12 @@ static inline void rcu_preempt_sleep_check(void) rcu_dereference_sparse(p, space); \ ((typeof(*p) __force __kernel *)(p)); \ }) +#define rcu_dereference_raw(p) \ +({ \ + /* Dependency order vs. p above. 
*/ \ + typeof(p) ________p1 = lockless_dereference(p); \ + ((typeof(*p) __force __kernel *)(________p1)); \ +}) /** * RCU_INITIALIZER() - statically initialize an RCU-protected global variable @@ -664,7 +657,16 @@ static inline void rcu_preempt_sleep_check(void) * please be careful when making changes to rcu_assign_pointer() and the * other macros that it invokes. */ -#define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v)) +#define rcu_assign_pointer(p, v) \ +({ \ + uintptr_t _r_a_p__v = (uintptr_t)(v); \ + \ + if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ + WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ + else \ + smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ + _r_a_p__v; \ +}) /** * rcu_access_pointer() - fetch RCU pointer with no dereferencing @@ -744,8 +746,6 @@ static inline void rcu_preempt_sleep_check(void) __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \ __rcu) -#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/ - /* * The tracing infrastructure traces RCU (we want that), but unfortunately * some of the RCU checks causes tracing to lock up the system. @@ -1144,4 +1144,17 @@ static inline void rcu_sysidle_force_exit(void) #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ +/* + * Dump the ftrace buffer, but only one time per callsite per boot. + */ +#define rcu_ftrace_dump(oops_dump_mode) \ +do { \ + static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \ + \ + if (!atomic_read(&___rfd_beenhere) && \ + !atomic_xchg(&___rfd_beenhere, 1)) \ + ftrace_dump(oops_dump_mode); \ +} while (0) + + #endif /* __LINUX_RCUPDATE_H */ diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 64809aea661c..ac81e4063b40 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -149,6 +149,22 @@ static inline unsigned long rcu_batches_completed_sched(void) return 0; } +/* + * Return the number of expedited grace periods completed. + */ +static inline unsigned long rcu_exp_batches_completed(void) +{ + return 0; +} + +/* + * Return the number of expedited sched grace periods completed. + */ +static inline unsigned long rcu_exp_batches_completed_sched(void) +{ + return 0; +} + static inline void rcu_force_quiescent_state(void) { } @@ -227,4 +243,11 @@ static inline void rcu_all_qs(void) barrier(); /* Avoid RCU read-side critical sections leaking across. 
*/
}

+/* RCUtree hotplug events */
+#define rcutree_prepare_cpu	NULL
+#define rcutree_online_cpu	NULL
+#define rcutree_offline_cpu	NULL
+#define rcutree_dead_cpu	NULL
+#define rcutree_dying_cpu	NULL
+
 #endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index ad1eda9fa4da..63a4e4cf40a5 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -87,6 +87,8 @@ unsigned long rcu_batches_started_sched(void);
 unsigned long rcu_batches_completed(void);
 unsigned long rcu_batches_completed_bh(void);
 unsigned long rcu_batches_completed_sched(void);
+unsigned long rcu_exp_batches_completed(void);
+unsigned long rcu_exp_batches_completed_sched(void);
 void show_rcu_gp_kthreads(void);
 void rcu_force_quiescent_state(void);
@@ -109,4 +111,11 @@ bool rcu_is_watching(void);
 void rcu_all_qs(void);
+/* RCUtree hotplug events */
+int rcutree_prepare_cpu(unsigned int cpu);
+int rcutree_online_cpu(unsigned int cpu);
+int rcutree_offline_cpu(unsigned int cpu);
+int rcutree_dead_cpu(unsigned int cpu);
+int rcutree_dying_cpu(unsigned int cpu);
+
 #endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 3dc08ce15426..2c12cc5af744 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -95,6 +95,45 @@ struct reg_sequence {
 #define regmap_fields_force_update_bits(field, id, mask, val) \
	regmap_fields_update_bits_base(field, id, mask, val, NULL, false, true)
+/**
+ * regmap_read_poll_timeout - Poll until a condition is met or a timeout occurs
+ * @map: Regmap to read from
+ * @addr: Address to poll
+ * @val: Unsigned integer variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0 means tight-loop).
+ *            Should be less than ~20ms since usleep_range
+ *            is used (see Documentation/timers/timers-howto.txt).
+ * @timeout_us: Timeout in us, 0 means never time out
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout, or the regmap_read
+ * error return value in case of a read error. In the first two cases,
+ * the last read value at @addr is stored in @val. Must not be called
+ * from atomic context if sleep_us or timeout_us are used.
+ *
+ * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
+ */
+#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
+({ \
+	ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
+	int ret; \
+	might_sleep_if(sleep_us); \
+	for (;;) { \
+		ret = regmap_read((map), (addr), &(val)); \
+		if (ret) \
+			break; \
+		if (cond) \
+			break; \
+		if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
+			ret = regmap_read((map), (addr), &(val)); \
+			break; \
+		} \
+		if (sleep_us) \
+			usleep_range((sleep_us >> 2) + 1, sleep_us); \
+	} \
+	ret ?: ((cond) ? 0 : -ETIMEDOUT); \
+})
+
 #ifdef CONFIG_REGMAP
 enum regmap_endian {
@@ -851,6 +890,12 @@ struct regmap_irq {
 * @num_type_reg: Number of type registers.
 * @type_reg_stride: Stride to use for chips where type registers are not
 *	contiguous.
+ * @handle_pre_irq: Driver specific callback to handle interrupt from device
+ *	before regmap_irq_handler() processes the interrupts.
+ * @handle_post_irq: Driver specific callback to handle interrupt from device
+ *	after the interrupts are handled in regmap_irq_handler().
+ * @irq_drv_data: Driver specific IRQ data which is passed as parameter when
+ *	the driver specific pre/post interrupt handler is called.
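+ *
+ * A hedged sketch of a pre/post handler pair (foo_chip and its helpers
+ * are hypothetical; the hooks simply bracket the interrupt servicing
+ * done by regmap_irq_handler()):
+ *
+ *	static int foo_handle_pre_irq(void *irq_drv_data)
+ *	{
+ *		struct foo_chip *foo = irq_drv_data;
+ *
+ *		return foo_pause_comms(foo);
+ *	}
+ *
+ *	static int foo_handle_post_irq(void *irq_drv_data)
+ *	{
+ *		struct foo_chip *foo = irq_drv_data;
+ *
+ *		return foo_resume_comms(foo);
+ *	}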
*/ struct regmap_irq_chip { const char *name; @@ -877,6 +922,10 @@ struct regmap_irq_chip { int num_type_reg; unsigned int type_reg_stride; + + int (*handle_pre_irq)(void *irq_drv_data); + int (*handle_post_irq)(void *irq_drv_data); + void *irq_drv_data; }; struct regmap_irq_chip_data; diff --git a/include/linux/regulator/act8865.h b/include/linux/regulator/act8865.h index 2eb386017fa5..113d861a1e4c 100644 --- a/include/linux/regulator/act8865.h +++ b/include/linux/regulator/act8865.h @@ -69,11 +69,13 @@ enum { * @id: regulator id * @name: regulator name * @init_data: regulator init data + * @of_node: device tree node (optional) */ struct act8865_regulator_data { int id; const char *name; struct regulator_init_data *init_data; + struct device_node *of_node; }; /** diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 48603506f8de..cae500b2c1d7 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -224,7 +224,6 @@ int regulator_bulk_force_disable(int num_consumers, void regulator_bulk_free(int num_consumers, struct regulator_bulk_data *consumers); -int regulator_can_change_voltage(struct regulator *regulator); int regulator_count_voltages(struct regulator *regulator); int regulator_list_voltage(struct regulator *regulator, unsigned selector); int regulator_is_supported_voltage(struct regulator *regulator, @@ -436,11 +435,6 @@ static inline void regulator_bulk_free(int num_consumers, { } -static inline int regulator_can_change_voltage(struct regulator *regulator) -{ - return 0; -} - static inline int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV) { diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h index a43a5ca1167b..80cb40b7c88d 100644 --- a/include/linux/regulator/da9211.h +++ b/include/linux/regulator/da9211.h @@ -1,5 +1,6 @@ /* - * da9211.h - Regulator device driver for DA9211/DA9213/DA9215 + * da9211.h - Regulator device driver for DA9211/DA9212 + * /DA9213/DA9214/DA9215 * Copyright (C) 2015 Dialog Semiconductor Ltd. * * This program is free software; you can redistribute it and/or @@ -22,7 +23,9 @@ enum da9211_chip_id { DA9211, + DA9212, DA9213, + DA9214, DA9215, }; diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index cd271e89a7e6..fcfa40a6692c 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -93,6 +93,9 @@ struct regulator_linear_range { * @get_current_limit: Get the configured limit for a current-limited regulator. * @set_input_current_limit: Configure an input limit. * + * @set_over_current_protection: Support capability of automatically shutting + * down when detecting an over current event. + * * @set_active_discharge: Set active discharge enable/disable of regulators. * * @set_mode: Set the configured operating mode for the regulator. 
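 *
 * An illustrative @set_over_current_protection implementation (the FOO_*
 * register names and foo_ prefix are hypothetical; the callback just
 * arms the hardware's automatic shutdown):
 *
 *	static int foo_set_ocp(struct regulator_dev *rdev)
 *	{
 *		return regmap_update_bits(rdev->regmap, FOO_PROT_REG,
 *					  FOO_OCP_EN, FOO_OCP_EN);
 *	}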
@@ -255,6 +258,8 @@ enum regulator_type { * * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_ * @vsel_mask: Mask for register bitfield used for selector + * @csel_reg: Register for TPS65218 LS3 current regulator + * @csel_mask: Mask for TPS65218 LS3 current regulator * @apply_reg: Register for initiate voltage change on the output when * using regulator_set_voltage_sel_regmap * @apply_bit: Register bitfield used for initiate voltage change on the @@ -292,7 +297,7 @@ struct regulator_desc { const struct regulator_desc *, struct regulator_config *); int id; - bool continuous_voltage_range; + unsigned int continuous_voltage_range:1; unsigned n_voltages; const struct regulator_ops *ops; int irq; diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 5d627c83a630..ad3e5158e586 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -97,6 +97,7 @@ struct regulator_state { * @ramp_disable: Disable ramp delay when initialising or when setting voltage. * @soft_start: Enable soft start so that voltage ramps slowly. * @pull_down: Enable pull down when regulator is disabled. + * @over_current_protection: Auto disable on over current event. * * @input_uV: Input voltage for regulator when supplied by another regulator. * diff --git a/include/linux/regulator/max8973-regulator.h b/include/linux/regulator/max8973-regulator.h index f6a8a16a0d4d..2fcb9980262a 100644 --- a/include/linux/regulator/max8973-regulator.h +++ b/include/linux/regulator/max8973-regulator.h @@ -54,6 +54,10 @@ * @reg_init_data: The regulator init data. * @control_flags: Control flags which are ORed value of above flags to * configure device. + * @junction_temp_warning: Junction temp in millicelcius on which warning need + * to be set. Thermal functionality is only supported on + * MAX77621. The threshold warning supported by MAX77621 + * are 120C and 140C. * @enable_ext_control: Enable the voltage enable/disable through external * control signal from EN input pin. If it is false then * voltage output will be enabled/disabled through EN bit of @@ -67,6 +71,7 @@ struct max8973_regulator_platform_data { struct regulator_init_data *reg_init_data; unsigned long control_flags; + unsigned long junction_temp_warning; bool enable_ext_control; int enable_gpio; int dvs_gpio; diff --git a/include/linux/regulator/mt6323-regulator.h b/include/linux/regulator/mt6323-regulator.h new file mode 100644 index 000000000000..67011cd1ce55 --- /dev/null +++ b/include/linux/regulator/mt6323-regulator.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2016 MediaTek Inc. + * Author: Chen Zhong <chen.zhong@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __LINUX_REGULATOR_MT6323_H +#define __LINUX_REGULATOR_MT6323_H + +enum { + MT6323_ID_VPROC = 0, + MT6323_ID_VSYS, + MT6323_ID_VPA, + MT6323_ID_VTCXO, + MT6323_ID_VCN28, + MT6323_ID_VCN33_BT, + MT6323_ID_VCN33_WIFI, + MT6323_ID_VA, + MT6323_ID_VCAMA, + MT6323_ID_VIO28 = 9, + MT6323_ID_VUSB, + MT6323_ID_VMC, + MT6323_ID_VMCH, + MT6323_ID_VEMC3V3, + MT6323_ID_VGP1, + MT6323_ID_VGP2, + MT6323_ID_VGP3, + MT6323_ID_VCN18, + MT6323_ID_VSIM1, + MT6323_ID_VSIM2, + MT6323_ID_VRTC, + MT6323_ID_VCAMAF, + MT6323_ID_VIBR, + MT6323_ID_VRF18, + MT6323_ID_VM, + MT6323_ID_VIO18, + MT6323_ID_VCAMD, + MT6323_ID_VCAMIO, + MT6323_ID_RG_MAX, +}; + +#define MT6323_MAX_REGULATOR MT6323_ID_RG_MAX + +#endif /* __LINUX_REGULATOR_MT6323_H */ diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 9c4e1384f636..1c457a8dd5a6 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -365,6 +365,8 @@ enum rproc_state { /** * enum rproc_crash_type - remote processor crash types * @RPROC_MMUFAULT: iommu fault + * @RPROC_WATCHDOG: watchdog bite + * @RPROC_FATAL_ERROR fatal error * * Each element of the enum is used as an array index. So that, the value of * the elements should be always something sane. @@ -373,6 +375,8 @@ enum rproc_state { */ enum rproc_crash_type { RPROC_MMUFAULT, + RPROC_WATCHDOG, + RPROC_FATAL_ERROR, }; /** diff --git a/include/linux/reservation.h b/include/linux/reservation.h index 5a0b64cf68b4..b0f305e77b7f 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h @@ -49,12 +49,27 @@ extern struct ww_class reservation_ww_class; extern struct lock_class_key reservation_seqcount_class; extern const char reservation_seqcount_string[]; +/** + * struct reservation_object_list - a list of shared fences + * @rcu: for internal use + * @shared_count: table of shared fences + * @shared_max: for growing shared fence table + * @shared: shared fence table + */ struct reservation_object_list { struct rcu_head rcu; u32 shared_count, shared_max; struct fence __rcu *shared[]; }; +/** + * struct reservation_object - a reservation object manages fences for a buffer + * @lock: update side lock + * @seq: sequence count for managing RCU read-side synchronization + * @fence_excl: the exclusive fence, if there is one currently + * @fence: list of current shared fences + * @staged: staged copy of shared fences for RCU updates + */ struct reservation_object { struct ww_mutex lock; seqcount_t seq; @@ -68,6 +83,10 @@ struct reservation_object { #define reservation_object_assert_held(obj) \ lockdep_assert_held(&(obj)->lock.base) +/** + * reservation_object_init - initialize a reservation object + * @obj: the reservation object + */ static inline void reservation_object_init(struct reservation_object *obj) { @@ -79,6 +98,10 @@ reservation_object_init(struct reservation_object *obj) obj->staged = NULL; } +/** + * reservation_object_fini - destroys a reservation object + * @obj: the reservation object + */ static inline void reservation_object_fini(struct reservation_object *obj) { @@ -106,6 +129,14 @@ reservation_object_fini(struct reservation_object *obj) ww_mutex_destroy(&obj->lock); } +/** + * reservation_object_get_list - get the reservation object's + * shared fence list, with update-side lock held + * @obj: the reservation object + * + * Returns the shared fence list. Does NOT take references to + * the fence. The obj->lock must be held. 
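+ *
+ * A minimal access sketch, assuming the caller already holds the
+ * update-side lock (obj and fobj are illustrative):
+ *
+ *	ww_mutex_lock(&obj->lock, NULL);
+ *	fobj = reservation_object_get_list(obj);
+ *	if (fobj)
+ *		... walk fobj->shared[0 .. fobj->shared_count) ...
+ *	ww_mutex_unlock(&obj->lock);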
+ */ static inline struct reservation_object_list * reservation_object_get_list(struct reservation_object *obj) { @@ -113,6 +144,17 @@ reservation_object_get_list(struct reservation_object *obj) reservation_object_held(obj)); } +/** + * reservation_object_get_excl - get the reservation object's + * exclusive fence, with update-side lock held + * @obj: the reservation object + * + * Returns the exclusive fence (if any). Does NOT take a + * reference. The obj->lock must be held. + * + * RETURNS + * The exclusive fence or NULL + */ static inline struct fence * reservation_object_get_excl(struct reservation_object *obj) { @@ -120,6 +162,35 @@ reservation_object_get_excl(struct reservation_object *obj) reservation_object_held(obj)); } +/** + * reservation_object_get_excl_rcu - get the reservation object's + * exclusive fence, without lock held. + * @obj: the reservation object + * + * If there is an exclusive fence, this atomically increments it's + * reference count and returns it. + * + * RETURNS + * The exclusive fence or NULL if none + */ +static inline struct fence * +reservation_object_get_excl_rcu(struct reservation_object *obj) +{ + struct fence *fence; + unsigned seq; +retry: + seq = read_seqcount_begin(&obj->seq); + rcu_read_lock(); + fence = rcu_dereference(obj->fence_excl); + if (read_seqcount_retry(&obj->seq, seq)) { + rcu_read_unlock(); + goto retry; + } + fence = fence_get(fence); + rcu_read_unlock(); + return fence; +} + int reservation_object_reserve_shared(struct reservation_object *obj); void reservation_object_add_shared_fence(struct reservation_object *obj, struct fence *fence); diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h index a3a5bcdb1d02..db1fe6772ad5 100644 --- a/include/linux/reset-controller.h +++ b/include/linux/reset-controller.h @@ -31,6 +31,7 @@ struct of_phandle_args; * @ops: a pointer to device specific struct reset_control_ops * @owner: kernel module of the reset controller driver * @list: internal list of reset controller devices + * @reset_control_head: head of internal list of requested reset controls * @of_node: corresponding device tree node as phandle target * @of_reset_n_cells: number of cells in reset line specifiers * @of_xlate: translation function to translate from specifier as found in the @@ -41,6 +42,7 @@ struct reset_controller_dev { const struct reset_control_ops *ops; struct module *owner; struct list_head list; + struct list_head reset_control_head; struct device_node *of_node; int of_reset_n_cells; int (*of_xlate)(struct reset_controller_dev *rcdev, @@ -51,4 +53,8 @@ struct reset_controller_dev { int reset_controller_register(struct reset_controller_dev *rcdev); void reset_controller_unregister(struct reset_controller_dev *rcdev); +struct device; +int devm_reset_controller_register(struct device *dev, + struct reset_controller_dev *rcdev); + #endif diff --git a/include/linux/reset.h b/include/linux/reset.h index c4c097de0ba9..5daff15722d3 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -1,8 +1,8 @@ #ifndef _LINUX_RESET_H_ #define _LINUX_RESET_H_ -struct device; -struct device_node; +#include <linux/device.h> + struct reset_control; #ifdef CONFIG_RESET_CONTROLLER @@ -12,9 +12,11 @@ int reset_control_assert(struct reset_control *rstc); int reset_control_deassert(struct reset_control *rstc); int reset_control_status(struct reset_control *rstc); -struct reset_control *reset_control_get(struct device *dev, const char *id); +struct reset_control *__of_reset_control_get(struct device_node *node, 
+					const char *id, int index, int shared);
 void reset_control_put(struct reset_control *rstc);
-struct reset_control *devm_reset_control_get(struct device *dev, const char *id);
+struct reset_control *__devm_reset_control_get(struct device *dev,
+					const char *id, int index, int shared);
 
 int __must_check device_reset(struct device *dev);
 
@@ -23,24 +25,6 @@ static inline int device_reset_optional(struct device *dev)
 	return device_reset(dev);
 }
 
-static inline struct reset_control *reset_control_get_optional(
-					struct device *dev, const char *id)
-{
-	return reset_control_get(dev, id);
-}
-
-static inline struct reset_control *devm_reset_control_get_optional(
-					struct device *dev, const char *id)
-{
-	return devm_reset_control_get(dev, id);
-}
-
-struct reset_control *of_reset_control_get(struct device_node *node,
-					const char *id);
-
-struct reset_control *of_reset_control_get_by_index(
-					struct device_node *node, int index);
-
 #else
 
 static inline int reset_control_reset(struct reset_control *rstc)
@@ -72,49 +56,314 @@ static inline void reset_control_put(struct reset_control *rstc)
 	WARN_ON(1);
 }
 
+static inline int __must_check device_reset(struct device *dev)
+{
+	WARN_ON(1);
+	return -ENOTSUPP;
+}
+
 static inline int device_reset_optional(struct device *dev)
 {
 	return -ENOTSUPP;
 }
 
-static inline struct reset_control *__must_check reset_control_get(
-					struct device *dev, const char *id)
+static inline struct reset_control *__of_reset_control_get(
+					struct device_node *node,
+					const char *id, int index, int shared)
+{
+	return ERR_PTR(-ENOTSUPP);
+}
+
+static inline struct reset_control *__devm_reset_control_get(
+					struct device *dev,
+					const char *id, int index, int shared)
 {
+	return ERR_PTR(-ENOTSUPP);
+}
+
+#endif /* CONFIG_RESET_CONTROLLER */
+
+/**
+ * reset_control_get_exclusive - Lookup and obtain an exclusive reference
+ *                               to a reset controller.
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ * If this function is called more than once for the same reset_control it
+ * will return -EBUSY.
+ *
+ * See reset_control_get_shared() for details on shared references to
+ * reset-controls.
+ *
+ * Use of id names is optional.
+ */
+static inline struct reset_control *
+__must_check reset_control_get_exclusive(struct device *dev, const char *id)
+{
+#ifndef CONFIG_RESET_CONTROLLER
 	WARN_ON(1);
-	return ERR_PTR(-EINVAL);
+#endif
+	return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
+}
+
+/**
+ * reset_control_get_shared - Lookup and obtain a shared reference to a
+ *                            reset controller.
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ * This function is intended for use with reset-controls which are shared
+ * between hardware-blocks.
+ *
+ * When a reset-control is shared, the behavior of reset_control_assert /
+ * deassert is changed; the reset core will keep track of a deassert_count
+ * and only (re-)assert the reset after reset_control_assert has been called
+ * as many times as reset_control_deassert was called. Also see the remark
+ * about shared reset-controls in the reset_control_assert docs.
+ *
+ * Calling reset_control_assert without first calling reset_control_deassert
+ * is not allowed on a shared reset control. Calling reset_control_reset is
+ * also not allowed on a shared reset control.
+ *
+ * Use of id names is optional.
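+ *
+ * A hedged consumer sketch ("bus" is an illustrative line name):
+ *
+ *	rstc = reset_control_get_shared(dev, "bus");
+ *	if (IS_ERR(rstc))
+ *		return PTR_ERR(rstc);
+ *	reset_control_deassert(rstc);
+ *	...
+ *	reset_control_assert(rstc);
+ *	reset_control_put(rstc);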
+ */ +static inline struct reset_control *reset_control_get_shared( + struct device *dev, const char *id) +{ + return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1); } -static inline struct reset_control *__must_check devm_reset_control_get( +static inline struct reset_control *reset_control_get_optional_exclusive( struct device *dev, const char *id) { + return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0); +} + +static inline struct reset_control *reset_control_get_optional_shared( + struct device *dev, const char *id) +{ + return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1); +} + +/** + * of_reset_control_get_exclusive - Lookup and obtain an exclusive reference + * to a reset controller. + * @node: device to be reset by the controller + * @id: reset line name + * + * Returns a struct reset_control or IS_ERR() condition containing errno. + * + * Use of id names is optional. + */ +static inline struct reset_control *of_reset_control_get_exclusive( + struct device_node *node, const char *id) +{ + return __of_reset_control_get(node, id, 0, 0); +} + +/** + * of_reset_control_get_shared - Lookup and obtain an shared reference + * to a reset controller. + * @node: device to be reset by the controller + * @id: reset line name + * + * When a reset-control is shared, the behavior of reset_control_assert / + * deassert is changed, the reset-core will keep track of a deassert_count + * and only (re-)assert the reset after reset_control_assert has been called + * as many times as reset_control_deassert was called. Also see the remark + * about shared reset-controls in the reset_control_assert docs. + * + * Calling reset_control_assert without first calling reset_control_deassert + * is not allowed on a shared reset control. Calling reset_control_reset is + * also not allowed on a shared reset control. + * Returns a struct reset_control or IS_ERR() condition containing errno. + * + * Use of id names is optional. + */ +static inline struct reset_control *of_reset_control_get_shared( + struct device_node *node, const char *id) +{ + return __of_reset_control_get(node, id, 0, 1); +} + +/** + * of_reset_control_get_exclusive_by_index - Lookup and obtain an exclusive + * reference to a reset controller + * by index. + * @node: device to be reset by the controller + * @index: index of the reset controller + * + * This is to be used to perform a list of resets for a device or power domain + * in whatever order. Returns a struct reset_control or IS_ERR() condition + * containing errno. + */ +static inline struct reset_control *of_reset_control_get_exclusive_by_index( + struct device_node *node, int index) +{ + return __of_reset_control_get(node, NULL, index, 0); +} + +/** + * of_reset_control_get_shared_by_index - Lookup and obtain an shared + * reference to a reset controller + * by index. + * @node: device to be reset by the controller + * @index: index of the reset controller + * + * When a reset-control is shared, the behavior of reset_control_assert / + * deassert is changed, the reset-core will keep track of a deassert_count + * and only (re-)assert the reset after reset_control_assert has been called + * as many times as reset_control_deassert was called. Also see the remark + * about shared reset-controls in the reset_control_assert docs. + * + * Calling reset_control_assert without first calling reset_control_deassert + * is not allowed on a shared reset control. Calling reset_control_reset is + * also not allowed on a shared reset control. 
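+ *
+ * Illustrative counting semantics (rstc_a and rstc_b are hypothetical
+ * handles to the same shared line, held by two consumers):
+ *
+ *	reset_control_deassert(rstc_a);	... line is deasserted ...
+ *	reset_control_deassert(rstc_b);	... still deasserted ...
+ *	reset_control_assert(rstc_a);	... still deasserted ...
+ *	reset_control_assert(rstc_b);	... only now asserted again ...
+ *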
+ * Returns a struct reset_control or IS_ERR() condition containing errno. + * + * This is to be used to perform a list of resets for a device or power domain + * in whatever order. Returns a struct reset_control or IS_ERR() condition + * containing errno. + */ +static inline struct reset_control *of_reset_control_get_shared_by_index( + struct device_node *node, int index) +{ + return __of_reset_control_get(node, NULL, index, 1); +} + +/** + * devm_reset_control_get_exclusive - resource managed + * reset_control_get_exclusive() + * @dev: device to be reset by the controller + * @id: reset line name + * + * Managed reset_control_get_exclusive(). For reset controllers returned + * from this function, reset_control_put() is called automatically on driver + * detach. + * + * See reset_control_get_exclusive() for more information. + */ +static inline struct reset_control * +__must_check devm_reset_control_get_exclusive(struct device *dev, + const char *id) +{ +#ifndef CONFIG_RESET_CONTROLLER WARN_ON(1); - return ERR_PTR(-EINVAL); +#endif + return __devm_reset_control_get(dev, id, 0, 0); } -static inline struct reset_control *reset_control_get_optional( +/** + * devm_reset_control_get_shared - resource managed reset_control_get_shared() + * @dev: device to be reset by the controller + * @id: reset line name + * + * Managed reset_control_get_shared(). For reset controllers returned from + * this function, reset_control_put() is called automatically on driver detach. + * See reset_control_get_shared() for more information. + */ +static inline struct reset_control *devm_reset_control_get_shared( struct device *dev, const char *id) { - return ERR_PTR(-ENOTSUPP); + return __devm_reset_control_get(dev, id, 0, 1); } -static inline struct reset_control *devm_reset_control_get_optional( +static inline struct reset_control *devm_reset_control_get_optional_exclusive( struct device *dev, const char *id) { - return ERR_PTR(-ENOTSUPP); + return __devm_reset_control_get(dev, id, 0, 0); +} + +static inline struct reset_control *devm_reset_control_get_optional_shared( + struct device *dev, const char *id) +{ + return __devm_reset_control_get(dev, id, 0, 1); +} + +/** + * devm_reset_control_get_exclusive_by_index - resource managed + * reset_control_get_exclusive() + * @dev: device to be reset by the controller + * @index: index of the reset controller + * + * Managed reset_control_get_exclusive(). For reset controllers returned from + * this function, reset_control_put() is called automatically on driver + * detach. + * + * See reset_control_get_exclusive() for more information. + */ +static inline struct reset_control * +devm_reset_control_get_exclusive_by_index(struct device *dev, int index) +{ + return __devm_reset_control_get(dev, NULL, index, 0); +} + +/** + * devm_reset_control_get_shared_by_index - resource managed + * reset_control_get_shared + * @dev: device to be reset by the controller + * @index: index of the reset controller + * + * Managed reset_control_get_shared(). For reset controllers returned from + * this function, reset_control_put() is called automatically on driver detach. + * See reset_control_get_shared() for more information. 
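+ *
+ * A hedged probe-time sketch (index 0, error handling trimmed):
+ *
+ *	rstc = devm_reset_control_get_shared_by_index(&pdev->dev, 0);
+ *	if (IS_ERR(rstc))
+ *		return PTR_ERR(rstc);
+ *	return reset_control_deassert(rstc);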
+ */ +static inline struct reset_control * +devm_reset_control_get_shared_by_index(struct device *dev, int index) +{ + return __devm_reset_control_get(dev, NULL, index, 1); +} + +/* + * TEMPORARY calls to use during transition: + * + * of_reset_control_get() => of_reset_control_get_exclusive() + * + * These inline function calls will be removed once all consumers + * have been moved over to the new explicit API. + */ +static inline struct reset_control *reset_control_get( + struct device *dev, const char *id) +{ + return reset_control_get_exclusive(dev, id); +} + +static inline struct reset_control *reset_control_get_optional( + struct device *dev, const char *id) +{ + return reset_control_get_optional_exclusive(dev, id); } static inline struct reset_control *of_reset_control_get( struct device_node *node, const char *id) { - return ERR_PTR(-ENOTSUPP); + return of_reset_control_get_exclusive(node, id); } static inline struct reset_control *of_reset_control_get_by_index( struct device_node *node, int index) { - return ERR_PTR(-ENOTSUPP); + return of_reset_control_get_exclusive_by_index(node, index); } -#endif /* CONFIG_RESET_CONTROLLER */ +static inline struct reset_control *devm_reset_control_get( + struct device *dev, const char *id) +{ + return devm_reset_control_get_exclusive(dev, id); +} +static inline struct reset_control *devm_reset_control_get_optional( + struct device *dev, const char *id) +{ + return devm_reset_control_get_optional_exclusive(dev, id); + +} + +static inline struct reset_control *devm_reset_control_get_by_index( + struct device *dev, int index) +{ + return devm_reset_control_get_exclusive_by_index(dev, index); +} #endif diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 63bd7601b6de..3eef0802a0cd 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -346,7 +346,8 @@ struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, struct bucket_table *old_tbl); int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl); -int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter); +int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter, + gfp_t gfp); void rhashtable_walk_exit(struct rhashtable_iter *iter); int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU); void *rhashtable_walk_next(struct rhashtable_iter *iter); diff --git a/include/linux/rio.h b/include/linux/rio.h index aa2323893e8d..37b95c4af99d 100644 --- a/include/linux/rio.h +++ b/include/linux/rio.h @@ -163,6 +163,7 @@ enum rio_device_state { * @dst_ops: Destination operation capabilities * @comp_tag: RIO component tag * @phys_efptr: RIO device extended features pointer + * @phys_rmap: LP-Serial Register Map Type (1 or 2) * @em_efptr: RIO Error Management features pointer * @dma_mask: Mask of bits of RIO address this device implements * @driver: Driver claiming this device @@ -193,6 +194,7 @@ struct rio_dev { u32 dst_ops; u32 comp_tag; u32 phys_efptr; + u32 phys_rmap; u32 em_efptr; u64 dma_mask; struct rio_driver *driver; /* RIO driver claiming this device */ @@ -237,11 +239,6 @@ struct rio_dbell { void *dev_id; }; -enum rio_phy_type { - RIO_PHY_PARALLEL, - RIO_PHY_SERIAL, -}; - /** * struct rio_mport - RIO master port info * @dbells: List of doorbell events @@ -259,8 +256,8 @@ enum rio_phy_type { * @id: Port ID, unique among all ports * @index: Port index, unique among all port interfaces of the same type * @sys_size: RapidIO common transport system size - * @phy_type: RapidIO phy 
type * @phys_efptr: RIO port extended features pointer + * @phys_rmap: LP-Serial EFB Register Mapping type (1 or 2). * @name: Port name string * @dev: device structure associated with an mport * @priv: Master port private data @@ -289,8 +286,8 @@ struct rio_mport { * 0 - Small size. 256 devices. * 1 - Large size, 65536 devices. */ - enum rio_phy_type phy_type; /* RapidIO phy type */ u32 phys_efptr; + u32 phys_rmap; unsigned char name[RIO_MAX_MPORT_NAME]; struct device dev; void *priv; /* Master port private data */ @@ -425,7 +422,7 @@ struct rio_ops { int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf); void *(*get_inb_message)(struct rio_mport *mport, int mbox); int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart, - u64 rstart, u32 size, u32 flags); + u64 rstart, u64 size, u32 flags); void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart); int (*query_mport)(struct rio_mport *mport, struct rio_mport_attr *attr); diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h index 2543bc163d54..334c576c151c 100644 --- a/include/linux/rio_ids.h +++ b/include/linux/rio_ids.h @@ -38,5 +38,7 @@ #define RIO_DID_IDTVPS1616 0x0377 #define RIO_DID_IDTSPS1616 0x0378 #define RIO_DID_TSI721 0x80ab +#define RIO_DID_IDTRXS1632 0x80e5 +#define RIO_DID_IDTRXS2448 0x80e6 #endif /* LINUX_RIO_IDS_H */ diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h index 1063ae382bc2..40c04efe7409 100644 --- a/include/linux/rio_regs.h +++ b/include/linux/rio_regs.h @@ -42,9 +42,11 @@ #define RIO_PEF_INB_MBOX2 0x00200000 /* [II, <= 1.2] Mailbox 2 */ #define RIO_PEF_INB_MBOX3 0x00100000 /* [II, <= 1.2] Mailbox 3 */ #define RIO_PEF_INB_DOORBELL 0x00080000 /* [II, <= 1.2] Doorbells */ +#define RIO_PEF_DEV32 0x00001000 /* [III] PE supports Common TRansport Dev32 */ #define RIO_PEF_EXT_RT 0x00000200 /* [III, 1.3] Extended route table support */ #define RIO_PEF_STD_RT 0x00000100 /* [III, 1.3] Standard route table support */ -#define RIO_PEF_CTLS 0x00000010 /* [III] CTLS */ +#define RIO_PEF_CTLS 0x00000010 /* [III] Common Transport Large System (< rev.3) */ +#define RIO_PEF_DEV16 0x00000010 /* [III] PE Supports Common Transport Dev16 (rev.3) */ #define RIO_PEF_EXT_FEATURES 0x00000008 /* [I] EFT_PTR valid */ #define RIO_PEF_ADDR_66 0x00000004 /* [I] 66 bits */ #define RIO_PEF_ADDR_50 0x00000002 /* [I] 50 bits */ @@ -194,70 +196,101 @@ #define RIO_GET_BLOCK_ID(x) (x & RIO_EFB_ID_MASK) /* Extended Feature Block IDs */ -#define RIO_EFB_PAR_EP_ID 0x0001 /* [IV] LP/LVDS EP Devices */ -#define RIO_EFB_PAR_EP_REC_ID 0x0002 /* [IV] LP/LVDS EP Recovery Devices */ -#define RIO_EFB_PAR_EP_FREE_ID 0x0003 /* [IV] LP/LVDS EP Free Devices */ -#define RIO_EFB_SER_EP_ID_V13P 0x0001 /* [VI] LP/Serial EP Devices, RapidIO Spec ver 1.3 and above */ -#define RIO_EFB_SER_EP_REC_ID_V13P 0x0002 /* [VI] LP/Serial EP Recovery Devices, RapidIO Spec ver 1.3 and above */ -#define RIO_EFB_SER_EP_FREE_ID_V13P 0x0003 /* [VI] LP/Serial EP Free Devices, RapidIO Spec ver 1.3 and above */ -#define RIO_EFB_SER_EP_ID 0x0004 /* [VI] LP/Serial EP Devices */ -#define RIO_EFB_SER_EP_REC_ID 0x0005 /* [VI] LP/Serial EP Recovery Devices */ -#define RIO_EFB_SER_EP_FREE_ID 0x0006 /* [VI] LP/Serial EP Free Devices */ -#define RIO_EFB_SER_EP_FREC_ID 0x0009 /* [VI] LP/Serial EP Free Recovery Devices */ +#define RIO_EFB_SER_EP_M1_ID 0x0001 /* [VI] LP-Serial EP Devices, Map I */ +#define RIO_EFB_SER_EP_SW_M1_ID 0x0002 /* [VI] LP-Serial EP w SW Recovery Devices, Map I */ +#define RIO_EFB_SER_EPF_M1_ID 0x0003 /* [VI] LP-Serial EP Free 
Devices, Map I */ +#define RIO_EFB_SER_EP_ID 0x0004 /* [VI] LP-Serial EP Devices, RIO 1.2 */ +#define RIO_EFB_SER_EP_REC_ID 0x0005 /* [VI] LP-Serial EP w SW Recovery Devices, RIO 1.2 */ +#define RIO_EFB_SER_EP_FREE_ID 0x0006 /* [VI] LP-Serial EP Free Devices, RIO 1.2 */ #define RIO_EFB_ERR_MGMNT 0x0007 /* [VIII] Error Management Extensions */ +#define RIO_EFB_SER_EPF_SW_M1_ID 0x0009 /* [VI] LP-Serial EP Free w SW Recovery Devices, Map I */ +#define RIO_EFB_SW_ROUTING_TBL 0x000E /* [III] Switch Routing Table Block */ +#define RIO_EFB_SER_EP_M2_ID 0x0011 /* [VI] LP-Serial EP Devices, Map II */ +#define RIO_EFB_SER_EP_SW_M2_ID 0x0012 /* [VI] LP-Serial EP w SW Recovery Devices, Map II */ +#define RIO_EFB_SER_EPF_M2_ID 0x0013 /* [VI] LP-Serial EP Free Devices, Map II */ +#define RIO_EFB_ERR_MGMNT_HS 0x0017 /* [VIII] Error Management Extensions, Hot-Swap only */ +#define RIO_EFB_SER_EPF_SW_M2_ID 0x0019 /* [VI] LP-Serial EP Free w SW Recovery Devices, Map II */ /* - * Physical 8/16 LP-LVDS - * ID=0x0001, Generic End Point Devices - * ID=0x0002, Generic End Point Devices, software assisted recovery option - * ID=0x0003, Generic End Point Free Devices - * - * Physical LP-Serial - * ID=0x0004, Generic End Point Devices - * ID=0x0005, Generic End Point Devices, software assisted recovery option - * ID=0x0006, Generic End Point Free Devices + * Physical LP-Serial Registers Definitions + * Parameters in register macros: + * n - port number, m - Register Map Type (1 or 2) */ #define RIO_PORT_MNT_HEADER 0x0000 #define RIO_PORT_REQ_CTL_CSR 0x0020 -#define RIO_PORT_RSP_CTL_CSR 0x0024 /* 0x0001/0x0002 */ -#define RIO_PORT_LINKTO_CTL_CSR 0x0020 /* Serial */ -#define RIO_PORT_RSPTO_CTL_CSR 0x0024 /* Serial */ +#define RIO_PORT_RSP_CTL_CSR 0x0024 +#define RIO_PORT_LINKTO_CTL_CSR 0x0020 +#define RIO_PORT_RSPTO_CTL_CSR 0x0024 #define RIO_PORT_GEN_CTL_CSR 0x003c #define RIO_PORT_GEN_HOST 0x80000000 #define RIO_PORT_GEN_MASTER 0x40000000 #define RIO_PORT_GEN_DISCOVERED 0x20000000 -#define RIO_PORT_N_MNT_REQ_CSR(x) (0x0040 + x*0x20) /* 0x0002 */ +#define RIO_PORT_N_MNT_REQ_CSR(n, m) (0x40 + (n) * (0x20 * (m))) #define RIO_MNT_REQ_CMD_RD 0x03 /* Reset-device command */ #define RIO_MNT_REQ_CMD_IS 0x04 /* Input-status command */ -#define RIO_PORT_N_MNT_RSP_CSR(x) (0x0044 + x*0x20) /* 0x0002 */ +#define RIO_PORT_N_MNT_RSP_CSR(n, m) (0x44 + (n) * (0x20 * (m))) #define RIO_PORT_N_MNT_RSP_RVAL 0x80000000 /* Response Valid */ #define RIO_PORT_N_MNT_RSP_ASTAT 0x000007e0 /* ackID Status */ #define RIO_PORT_N_MNT_RSP_LSTAT 0x0000001f /* Link Status */ -#define RIO_PORT_N_ACK_STS_CSR(x) (0x0048 + x*0x20) /* 0x0002 */ +#define RIO_PORT_N_ACK_STS_CSR(n) (0x48 + (n) * 0x20) /* Only in RM-I */ #define RIO_PORT_N_ACK_CLEAR 0x80000000 #define RIO_PORT_N_ACK_INBOUND 0x3f000000 #define RIO_PORT_N_ACK_OUTSTAND 0x00003f00 #define RIO_PORT_N_ACK_OUTBOUND 0x0000003f -#define RIO_PORT_N_CTL2_CSR(x) (0x0054 + x*0x20) +#define RIO_PORT_N_CTL2_CSR(n, m) (0x54 + (n) * (0x20 * (m))) #define RIO_PORT_N_CTL2_SEL_BAUD 0xf0000000 -#define RIO_PORT_N_ERR_STS_CSR(x) (0x0058 + x*0x20) -#define RIO_PORT_N_ERR_STS_PW_OUT_ES 0x00010000 /* Output Error-stopped */ -#define RIO_PORT_N_ERR_STS_PW_INP_ES 0x00000100 /* Input Error-stopped */ +#define RIO_PORT_N_ERR_STS_CSR(n, m) (0x58 + (n) * (0x20 * (m))) +#define RIO_PORT_N_ERR_STS_OUT_ES 0x00010000 /* Output Error-stopped */ +#define RIO_PORT_N_ERR_STS_INP_ES 0x00000100 /* Input Error-stopped */ #define RIO_PORT_N_ERR_STS_PW_PEND 0x00000010 /* Port-Write Pending */ +#define RIO_PORT_N_ERR_STS_PORT_UA 
0x00000008 /* Port Unavailable */ #define RIO_PORT_N_ERR_STS_PORT_ERR 0x00000004 #define RIO_PORT_N_ERR_STS_PORT_OK 0x00000002 #define RIO_PORT_N_ERR_STS_PORT_UNINIT 0x00000001 -#define RIO_PORT_N_CTL_CSR(x) (0x005c + x*0x20) +#define RIO_PORT_N_CTL_CSR(n, m) (0x5c + (n) * (0x20 * (m))) #define RIO_PORT_N_CTL_PWIDTH 0xc0000000 #define RIO_PORT_N_CTL_PWIDTH_1 0x00000000 #define RIO_PORT_N_CTL_PWIDTH_4 0x40000000 #define RIO_PORT_N_CTL_IPW 0x38000000 /* Initialized Port Width */ #define RIO_PORT_N_CTL_P_TYP_SER 0x00000001 #define RIO_PORT_N_CTL_LOCKOUT 0x00000002 -#define RIO_PORT_N_CTL_EN_RX_SER 0x00200000 -#define RIO_PORT_N_CTL_EN_TX_SER 0x00400000 -#define RIO_PORT_N_CTL_EN_RX_PAR 0x08000000 -#define RIO_PORT_N_CTL_EN_TX_PAR 0x40000000 +#define RIO_PORT_N_CTL_EN_RX 0x00200000 +#define RIO_PORT_N_CTL_EN_TX 0x00400000 +#define RIO_PORT_N_OB_ACK_CSR(n) (0x60 + (n) * 0x40) /* Only in RM-II */ +#define RIO_PORT_N_OB_ACK_CLEAR 0x80000000 +#define RIO_PORT_N_OB_ACK_OUTSTD 0x00fff000 +#define RIO_PORT_N_OB_ACK_OUTBND 0x00000fff +#define RIO_PORT_N_IB_ACK_CSR(n) (0x64 + (n) * 0x40) /* Only in RM-II */ +#define RIO_PORT_N_IB_ACK_INBND 0x00000fff + +/* + * Device-based helper macros for serial port register access. + * d - pointer to rapidio device object, n - port number + */ + +#define RIO_DEV_PORT_N_MNT_REQ_CSR(d, n) \ + (d->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(n, d->phys_rmap)) + +#define RIO_DEV_PORT_N_MNT_RSP_CSR(d, n) \ + (d->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(n, d->phys_rmap)) + +#define RIO_DEV_PORT_N_ACK_STS_CSR(d, n) \ + (d->phys_efptr + RIO_PORT_N_ACK_STS_CSR(n)) + +#define RIO_DEV_PORT_N_CTL2_CSR(d, n) \ + (d->phys_efptr + RIO_PORT_N_CTL2_CSR(n, d->phys_rmap)) + +#define RIO_DEV_PORT_N_ERR_STS_CSR(d, n) \ + (d->phys_efptr + RIO_PORT_N_ERR_STS_CSR(n, d->phys_rmap)) + +#define RIO_DEV_PORT_N_CTL_CSR(d, n) \ + (d->phys_efptr + RIO_PORT_N_CTL_CSR(n, d->phys_rmap)) + +#define RIO_DEV_PORT_N_OB_ACK_CSR(d, n) \ + (d->phys_efptr + RIO_PORT_N_OB_ACK_CSR(n)) + +#define RIO_DEV_PORT_N_IB_ACK_CSR(d, n) \ + (d->phys_efptr + RIO_PORT_N_IB_ACK_CSR(n)) /* * Error Management Extensions (RapidIO 1.3+, Part 8) @@ -268,6 +301,7 @@ /* General EM Registers (Common for all Ports) */ #define RIO_EM_EFB_HEADER 0x000 /* Error Management Extensions Block Header */ +#define RIO_EM_EMHS_CAR 0x004 /* EM Functionality CAR */ #define RIO_EM_LTL_ERR_DETECT 0x008 /* Logical/Transport Layer Error Detect CSR */ #define RIO_EM_LTL_ERR_EN 0x00c /* Logical/Transport Layer Error Enable CSR */ #define REM_LTL_ERR_ILLTRAN 0x08000000 /* Illegal Transaction decode */ @@ -278,15 +312,33 @@ #define RIO_EM_LTL_ADDR_CAP 0x014 /* Logical/Transport Layer Address Capture CSR */ #define RIO_EM_LTL_DEVID_CAP 0x018 /* Logical/Transport Layer Device ID Capture CSR */ #define RIO_EM_LTL_CTRL_CAP 0x01c /* Logical/Transport Layer Control Capture CSR */ +#define RIO_EM_LTL_DID32_CAP 0x020 /* Logical/Transport Layer Dev32 DestID Capture CSR */ +#define RIO_EM_LTL_SID32_CAP 0x024 /* Logical/Transport Layer Dev32 source ID Capture CSR */ #define RIO_EM_PW_TGT_DEVID 0x028 /* Port-write Target deviceID CSR */ +#define RIO_EM_PW_TGT_DEVID_D16M 0xff000000 /* Port-write Target DID16 MSB */ +#define RIO_EM_PW_TGT_DEVID_D8 0x00ff0000 /* Port-write Target DID16 LSB or DID8 */ +#define RIO_EM_PW_TGT_DEVID_DEV16 0x00008000 /* Port-write Target DID16 LSB or DID8 */ +#define RIO_EM_PW_TGT_DEVID_DEV32 0x00004000 /* Port-write Target DID16 LSB or DID8 */ #define RIO_EM_PKT_TTL 0x02c /* Packet Time-to-live CSR */ +#define RIO_EM_PKT_TTL_VAL 0xffff0000 /* Packet 
Time-to-live value */ +#define RIO_EM_PW_TGT32_DEVID 0x030 /* Port-write Dev32 Target deviceID CSR */ +#define RIO_EM_PW_TX_CTRL 0x034 /* Port-write Transmission Control CSR */ +#define RIO_EM_PW_TX_CTRL_PW_DIS 0x00000001 /* Port-write Transmission Disable bit */ /* Per-Port EM Registers */ #define RIO_EM_PN_ERR_DETECT(x) (0x040 + x*0x40) /* Port N Error Detect CSR */ #define REM_PED_IMPL_SPEC 0x80000000 +#define REM_PED_LINK_OK2U 0x40000000 /* Link OK to Uninit transition */ +#define REM_PED_LINK_UPDA 0x20000000 /* Link Uninit Packet Discard Active */ +#define REM_PED_LINK_U2OK 0x10000000 /* Link Uninit to OK transition */ #define REM_PED_LINK_TO 0x00000001 + #define RIO_EM_PN_ERRRATE_EN(x) (0x044 + x*0x40) /* Port N Error Rate Enable CSR */ +#define RIO_EM_PN_ERRRATE_EN_OK2U 0x40000000 /* Enable notification for OK2U */ +#define RIO_EM_PN_ERRRATE_EN_UPDA 0x20000000 /* Enable notification for UPDA */ +#define RIO_EM_PN_ERRRATE_EN_U2OK 0x10000000 /* Enable notification for U2OK */ + #define RIO_EM_PN_ATTRIB_CAP(x) (0x048 + x*0x40) /* Port N Attributes Capture CSR */ #define RIO_EM_PN_PKT_CAP_0(x) (0x04c + x*0x40) /* Port N Packet/Control Symbol Capture 0 CSR */ #define RIO_EM_PN_PKT_CAP_1(x) (0x050 + x*0x40) /* Port N Packet Capture 1 CSR */ @@ -294,5 +346,50 @@ #define RIO_EM_PN_PKT_CAP_3(x) (0x058 + x*0x40) /* Port N Packet Capture 3 CSR */ #define RIO_EM_PN_ERRRATE(x) (0x068 + x*0x40) /* Port N Error Rate CSR */ #define RIO_EM_PN_ERRRATE_TR(x) (0x06c + x*0x40) /* Port N Error Rate Threshold CSR */ +#define RIO_EM_PN_LINK_UDT(x) (0x070 + x*0x40) /* Port N Link Uninit Discard Timer CSR */ +#define RIO_EM_PN_LINK_UDT_TO 0xffffff00 /* Link Uninit Timeout value */ + +/* + * Switch Routing Table Register Block ID=0x000E (RapidIO 3.0+, part 3) + * Register offsets are defined from beginning of the block. + */ + +/* Broadcast Routing Table Control CSR */ +#define RIO_BC_RT_CTL_CSR 0x020 +#define RIO_RT_CTL_THREE_LVL 0x80000000 +#define RIO_RT_CTL_DEV32_RT_CTRL 0x40000000 +#define RIO_RT_CTL_MC_MASK_SZ 0x03000000 /* 3.0+ Part 11: Multicast */ + +/* Broadcast Level 0 Info CSR */ +#define RIO_BC_RT_LVL0_INFO_CSR 0x030 +#define RIO_RT_L0I_NUM_GR 0xff000000 +#define RIO_RT_L0I_GR_PTR 0x00fffc00 + +/* Broadcast Level 1 Info CSR */ +#define RIO_BC_RT_LVL1_INFO_CSR 0x034 +#define RIO_RT_L1I_NUM_GR 0xff000000 +#define RIO_RT_L1I_GR_PTR 0x00fffc00 + +/* Broadcast Level 2 Info CSR */ +#define RIO_BC_RT_LVL2_INFO_CSR 0x038 +#define RIO_RT_L2I_NUM_GR 0xff000000 +#define RIO_RT_L2I_GR_PTR 0x00fffc00 + +/* Per-Port Routing Table registers. + * Register fields defined in the broadcast section above are + * applicable to the corresponding registers below. + */ +#define RIO_SPx_RT_CTL_CSR(x) (0x040 + (0x20 * x)) +#define RIO_SPx_RT_LVL0_INFO_CSR(x) (0x50 + (0x20 * x)) +#define RIO_SPx_RT_LVL1_INFO_CSR(x) (0x54 + (0x20 * x)) +#define RIO_SPx_RT_LVL2_INFO_CSR(x) (0x58 + (0x20 * x)) + +/* Register Formats for Routing Table Group entry. + * Register offsets are calculated using GR_PTR field in the corresponding + * table Level_N and group/entry numbers (see RapidIO 3.0+ Part 3). 
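+ *
+ * Illustrative use of the entry masks defined below (rt_entry stands
+ * for a value read from a hypothetical Level_N group entry register):
+ *
+ *	if ((rt_entry & RIO_RT_Ln_ENTRY_RTE_VAL) == RIO_RT_ENTRY_DROP_PKT)
+ *		... packets routed through this entry are dropped ...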
+ */ +#define RIO_RT_Ln_ENTRY_IMPL_DEF 0xf0000000 +#define RIO_RT_Ln_ENTRY_RTE_VAL 0x000003ff +#define RIO_RT_ENTRY_DROP_PKT 0x300 #endif /* LINUX_RIO_REGS_H */ diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 49eb4f8ebac9..b46bb5620a76 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -158,14 +158,14 @@ struct anon_vma *page_get_anon_vma(struct page *page); /* * rmap interfaces called when adding or removing pte of page */ -void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); +void page_move_anon_rmap(struct page *, struct vm_area_struct *); void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long, bool); void do_page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long, int); void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long, bool); -void page_add_file_rmap(struct page *); +void page_add_file_rmap(struct page *, bool); void page_remove_rmap(struct page *, bool); void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *, diff --git a/include/linux/rmi.h b/include/linux/rmi.h index e0aca1476001..64125443f8a6 100644 --- a/include/linux/rmi.h +++ b/include/linux/rmi.h @@ -13,6 +13,7 @@ #include <linux/device.h> #include <linux/interrupt.h> #include <linux/input.h> +#include <linux/kfifo.h> #include <linux/list.h> #include <linux/module.h> #include <linux/types.h> @@ -99,6 +100,8 @@ struct rmi_2d_sensor_platform_data { bool topbuttonpad; bool kernel_tracking; int dmax; + int dribble; + int palm_detect; }; /** @@ -106,7 +109,7 @@ struct rmi_2d_sensor_platform_data { * @buttonpad - the touchpad is a buttonpad, so enable only the first actual * button that is found. * @trackstick_buttons - Set when the function 30 is handling the physical - * buttons of the trackstick (as a PD/2 passthrough device. + * buttons of the trackstick (as a PS/2 passthrough device). * @disable - the touchpad incorrectly reports F30 and it should be ignored. * This is a special case which is due to misconfigured firmware. */ @@ -116,14 +119,17 @@ struct rmi_f30_data { bool disable; }; -/** - * struct rmi_f01_power - override default power management settings. - * + +/* + * Set the state of a register + * DEFAULT - use the default value set by the firmware config + * OFF - explicitly disable the register + * ON - explicitly enable the register */ -enum rmi_f01_nosleep { - RMI_F01_NOSLEEP_DEFAULT = 0, - RMI_F01_NOSLEEP_OFF = 1, - RMI_F01_NOSLEEP_ON = 2 +enum rmi_reg_state { + RMI_REG_STATE_DEFAULT = 0, + RMI_REG_STATE_OFF = 1, + RMI_REG_STATE_ON = 2 }; /** @@ -143,7 +149,7 @@ enum rmi_f01_nosleep { * when the touch sensor is in doze mode, in units of 10ms. */ struct rmi_f01_power_management { - enum rmi_f01_nosleep nosleep; + enum rmi_reg_state nosleep; u8 wakeup_threshold; u8 doze_holdoff; u8 doze_interval; @@ -204,16 +210,18 @@ struct rmi_device_platform_data_spi { * @reset_delay_ms - after issuing a reset command to the touch sensor, the * driver waits a few milliseconds to give the firmware a chance to * to re-initialize. You can override the default wait period here. 
+ * @irq: irq associated with the attn gpio line, or negative */ struct rmi_device_platform_data { int reset_delay_ms; + int irq; struct rmi_device_platform_data_spi spi_data; /* function handler pdata */ - struct rmi_2d_sensor_platform_data *sensor_pdata; + struct rmi_2d_sensor_platform_data sensor_pdata; struct rmi_f01_power_management power_management; - struct rmi_f30_data *f30_data; + struct rmi_f30_data f30_data; }; /** @@ -264,9 +272,6 @@ struct rmi_transport_dev { struct rmi_device_platform_data pdata; struct input_dev *input; - - void *attn_data; - int attn_size; }; /** @@ -324,17 +329,24 @@ struct rmi_device { }; +struct rmi4_attn_data { + unsigned long irq_status; + size_t size; + void *data; +}; + struct rmi_driver_data { struct list_head function_list; struct rmi_device *rmi_dev; struct rmi_function *f01_container; - bool f01_bootloader_mode; + struct rmi_function *f34_container; + bool bootloader_mode; - u32 attn_count; int num_of_irq_regs; int irq_count; + void *irq_memory; unsigned long *irq_status; unsigned long *fn_irq_bits; unsigned long *current_irq_mask; @@ -343,17 +355,23 @@ struct rmi_driver_data { struct input_dev *input; u8 pdt_props; - u8 bsr; + + u8 num_rx_electrodes; + u8 num_tx_electrodes; bool enabled; + struct mutex enabled_mutex; - void *data; + struct rmi4_attn_data attn_data; + DECLARE_KFIFO(attn_fifo, struct rmi4_attn_data, 16); }; int rmi_register_transport_device(struct rmi_transport_dev *xport); void rmi_unregister_transport_device(struct rmi_transport_dev *xport); -int rmi_process_interrupt_requests(struct rmi_device *rmi_dev); -int rmi_driver_suspend(struct rmi_device *rmi_dev); -int rmi_driver_resume(struct rmi_device *rmi_dev); +void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status, + void *data, size_t size); + +int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake); +int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake); #endif diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h index 82a673905edb..ada50ff36da0 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h @@ -169,7 +169,7 @@ struct rpmsg_driver { int register_rpmsg_device(struct rpmsg_channel *dev); void unregister_rpmsg_device(struct rpmsg_channel *dev); -int register_rpmsg_driver(struct rpmsg_driver *drv); +int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner); void unregister_rpmsg_driver(struct rpmsg_driver *drv); void rpmsg_destroy_ept(struct rpmsg_endpoint *); struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *, @@ -177,6 +177,22 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *, int rpmsg_send_offchannel_raw(struct rpmsg_channel *, u32, u32, void *, int, bool); +/* use a macro to avoid include chaining to get THIS_MODULE */ +#define register_rpmsg_driver(drv) \ + __register_rpmsg_driver(drv, THIS_MODULE) + +/** + * module_rpmsg_driver() - Helper macro for registering an rpmsg driver + * @__rpmsg_driver: rpmsg_driver struct + * + * Helper macro for rpmsg drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. 
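+ *
+ * Typical use (a sketch; my_rpmsg_driver is a driver-defined
+ * struct rpmsg_driver):
+ *
+ *	module_rpmsg_driver(my_rpmsg_driver);
+ *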
Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_rpmsg_driver(__rpmsg_driver) \ + module_driver(__rpmsg_driver, register_rpmsg_driver, \ + unregister_rpmsg_driver) + /** * rpmsg_send() - send a message across to the remote processor * @rpdev: the rpmsg channel diff --git a/include/linux/ds1286.h b/include/linux/rtc/ds1286.h index 45ea0aa0aeb9..45ea0aa0aeb9 100644 --- a/include/linux/ds1286.h +++ b/include/linux/rtc/ds1286.h diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index c006cc900c44..2daece8979f7 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -89,8 +89,9 @@ void net_inc_egress_queue(void); void net_dec_egress_queue(void); #endif -extern void rtnetlink_init(void); -extern void __rtnl_unlock(void); +void rtnetlink_init(void); +void __rtnl_unlock(void); +void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail); #define ASSERT_RTNL() do { \ if (unlikely(!rtnl_is_locked())) { \ diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h index 561e8615528d..ae0528b834cd 100644 --- a/include/linux/rwsem-spinlock.h +++ b/include/linux/rwsem-spinlock.h @@ -34,7 +34,7 @@ struct rw_semaphore { extern void __down_read(struct rw_semaphore *sem); extern int __down_read_trylock(struct rw_semaphore *sem); extern void __down_write(struct rw_semaphore *sem); -extern void __down_write_nested(struct rw_semaphore *sem, int subclass); +extern int __must_check __down_write_killable(struct rw_semaphore *sem); extern int __down_write_trylock(struct rw_semaphore *sem); extern void __up_read(struct rw_semaphore *sem); extern void __up_write(struct rw_semaphore *sem); diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 8f498cdde280..dd1d14250340 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -14,6 +14,7 @@ #include <linux/list.h> #include <linux/spinlock.h> #include <linux/atomic.h> +#include <linux/err.h> #ifdef CONFIG_RWSEM_SPIN_ON_OWNER #include <linux/osq_lock.h> #endif @@ -22,10 +23,11 @@ struct rw_semaphore; #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK #include <linux/rwsem-spinlock.h> /* use a generic implementation */ +#define __RWSEM_INIT_COUNT(name) .count = RWSEM_UNLOCKED_VALUE #else /* All arch specific implementations share the same struct */ struct rw_semaphore { - long count; + atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; #ifdef CONFIG_RWSEM_SPIN_ON_OWNER @@ -43,6 +45,7 @@ struct rw_semaphore { extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem); extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); @@ -52,9 +55,10 @@ extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); /* In all implementations count != 0 means locked */ static inline int rwsem_is_locked(struct rw_semaphore *sem) { - return sem->count != 0; + return atomic_long_read(&sem->count) != 0; } +#define __RWSEM_INIT_COUNT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE) #endif /* Common initializer macros and functions */ @@ -72,7 +76,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem) #endif #define __RWSEM_INITIALIZER(name) \ - { .count = RWSEM_UNLOCKED_VALUE, \ + { __RWSEM_INIT_COUNT(name), \ .wait_list = 
LIST_HEAD_INIT((name).wait_list), \ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \ __RWSEM_OPT_INIT(name) \ @@ -116,6 +120,7 @@ extern int down_read_trylock(struct rw_semaphore *sem); * lock for writing */ extern void down_write(struct rw_semaphore *sem); +extern int __must_check down_write_killable(struct rw_semaphore *sem); /* * trylock for writing -- returns 1 if successful, 0 if contention @@ -153,6 +158,7 @@ extern void downgrade_write(struct rw_semaphore *sem); */ extern void down_read_nested(struct rw_semaphore *sem, int subclass); extern void down_write_nested(struct rw_semaphore *sem, int subclass); +extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass); extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock); # define down_write_nest_lock(sem, nest_lock) \ @@ -173,6 +179,7 @@ extern void up_read_non_owner(struct rw_semaphore *sem); # define down_read_nested(sem, subclass) down_read(sem) # define down_write_nest_lock(sem, nest_lock) down_write(sem) # define down_write_nested(sem, subclass) down_write(sem) +# define down_write_killable_nested(sem, subclass) down_write_killable(sem) # define down_read_non_owner(sem) down_read(sem) # define up_read_non_owner(sem) up_read(sem) #endif diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h index a53915cd5581..c68307bc306f 100644 --- a/include/linux/rxrpc.h +++ b/include/linux/rxrpc.h @@ -35,21 +35,24 @@ struct sockaddr_rxrpc { */ #define RXRPC_SECURITY_KEY 1 /* [clnt] set client security key */ #define RXRPC_SECURITY_KEYRING 2 /* [srvr] set ring of server security keys */ -#define RXRPC_EXCLUSIVE_CONNECTION 3 /* [clnt] use exclusive RxRPC connection */ +#define RXRPC_EXCLUSIVE_CONNECTION 3 /* Deprecated; use RXRPC_EXCLUSIVE_CALL instead */ #define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */ /* * RxRPC control messages + * - If neither abort or accept are specified, the message is a data message. 
* - terminal messages mean that a user call ID tag can be recycled + * - s/r/- indicate whether these are applicable to sendmsg() and/or recvmsg() */ -#define RXRPC_USER_CALL_ID 1 /* user call ID specifier */ -#define RXRPC_ABORT 2 /* abort request / notification [terminal] */ -#define RXRPC_ACK 3 /* [Server] RPC op final ACK received [terminal] */ -#define RXRPC_NET_ERROR 5 /* network error received [terminal] */ -#define RXRPC_BUSY 6 /* server busy received [terminal] */ -#define RXRPC_LOCAL_ERROR 7 /* local error generated [terminal] */ -#define RXRPC_NEW_CALL 8 /* [Server] new incoming call notification */ -#define RXRPC_ACCEPT 9 /* [Server] accept request */ +#define RXRPC_USER_CALL_ID 1 /* sr: user call ID specifier */ +#define RXRPC_ABORT 2 /* sr: abort request / notification [terminal] */ +#define RXRPC_ACK 3 /* -r: [Service] RPC op final ACK received [terminal] */ +#define RXRPC_NET_ERROR 5 /* -r: network error received [terminal] */ +#define RXRPC_BUSY 6 /* -r: server busy received [terminal] */ +#define RXRPC_LOCAL_ERROR 7 /* -r: local error generated [terminal] */ +#define RXRPC_NEW_CALL 8 /* -r: [Service] new incoming call notification */ +#define RXRPC_ACCEPT 9 /* s-: [Service] accept request */ +#define RXRPC_EXCLUSIVE_CALL 10 /* s-: Call should be on exclusive connection */ /* * RxRPC security levels diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 556ec1ea2574..cb3c8fe6acd7 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -286,6 +286,31 @@ size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, #define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist)) /* + * The maximum number of SG segments that we will put inside a + * scatterlist (unless chaining is used). Should ideally fit inside a + * single page, to avoid a higher order allocation. We could define this + * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The + * minimum value is 32. + */ +#define SG_CHUNK_SIZE 128 + +/* + * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit + * is totally arbitrary; a setting of 2048 will get you at least 8MB I/Os. + */ +#ifdef CONFIG_ARCH_HAS_SG_CHAIN +#define SG_MAX_SEGMENTS 2048 +#else +#define SG_MAX_SEGMENTS SG_CHUNK_SIZE +#endif + +#ifdef CONFIG_SG_POOL +void sg_free_table_chained(struct sg_table *table, bool first_chunk); +int sg_alloc_table_chained(struct sg_table *table, int nents, + struct scatterlist *first_chunk); +#endif + +/* * sg page iterator * * Iterates over sg entries page-by-page.
On each successful iteration, diff --git a/include/linux/sched.h b/include/linux/sched.h index 52c4847b05e2..62c68e513e39 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -40,7 +40,6 @@ struct sched_param { #include <linux/pid.h> #include <linux/percpu.h> #include <linux/topology.h> -#include <linux/proportions.h> #include <linux/seccomp.h> #include <linux/rcupdate.h> #include <linux/rculist.h> @@ -178,9 +177,11 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); extern void calc_global_load(unsigned long ticks); #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) -extern void update_cpu_load_nohz(int active); +extern void cpu_load_update_nohz_start(void); +extern void cpu_load_update_nohz_stop(void); #else -static inline void update_cpu_load_nohz(int active) { } +static inline void cpu_load_update_nohz_start(void) { } +static inline void cpu_load_update_nohz_stop(void) { } #endif extern void dump_cpu_task(int cpu); @@ -218,9 +219,10 @@ extern void proc_sched_set_task(struct task_struct *p); #define TASK_WAKING 256 #define TASK_PARKED 512 #define TASK_NOLOAD 1024 -#define TASK_STATE_MAX 2048 +#define TASK_NEW 2048 +#define TASK_STATE_MAX 4096 -#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN" +#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn" extern char ___assert_task_state[1 - 2*!!( sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; @@ -372,6 +374,15 @@ extern void cpu_init (void); extern void trap_init(void); extern void update_process_times(int user); extern void scheduler_tick(void); +extern int sched_cpu_starting(unsigned int cpu); +extern int sched_cpu_activate(unsigned int cpu); +extern int sched_cpu_deactivate(unsigned int cpu); + +#ifdef CONFIG_HOTPLUG_CPU +extern int sched_cpu_dying(unsigned int cpu); +#else +# define sched_cpu_dying NULL +#endif extern void sched_show_task(struct task_struct *p); @@ -511,6 +522,8 @@ static inline int get_dumpable(struct mm_struct *mm) #define MMF_HAS_UPROBES 19 /* has uprobes */ #define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ +#define MMF_OOM_REAPED 21 /* mm has already been reaped */ +#define MMF_OOM_NOT_REAPABLE 22 /* mm couldn't be reaped */ #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) @@ -658,6 +671,7 @@ struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; + atomic_t oom_victims; /* # of TIF_MEMDIE threads in this thread group */ struct list_head thread_head; wait_queue_head_t wait_chldexit; /* for wait4() */ @@ -782,7 +796,11 @@ struct signal_struct { struct tty_audit_buf *tty_audit_buf; #endif - oom_flags_t oom_flags; + /* + * Thread is the potential origin of an oom condition; kill first on + * oom + */ + bool oom_flag_origin; short oom_score_adj; /* OOM kill score adjustment */ short oom_score_adj_min; /* OOM kill score adjustment min value. * Only settable by CAP_SYS_RESOURCE. */ @@ -935,9 +953,19 @@ enum cpu_idle_type { }; /* + * Integer metrics need fixed point arithmetic, e.g., sched/fair + * has a few: load, load_avg, util_avg, freq, and capacity. + * + * We define a basic fixed point arithmetic range, and then formalize + * all these metrics based on that basic range.
+ */ +# define SCHED_FIXEDPOINT_SHIFT 10 +# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT) + +/* * Increase resolution of cpu_capacity calculations */ -#define SCHED_CAPACITY_SHIFT 10 +#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT #define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) /* @@ -1199,18 +1227,56 @@ struct load_weight { }; /* - * The load_avg/util_avg accumulates an infinite geometric series. - * 1) load_avg factors frequency scaling into the amount of time that a - * sched_entity is runnable on a rq into its weight. For cfs_rq, it is the - * aggregated such weights of all runnable and blocked sched_entities. - * 2) util_avg factors frequency and cpu scaling into the amount of time - * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE]. - * For cfs_rq, it is the aggregated such times of all runnable and + * The load_avg/util_avg accumulates an infinite geometric series + * (see __update_load_avg() in kernel/sched/fair.c). + * + * [load_avg definition] + * + * load_avg = runnable% * scale_load_down(load) + * + * where runnable% is the time ratio that a sched_entity is runnable. + * For cfs_rq, it is the aggregated load_avg of all runnable and * blocked sched_entities. - * The 64 bit load_sum can: - * 1) for cfs_rq, afford 4353082796 (=2^64/47742/88761) entities with - * the highest weight (=88761) always runnable, we should not overflow - * 2) for entity, support any load.weight always runnable + * + * load_avg may also take frequency scaling into account: + * + * load_avg = runnable% * scale_load_down(load) * freq% + * + * where freq% is the CPU frequency normalized to the highest frequency. + * + * [util_avg definition] + * + * util_avg = running% * SCHED_CAPACITY_SCALE + * + * where running% is the time ratio that a sched_entity is running on + * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable + * and blocked sched_entities. + * + * util_avg may also factor frequency scaling and CPU capacity scaling: + * + * util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity% + * + * where freq% is the same as above, and capacity% is the CPU capacity + * normalized to the greatest capacity (due to uarch differences, etc). + * + * N.B., the above ratios (runnable%, running%, freq%, and capacity%) + * themselves are in the range of [0, 1]. To do fixed point arithmetic, + * we therefore scale them to as large a range as necessary. This is for + * example reflected by util_avg's SCHED_CAPACITY_SCALE. + * + * [Overflow issue] + * + * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities + * with the highest load (=88761), always runnable on a single cfs_rq, + * and should not overflow as the number already hits PID_MAX_LIMIT. + * + * For all other cases (including 32-bit kernels), struct load_weight's + * weight will overflow first before we do, because: + * + * Max(load_avg) <= Max(load.weight) + * + * Then it is the load_weight's responsibility to consider overflow + * issues.
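 *
 * [Worked example — added here for illustration; the numbers follow from
 * the definitions above and are not part of the original comment]
 *
 * With SCHED_FIXEDPOINT_SHIFT = 10, a ratio of 0.5 is stored as
 * 0.5 * SCHED_FIXEDPOINT_SCALE = 512. A nice-0 entity, whose
 * scale_load_down(load) is 1024, that is runnable half of the time
 * therefore accumulates:
 *
 *	load_avg = runnable% * scale_load_down(load)
 *	         = (512 * 1024) >> SCHED_FIXEDPOINT_SHIFT
 *	         = 512
 *
 * The accumulated sums and decayed averages live in struct sched_avg: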
*/ struct sched_avg { u64 last_update_time, load_sum; @@ -1475,11 +1541,15 @@ struct task_struct { unsigned sched_reset_on_fork:1; unsigned sched_contributes_to_load:1; unsigned sched_migrated:1; + unsigned sched_remote_wakeup:1; unsigned :0; /* force alignment to the next boundary */ /* unserialized, strictly 'current' */ unsigned in_execve:1; /* bit to tell LSMs we're in execve */ unsigned in_iowait:1; +#if !defined(TIF_RESTORE_SIGMASK) + unsigned restore_sigmask:1; +#endif #ifdef CONFIG_MEMCG unsigned memcg_may_oom:1; #ifndef CONFIG_SLOB @@ -1596,6 +1666,7 @@ struct task_struct { unsigned long sas_ss_sp; size_t sas_ss_size; + unsigned sas_ss_flags; struct callback_head *task_works; @@ -1871,12 +1942,43 @@ extern int arch_task_struct_size __read_mostly; /* Future-safe accessor for struct task_struct's cpus_allowed. */ #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) +static inline int tsk_nr_cpus_allowed(struct task_struct *p) +{ + return p->nr_cpus_allowed; +} + #define TNF_MIGRATED 0x01 #define TNF_NO_GROUP 0x02 #define TNF_SHARED 0x04 #define TNF_FAULT_LOCAL 0x08 #define TNF_MIGRATE_FAIL 0x10 +static inline bool in_vfork(struct task_struct *tsk) +{ + bool ret; + + /* + * need RCU to access ->real_parent if CLONE_VM was used along with + * CLONE_PARENT. + * + * We check real_parent->mm == tsk->mm because CLONE_VFORK does not + * imply CLONE_VM + * + * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus + * ->real_parent is not necessarily the task doing vfork(), so in + * theory we can't rely on task_lock() if we want to dereference it. + * + * And in this case we can't trust the real_parent->mm == tsk->mm + * check, it can be false negative. But we do not care, if init or + * another oom-unkillable task does this it should blame itself. + */ + rcu_read_lock(); + ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm; + rcu_read_unlock(); + + return ret; +} + #ifdef CONFIG_NUMA_BALANCING extern void task_numa_fault(int last_node, int node, int pages, int flags); extern pid_t task_numa_group_id(struct task_struct *p); @@ -2068,6 +2170,9 @@ static inline void put_task_struct(struct task_struct *t) __put_task_struct(t); } +struct task_struct *task_rcu_dereference(struct task_struct **ptask); +struct task_struct *try_get_task_struct(struct task_struct **ptask); + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN extern void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime); @@ -2184,6 +2289,7 @@ static inline void memalloc_noio_restore(unsigned int flags) #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. 
*/ #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ +#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */ #define TASK_PFA_TEST(name, func) \ @@ -2207,6 +2313,9 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab) TASK_PFA_SET(SPREAD_SLAB, spread_slab) TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) +TASK_PFA_TEST(LMK_WAITING, lmk_waiting) +TASK_PFA_SET(LMK_WAITING, lmk_waiting) + /* * task->jobctl flags */ @@ -2303,8 +2412,6 @@ extern unsigned long long notrace sched_clock(void); /* * See the comment in kernel/sched/clock.c */ -extern u64 cpu_clock(int cpu); -extern u64 local_clock(void); extern u64 running_clock(void); extern u64 sched_clock_cpu(int cpu); @@ -2323,6 +2430,16 @@ static inline void sched_clock_idle_sleep_event(void) static inline void sched_clock_idle_wakeup_event(u64 delta_ns) { } + +static inline u64 cpu_clock(int cpu) +{ + return sched_clock(); +} + +static inline u64 local_clock(void) +{ + return sched_clock(); +} #else /* * Architectures can set this to 1 if they have specified @@ -2337,6 +2454,26 @@ extern void clear_sched_clock_stable(void); extern void sched_clock_tick(void); extern void sched_clock_idle_sleep_event(void); extern void sched_clock_idle_wakeup_event(u64 delta_ns); + +/* + * As outlined in clock.c, provides a fast, high resolution, nanosecond + * time source that is monotonic per cpu argument and has bounded drift + * between cpus. + * + * ######################### BIG FAT WARNING ########################## + * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can # + * # go backwards !! # + * #################################################################### + */ +static inline u64 cpu_clock(int cpu) +{ + return sched_clock_cpu(cpu); +} + +static inline u64 local_clock(void) +{ + return sched_clock_cpu(raw_smp_processor_id()); +} #endif #ifdef CONFIG_IRQ_TIME_ACCOUNTING @@ -2546,6 +2683,66 @@ extern void sigqueue_free(struct sigqueue *); extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); +#ifdef TIF_RESTORE_SIGMASK +/* + * Legacy restore_sigmask accessors. These are inefficient on + * SMP architectures because they require atomic operations. + */ + +/** + * set_restore_sigmask() - make sure saved_sigmask processing gets done + * + * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code + * will run before returning to user mode, to process the flag. For + * all callers, TIF_SIGPENDING is already set or it's no harm to set + * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the + * arch code will notice on return to user mode, in case those bits + * are scarce. We set TIF_SIGPENDING here to ensure that the arch + * signal code always gets run when TIF_RESTORE_SIGMASK is set. + */ +static inline void set_restore_sigmask(void) +{ + set_thread_flag(TIF_RESTORE_SIGMASK); + WARN_ON(!test_thread_flag(TIF_SIGPENDING)); +} +static inline void clear_restore_sigmask(void) +{ + clear_thread_flag(TIF_RESTORE_SIGMASK); +} +static inline bool test_restore_sigmask(void) +{ + return test_thread_flag(TIF_RESTORE_SIGMASK); +} +static inline bool test_and_clear_restore_sigmask(void) +{ + return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK); +} + +#else /* TIF_RESTORE_SIGMASK */ + +/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. 
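 *
 * Both implementations are used the same way. A minimal caller sketch,
 * modeled on the classic sigsuspend() pattern (an illustration, not code
 * from this patch):
 *
 *	current->saved_sigmask = current->blocked;
 *	set_current_blocked(&newset);
 *	while (!signal_pending(current)) {
 *		__set_current_state(TASK_INTERRUPTIBLE);
 *		schedule();
 *	}
 *	set_restore_sigmask();
 *	return -ERESTARTNOHAND;
 *
 * The arch signal-delivery code later restores ->saved_sigmask on the
 * way back to user mode, clearing the flag via
 * test_and_clear_restore_sigmask().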
*/ +static inline void set_restore_sigmask(void) +{ + current->restore_sigmask = true; + WARN_ON(!test_thread_flag(TIF_SIGPENDING)); +} +static inline void clear_restore_sigmask(void) +{ + current->restore_sigmask = false; +} +static inline bool test_restore_sigmask(void) +{ + return current->restore_sigmask; +} +static inline bool test_and_clear_restore_sigmask(void) +{ + if (!current->restore_sigmask) + return false; + current->restore_sigmask = false; + return true; +} +#endif + static inline void restore_saved_sigmask(void) { if (test_and_clear_restore_sigmask()) @@ -2575,6 +2772,18 @@ static inline int kill_cad_pid(int sig, int priv) */ static inline int on_sig_stack(unsigned long sp) { + /* + * If the signal stack is SS_AUTODISARM then, by construction, we + * can't be on the signal stack unless user code deliberately set + * SS_AUTODISARM when we were already on it. + * + * This improves reliability: if user state gets corrupted such that + * the stack pointer points very close to the end of the signal stack, + * then this check will enable the signal to be handled anyway. + */ + if (current->sas_ss_flags & SS_AUTODISARM) + return 0; + #ifdef CONFIG_STACK_GROWSUP return sp >= current->sas_ss_sp && sp - current->sas_ss_sp < current->sas_ss_size; @@ -2592,6 +2801,13 @@ static inline int sas_ss_flags(unsigned long sp) return on_sig_stack(sp) ? SS_ONSTACK : 0; } +static inline void sas_ss_reset(struct task_struct *p) +{ + p->sas_ss_sp = 0; + p->sas_ss_size = 0; + p->sas_ss_flags = SS_DISABLE; +} + static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) { if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp)) @@ -2610,14 +2826,26 @@ extern struct mm_struct * mm_alloc(void); /* mmdrop drops the mm and the page tables */ extern void __mmdrop(struct mm_struct *); -static inline void mmdrop(struct mm_struct * mm) +static inline void mmdrop(struct mm_struct *mm) { if (unlikely(atomic_dec_and_test(&mm->mm_count))) __mmdrop(mm); } +static inline bool mmget_not_zero(struct mm_struct *mm) +{ + return atomic_inc_not_zero(&mm->mm_users); +} + /* mmput gets rid of the mappings and all user-space */ extern void mmput(struct mm_struct *); +#ifdef CONFIG_MMU +/* same as above but performs the slow path from the async context. 
It can + * also be called from atomic context. + */ +extern void mmput_async(struct mm_struct *); +#endif + /* Grab a reference to a task's mm, if it is not already going away */ extern struct mm_struct *get_task_mm(struct task_struct *task); /* @@ -2646,7 +2874,14 @@ static inline int copy_thread_tls( } #endif extern void flush_thread(void); -extern void exit_thread(void); + +#ifdef CONFIG_HAVE_EXIT_THREAD +extern void exit_thread(struct task_struct *tsk); +#else +static inline void exit_thread(struct task_struct *tsk) +{ +} +#endif extern void exit_files(struct task_struct *); extern void __cleanup_sighand(struct sighand_struct *); @@ -2866,7 +3101,7 @@ static inline int object_is_on_stack(void *obj) return (obj >= stack) && (obj < (stack + THREAD_SIZE)); } -extern void thread_info_cache_init(void); +extern void thread_stack_cache_init(void); #ifdef CONFIG_DEBUG_STACK_USAGE static inline unsigned long stack_not_used(struct task_struct *p) @@ -3240,7 +3475,10 @@ struct update_util_data { u64 time, unsigned long util, unsigned long max); }; -void cpufreq_set_update_util_data(int cpu, struct update_util_data *data); +void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, + void (*func)(struct update_util_data *data, u64 time, + unsigned long util, unsigned long max)); +void cpufreq_remove_update_util_hook(int cpu); #endif /* CONFIG_CPU_FREQ */ #endif diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h index 35de50a65665..dc5f989be226 100644 --- a/include/linux/scpi_protocol.h +++ b/include/linux/scpi_protocol.h @@ -70,6 +70,8 @@ struct scpi_ops { int (*sensor_get_capability)(u16 *sensors); int (*sensor_get_info)(u16 sensor_id, struct scpi_sensor_info *); int (*sensor_get_value)(u16, u64 *); + int (*device_get_power_state)(u16); + int (*device_set_power_state)(u16, u8); }; #if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index a9414fd49dc6..fcb4c3646173 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -705,4 +705,9 @@ typedef struct sctp_auth_chunk { sctp_authhdr_t auth_hdr; } __packed sctp_auth_chunk_t; +struct sctp_infox { + struct sctp_info *sctpinfo; + struct sctp_association *asoc; +}; + #endif /* __LINUX_SCTP_H__ */ diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 2296e6b2f690..ecc296c137cd 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -28,19 +28,13 @@ struct seccomp { }; #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER -extern int __secure_computing(void); -static inline int secure_computing(void) +extern int __secure_computing(const struct seccomp_data *sd); +static inline int secure_computing(const struct seccomp_data *sd) { if (unlikely(test_thread_flag(TIF_SECCOMP))) - return __secure_computing(); + return __secure_computing(sd); return 0; } - -#define SECCOMP_PHASE1_OK 0 -#define SECCOMP_PHASE1_SKIP 1 - -extern u32 seccomp_phase1(struct seccomp_data *sd); -int seccomp_phase2(u32 phase1_result); #else extern void secure_computing_strict(int this_syscall); #endif @@ -61,7 +55,7 @@ struct seccomp { }; struct seccomp_filter { }; #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER -static inline int secure_computing(void) { return 0; } +static inline int secure_computing(struct seccomp_data *sd) { return 0; } #else static inline void secure_computing_strict(int this_syscall) { return; } #endif diff --git a/include/linux/security.h b/include/linux/security.h index 157f0cb1e4d2..7831cd57bcf7 100644 --- a/include/linux/security.h +++
b/include/linux/security.h @@ -71,7 +71,7 @@ struct timezone; /* These functions are in security/commoncap.c */ extern int cap_capable(const struct cred *cred, struct user_namespace *ns, int cap, int audit); -extern int cap_settime(const struct timespec *ts, const struct timezone *tz); +extern int cap_settime(const struct timespec64 *ts, const struct timezone *tz); extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode); extern int cap_ptrace_traceme(struct task_struct *parent); extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); @@ -208,7 +208,13 @@ int security_capable_noaudit(const struct cred *cred, struct user_namespace *ns, int security_quotactl(int cmds, int type, int id, struct super_block *sb); int security_quota_on(struct dentry *dentry); int security_syslog(int type); -int security_settime(const struct timespec *ts, const struct timezone *tz); +int security_settime64(const struct timespec64 *ts, const struct timezone *tz); +static inline int security_settime(const struct timespec *ts, const struct timezone *tz) +{ + struct timespec64 ts64 = timespec_to_timespec64(*ts); + + return security_settime64(&ts64, tz); +} int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); int security_bprm_set_creds(struct linux_binprm *bprm); int security_bprm_check(struct linux_binprm *bprm); @@ -222,10 +228,10 @@ int security_sb_remount(struct super_block *sb, void *data); int security_sb_kern_mount(struct super_block *sb, int flags, void *data); int security_sb_show_options(struct seq_file *m, struct super_block *sb); int security_sb_statfs(struct dentry *dentry); -int security_sb_mount(const char *dev_name, struct path *path, +int security_sb_mount(const char *dev_name, const struct path *path, const char *type, unsigned long flags, void *data); int security_sb_umount(struct vfsmount *mnt, int flags); -int security_sb_pivotroot(struct path *old_path, struct path *new_path); +int security_sb_pivotroot(const struct path *old_path, const struct path *new_path); int security_sb_set_mnt_opts(struct super_block *sb, struct security_mnt_opts *opts, unsigned long kern_flags, @@ -234,7 +240,7 @@ int security_sb_clone_mnt_opts(const struct super_block *oldsb, struct super_block *newsb); int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts); int security_dentry_init_security(struct dentry *dentry, int mode, - struct qstr *name, void **ctx, + const struct qstr *name, void **ctx, u32 *ctxlen); int security_inode_alloc(struct inode *inode); @@ -462,10 +468,18 @@ static inline int security_syslog(int type) return 0; } +static inline int security_settime64(const struct timespec64 *ts, + const struct timezone *tz) +{ + return cap_settime(ts, tz); +} + static inline int security_settime(const struct timespec *ts, const struct timezone *tz) { - return cap_settime(ts, tz); + struct timespec64 ts64 = timespec_to_timespec64(*ts); + + return cap_settime(&ts64, tz); } static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) @@ -530,7 +544,7 @@ static inline int security_sb_statfs(struct dentry *dentry) return 0; } -static inline int security_sb_mount(const char *dev_name, struct path *path, +static inline int security_sb_mount(const char *dev_name, const struct path *path, const char *type, unsigned long flags, void *data) { @@ -542,8 +556,8 @@ static inline int security_sb_umount(struct vfsmount *mnt, int flags) return 0; } -static inline int 
security_sb_pivotroot(struct path *old_path, - struct path *new_path) +static inline int security_sb_pivotroot(const struct path *old_path, + const struct path *new_path) { return 0; } @@ -577,7 +591,7 @@ static inline void security_inode_free(struct inode *inode) static inline int security_dentry_init_security(struct dentry *dentry, int mode, - struct qstr *name, + const struct qstr *name, void **ctx, u32 *ctxlen) { @@ -1442,83 +1456,83 @@ static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi #endif /* CONFIG_SECURITY_NETWORK_XFRM */ #ifdef CONFIG_SECURITY_PATH -int security_path_unlink(struct path *dir, struct dentry *dentry); -int security_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode); -int security_path_rmdir(struct path *dir, struct dentry *dentry); -int security_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode, +int security_path_unlink(const struct path *dir, struct dentry *dentry); +int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode); +int security_path_rmdir(const struct path *dir, struct dentry *dentry); +int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev); -int security_path_truncate(struct path *path); -int security_path_symlink(struct path *dir, struct dentry *dentry, +int security_path_truncate(const struct path *path); +int security_path_symlink(const struct path *dir, struct dentry *dentry, const char *old_name); -int security_path_link(struct dentry *old_dentry, struct path *new_dir, +int security_path_link(struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry); -int security_path_rename(struct path *old_dir, struct dentry *old_dentry, - struct path *new_dir, struct dentry *new_dentry, +int security_path_rename(const struct path *old_dir, struct dentry *old_dentry, + const struct path *new_dir, struct dentry *new_dentry, unsigned int flags); -int security_path_chmod(struct path *path, umode_t mode); -int security_path_chown(struct path *path, kuid_t uid, kgid_t gid); -int security_path_chroot(struct path *path); +int security_path_chmod(const struct path *path, umode_t mode); +int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid); +int security_path_chroot(const struct path *path); #else /* CONFIG_SECURITY_PATH */ -static inline int security_path_unlink(struct path *dir, struct dentry *dentry) +static inline int security_path_unlink(const struct path *dir, struct dentry *dentry) { return 0; } -static inline int security_path_mkdir(struct path *dir, struct dentry *dentry, +static inline int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode) { return 0; } -static inline int security_path_rmdir(struct path *dir, struct dentry *dentry) +static inline int security_path_rmdir(const struct path *dir, struct dentry *dentry) { return 0; } -static inline int security_path_mknod(struct path *dir, struct dentry *dentry, +static inline int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev) { return 0; } -static inline int security_path_truncate(struct path *path) +static inline int security_path_truncate(const struct path *path) { return 0; } -static inline int security_path_symlink(struct path *dir, struct dentry *dentry, +static inline int security_path_symlink(const struct path *dir, struct dentry *dentry, const char *old_name) { return 0; } static inline int security_path_link(struct dentry *old_dentry, - struct path 
*new_dir, + const struct path *new_dir, struct dentry *new_dentry) { return 0; } -static inline int security_path_rename(struct path *old_dir, +static inline int security_path_rename(const struct path *old_dir, struct dentry *old_dentry, - struct path *new_dir, + const struct path *new_dir, struct dentry *new_dentry, unsigned int flags) { return 0; } -static inline int security_path_chmod(struct path *path, umode_t mode) +static inline int security_path_chmod(const struct path *path, umode_t mode) { return 0; } -static inline int security_path_chown(struct path *path, kuid_t uid, kgid_t gid) +static inline int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid) { return 0; } -static inline int security_path_chroot(struct path *path) +static inline int security_path_chroot(const struct path *path) { return 0; } diff --git a/include/linux/selection.h b/include/linux/selection.h index 85193aa8c1e3..8e4624efdb6f 100644 --- a/include/linux/selection.h +++ b/include/linux/selection.h @@ -24,10 +24,10 @@ extern void mouse_report(struct tty_struct * tty, int butt, int mrx, int mry); extern int console_blanked; -extern unsigned char color_table[]; -extern int default_red[]; -extern int default_grn[]; -extern int default_blu[]; +extern const unsigned char color_table[]; +extern unsigned char default_red[]; +extern unsigned char default_grn[]; +extern unsigned char default_blu[]; extern unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed); extern u16 screen_glyph(struct vc_data *vc, int offset); diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index e0582106ef4f..ead97654c4e9 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -277,7 +277,10 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s) static inline int raw_read_seqcount_latch(seqcount_t *s) { - return lockless_dereference(s->sequence); + int seq = READ_ONCE(s->sequence); + /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */ + smp_read_barrier_depends(); + return seq; } /** @@ -331,7 +334,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s) * unsigned seq, idx; * * do { - * seq = lockless_dereference(latch->seq); + * seq = raw_read_seqcount_latch(&latch->seq); * * idx = seq & 0x01; * entry = data_query(latch->data[idx], ...); diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 434879759725..48ec7651989b 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -36,6 +36,7 @@ struct plat_serial8250_port { void (*set_termios)(struct uart_port *, struct ktermios *new, struct ktermios *old); + unsigned int (*get_mctrl)(struct uart_port *); int (*handle_irq)(struct uart_port *); void (*pm)(struct uart_port *, unsigned int state, unsigned old); @@ -148,6 +149,7 @@ extern int early_serial8250_setup(struct earlycon_device *device, const char *options); extern void serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old); +extern unsigned int serial8250_do_get_mctrl(struct uart_port *port); extern int serial8250_do_startup(struct uart_port *port); extern void serial8250_do_shutdown(struct uart_port *port); extern void serial8250_do_pm(struct uart_port *port, unsigned int state, diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index cbfcf38e220d..2f44e2013654 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -123,6 +123,7 @@ struct uart_port { void (*set_termios)(struct uart_port *, struct ktermios *new, struct 
ktermios *old); + unsigned int (*get_mctrl)(struct uart_port *); void (*set_mctrl)(struct uart_port *, unsigned int); int (*startup)(struct uart_port *port); void (*shutdown)(struct uart_port *port); @@ -281,6 +282,8 @@ struct uart_state { enum uart_pm_state pm_state; struct circ_buf xmit; + atomic_t refcount; + wait_queue_head_t remove_wait; struct uart_port *uart_port; }; @@ -349,9 +352,15 @@ struct earlycon_id { extern const struct earlycon_id __earlycon_table[]; extern const struct earlycon_id __earlycon_table_end[]; +#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE) +#define EARLYCON_USED_OR_UNUSED __used +#else +#define EARLYCON_USED_OR_UNUSED __maybe_unused +#endif + #define OF_EARLYCON_DECLARE(_name, compat, fn) \ static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \ - __used __section(__earlycon_table) \ + EARLYCON_USED_OR_UNUSED __section(__earlycon_table) \ = { .name = __stringify(_name), \ .compatible = compat, \ .setup = fn } diff --git a/include/linux/sfi.h b/include/linux/sfi.h index d9b436f09925..e0e1597ef9e6 100644 --- a/include/linux/sfi.h +++ b/include/linux/sfi.h @@ -156,6 +156,7 @@ struct sfi_device_table_entry { #define SFI_DEV_TYPE_UART 2 #define SFI_DEV_TYPE_HSI 3 #define SFI_DEV_TYPE_IPC 4 +#define SFI_DEV_TYPE_SD 5 u8 host_num; /* attached to host 0, 1...*/ u16 addr; diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 4d4780c00d34..ff078e7043b6 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -16,8 +16,9 @@ struct shmem_inode_info { unsigned long flags; unsigned long alloced; /* data pages alloced to file */ unsigned long swapped; /* subtotal assigned to swap */ - struct shared_policy policy; /* NUMA memory alloc policy */ + struct list_head shrinklist; /* shrinkable hpage inodes */ struct list_head swaplist; /* chain of maybes on swap */ + struct shared_policy policy; /* NUMA memory alloc policy */ struct simple_xattrs xattrs; /* list of xattrs */ struct inode vfs_inode; }; @@ -28,10 +29,14 @@ struct shmem_sb_info { unsigned long max_inodes; /* How many inodes are allowed */ unsigned long free_inodes; /* How many are left for allocation */ spinlock_t stat_lock; /* Serialize shmem_sb_info changes */ + umode_t mode; /* Mount mode for root directory */ + unsigned char huge; /* Whether to try for hugepages */ kuid_t uid; /* Mount uid for root directory */ kgid_t gid; /* Mount gid for root directory */ - umode_t mode; /* Mount mode for root directory */ struct mempolicy *mpol; /* default memory policy for mappings */ + spinlock_t shrinklist_lock; /* Protects shrinklist */ + struct list_head shrinklist; /* List of shrinkable inodes */ + unsigned long shrinklist_len; /* Length of shrinklist */ }; static inline struct shmem_inode_info *SHMEM_I(struct inode *inode) @@ -49,6 +54,8 @@ extern struct file *shmem_file_setup(const char *name, extern struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags); extern int shmem_zero_setup(struct vm_area_struct *); +extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags); extern int shmem_lock(struct file *file, int lock, struct user_struct *user); extern bool shmem_mapping(struct address_space *mapping); extern void shmem_unlock_mapping(struct address_space *mapping); @@ -61,6 +68,19 @@ extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, pgoff_t start, pgoff_t end);
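/*
 * Usage sketch (an illustration added here, not part of this patch):
 * in-kernel users typically obtain swappable, page-cache backed memory
 * by creating an unlinked tmpfs file, as GEM drivers do:
 *
 *	struct file *filp;
 *
 *	filp = shmem_file_setup("illustrative-buf", size, VM_NORESERVE);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 */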
+/* Flag allocation requirements to shmem_getpage */ +enum sgp_type { + SGP_READ, /* don't exceed i_size, don't allocate page */ + SGP_CACHE, /* don't exceed i_size, may allocate page */ + SGP_NOHUGE, /* like SGP_CACHE, but no huge pages */ + SGP_HUGE, /* like SGP_CACHE, huge pages preferred */ + SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */ + SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */ +}; + +extern int shmem_getpage(struct inode *inode, pgoff_t index, + struct page **pagep, enum sgp_type sgp); + static inline struct page *shmem_read_mapping_page( struct address_space *mapping, pgoff_t index) { @@ -68,6 +88,18 @@ static inline struct page *shmem_read_mapping_page( mapping_gfp_mask(mapping)); } +static inline bool shmem_file(struct file *file) +{ + if (!IS_ENABLED(CONFIG_SHMEM)) + return false; + if (!file || !file->f_mapping) + return false; + return shmem_mapping(file->f_mapping); +} + +extern bool shmem_charge(struct inode *inode, long pages); +extern void shmem_uncharge(struct inode *inode, long pages); + #ifdef CONFIG_TMPFS extern int shmem_add_seals(struct file *file, unsigned int seals); @@ -83,4 +115,13 @@ static inline long shmem_fcntl(struct file *f, unsigned int c, unsigned long a) #endif +#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE +extern bool shmem_huge_enabled(struct vm_area_struct *vma); +#else +static inline bool shmem_huge_enabled(struct vm_area_struct *vma) +{ + return false; +} +#endif + #endif diff --git a/include/linux/signal.h b/include/linux/signal.h index 92557bbce7e7..b63f63eaa39c 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -28,6 +28,21 @@ struct sigpending { sigset_t signal; }; +#ifndef HAVE_ARCH_COPY_SIGINFO + +#include <linux/string.h> + +static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) +{ + if (from->si_code < 0) + memcpy(to, from, sizeof(*to)); + else + /* _sigchld is currently the largest known union member */ + memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld)); +} + +#endif + /* * Define some primitives to manipulate sigset_t.
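 *
 * (Illustrative aside, not part of the original comment: the helpers
 * defined below this point in the full header build masks like so,
 *
 *	sigset_t set;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *
 * before the mask is handed to the blocking primitives.)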
*/ @@ -385,7 +400,9 @@ int unhandled_signal(struct task_struct *tsk, int sig); #else #define rt_sigmask(sig) sigmask(sig) #endif -#define siginmask(sig, mask) (rt_sigmask(sig) & (mask)) + +#define siginmask(sig, mask) \ + ((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask))) #define SIG_KERNEL_ONLY_MASK (\ rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP)) @@ -406,14 +423,10 @@ int unhandled_signal(struct task_struct *tsk, int sig); rt_sigmask(SIGCONT) | rt_sigmask(SIGCHLD) | \ rt_sigmask(SIGWINCH) | rt_sigmask(SIGURG) ) -#define sig_kernel_only(sig) \ - (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_ONLY_MASK)) -#define sig_kernel_coredump(sig) \ - (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_COREDUMP_MASK)) -#define sig_kernel_ignore(sig) \ - (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_IGNORE_MASK)) -#define sig_kernel_stop(sig) \ - (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_STOP_MASK)) +#define sig_kernel_only(sig) siginmask(sig, SIG_KERNEL_ONLY_MASK) +#define sig_kernel_coredump(sig) siginmask(sig, SIG_KERNEL_COREDUMP_MASK) +#define sig_kernel_ignore(sig) siginmask(sig, SIG_KERNEL_IGNORE_MASK) +#define sig_kernel_stop(sig) siginmask(sig, SIG_KERNEL_STOP_MASK) #define sig_user_defined(t, signr) \ (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \ @@ -432,8 +445,10 @@ int __save_altstack(stack_t __user *, unsigned long); stack_t __user *__uss = uss; \ struct task_struct *t = current; \ put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \ - put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \ + put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \ put_user_ex(t->sas_ss_size, &__uss->ss_size); \ + if (t->sas_ss_flags & SS_AUTODISARM) \ + sas_ss_reset(t); \ } while (0); #ifdef CONFIG_PROC_FS diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h new file mode 100644 index 000000000000..f4dfade428f0 --- /dev/null +++ b/include/linux/skb_array.h @@ -0,0 +1,178 @@ +/* + * Definitions for the 'struct skb_array' datastructure. + * + * Author: + * Michael S. Tsirkin <mst@redhat.com> + * + * Copyright (C) 2016 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Limited-size FIFO of skbs. Can be used more or less whenever + * sk_buff_head can be used, except you need to know the queue size in + * advance. + * Implemented as a type-safe wrapper around ptr_ring. + */ + +#ifndef _LINUX_SKB_ARRAY_H +#define _LINUX_SKB_ARRAY_H 1 + +#ifdef __KERNEL__ +#include <linux/ptr_ring.h> +#include <linux/skbuff.h> +#include <linux/if_vlan.h> +#endif + +struct skb_array { + struct ptr_ring ring; +}; + +/* Might be slightly faster than skb_array_full below, but callers invoking + * this in a loop must use a compiler barrier, for example cpu_relax(). 
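 *
 * For instance, a single producer could busy-wait for room as follows
 * (an illustrative sketch, not part of this file):
 *
 *	while (__skb_array_full(a))
 *		cpu_relax();
 *	skb_array_produce(a, skb);
 *
 * where the produce cannot fail, since no other producer can fill the
 * ring between the check and the call.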
+ */ +static inline bool __skb_array_full(struct skb_array *a) +{ + return __ptr_ring_full(&a->ring); +} + +static inline bool skb_array_full(struct skb_array *a) +{ + return ptr_ring_full(&a->ring); +} + +static inline int skb_array_produce(struct skb_array *a, struct sk_buff *skb) +{ + return ptr_ring_produce(&a->ring, skb); +} + +static inline int skb_array_produce_irq(struct skb_array *a, struct sk_buff *skb) +{ + return ptr_ring_produce_irq(&a->ring, skb); +} + +static inline int skb_array_produce_bh(struct skb_array *a, struct sk_buff *skb) +{ + return ptr_ring_produce_bh(&a->ring, skb); +} + +static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb) +{ + return ptr_ring_produce_any(&a->ring, skb); +} + +/* Might be slightly faster than skb_array_empty below, but only safe if the + * array is never resized. Also, callers invoking this in a loop must take care + * to use a compiler barrier, for example cpu_relax(). + */ +static inline bool __skb_array_empty(struct skb_array *a) +{ + return !__ptr_ring_peek(&a->ring); +} + +static inline bool skb_array_empty(struct skb_array *a) +{ + return ptr_ring_empty(&a->ring); +} + +static inline bool skb_array_empty_bh(struct skb_array *a) +{ + return ptr_ring_empty_bh(&a->ring); +} + +static inline bool skb_array_empty_irq(struct skb_array *a) +{ + return ptr_ring_empty_irq(&a->ring); +} + +static inline bool skb_array_empty_any(struct skb_array *a) +{ + return ptr_ring_empty_any(&a->ring); +} + +static inline struct sk_buff *skb_array_consume(struct skb_array *a) +{ + return ptr_ring_consume(&a->ring); +} + +static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a) +{ + return ptr_ring_consume_irq(&a->ring); +} + +static inline struct sk_buff *skb_array_consume_any(struct skb_array *a) +{ + return ptr_ring_consume_any(&a->ring); +} + +static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a) +{ + return ptr_ring_consume_bh(&a->ring); +} + +static inline int __skb_array_len_with_tag(struct sk_buff *skb) +{ + if (likely(skb)) { + int len = skb->len; + + if (skb_vlan_tag_present(skb)) + len += VLAN_HLEN; + + return len; + } else { + return 0; + } +} + +static inline int skb_array_peek_len(struct skb_array *a) +{ + return PTR_RING_PEEK_CALL(&a->ring, __skb_array_len_with_tag); +} + +static inline int skb_array_peek_len_irq(struct skb_array *a) +{ + return PTR_RING_PEEK_CALL_IRQ(&a->ring, __skb_array_len_with_tag); +} + +static inline int skb_array_peek_len_bh(struct skb_array *a) +{ + return PTR_RING_PEEK_CALL_BH(&a->ring, __skb_array_len_with_tag); +} + +static inline int skb_array_peek_len_any(struct skb_array *a) +{ + return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag); +} + +static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp) +{ + return ptr_ring_init(&a->ring, size, gfp); +} + +static void __skb_array_destroy_skb(void *ptr) +{ + kfree_skb(ptr); +} + +static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp) +{ + return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb); +} + +static inline int skb_array_resize_multiple(struct skb_array **rings, + int nrings, int size, gfp_t gfp) +{ + BUILD_BUG_ON(offsetof(struct skb_array, ring)); + return ptr_ring_resize_multiple((struct ptr_ring **)rings, + nrings, size, gfp, + __skb_array_destroy_skb); +} + +static inline void skb_array_cleanup(struct skb_array *a) +{ + ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb); +} + +#endif /* _LINUX_SKB_ARRAY_H */ diff --git 
a/include/linux/skbuff.h b/include/linux/skbuff.h index 15d0df943466..0f665cb26b50 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -37,6 +37,7 @@ #include <net/flow_dissector.h> #include <linux/splice.h> #include <linux/in6.h> +#include <linux/if_packet.h> #include <net/flow.h> /* The interface for checksum offload between the stack and networking drivers @@ -301,6 +302,11 @@ struct sk_buff; #endif extern int sysctl_max_skb_frags; +/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to + * segment using its current segmentation instead. + */ +#define GSO_BY_FRAGS 0xFFFF + typedef struct skb_frag_struct skb_frag_t; struct skb_frag_struct { @@ -382,14 +388,10 @@ enum { /* generate software time stamp when entering packet scheduling */ SKBTX_SCHED_TSTAMP = 1 << 6, - - /* generate software timestamp on peer data acknowledgment */ - SKBTX_ACK_TSTAMP = 1 << 7, }; #define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \ - SKBTX_SCHED_TSTAMP | \ - SKBTX_ACK_TSTAMP) + SKBTX_SCHED_TSTAMP) #define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP) /* @@ -465,23 +467,29 @@ enum { /* This indicates the tcp segment has CWR set. */ SKB_GSO_TCP_ECN = 1 << 3, - SKB_GSO_TCPV6 = 1 << 4, + SKB_GSO_TCP_FIXEDID = 1 << 4, + + SKB_GSO_TCPV6 = 1 << 5, - SKB_GSO_FCOE = 1 << 5, + SKB_GSO_FCOE = 1 << 6, - SKB_GSO_GRE = 1 << 6, + SKB_GSO_GRE = 1 << 7, - SKB_GSO_GRE_CSUM = 1 << 7, + SKB_GSO_GRE_CSUM = 1 << 8, - SKB_GSO_IPIP = 1 << 8, + SKB_GSO_IPXIP4 = 1 << 9, - SKB_GSO_SIT = 1 << 9, + SKB_GSO_IPXIP6 = 1 << 10, - SKB_GSO_UDP_TUNNEL = 1 << 10, + SKB_GSO_UDP_TUNNEL = 1 << 11, - SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, + SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12, - SKB_GSO_TUNNEL_REMCSUM = 1 << 12, + SKB_GSO_PARTIAL = 1 << 13, + + SKB_GSO_TUNNEL_REMCSUM = 1 << 14, + + SKB_GSO_SCTP = 1 << 15, }; #if BITS_PER_LONG > 32 @@ -874,6 +882,15 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb) return (struct rtable *)skb_dst(skb); } +/* For mangling skb->pkt_type from user space side from applications + * such as nft, tc, etc, we only allow a conservative subset of + * possible pkt_types to be set. 
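 *
 * A caller validating a user-supplied value would then do something
 * like (illustrative sketch):
 *
 *	if (!skb_pkt_type_ok(value))
 *		return -EINVAL;
 *	skb->pkt_type = value;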
+*/ +static inline bool skb_pkt_type_ok(u32 ptype) +{ + return ptype <= PACKET_OTHERHOST; +} + void kfree_skb(struct sk_buff *skb); void kfree_skb_list(struct sk_buff *segs); void skb_tx_error(struct sk_buff *skb); @@ -1062,6 +1079,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4) } void __skb_get_hash(struct sk_buff *skb); +u32 __skb_get_hash_symmetric(struct sk_buff *skb); u32 skb_get_poff(const struct sk_buff *skb); u32 __skb_get_poff(const struct sk_buff *skb, void *data, const struct flow_keys *keys, int hlen); @@ -1325,6 +1343,16 @@ static inline int skb_header_cloned(const struct sk_buff *skb) return dataref != 1; } +static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri) +{ + might_sleep_if(gfpflags_allow_blocking(pri)); + + if (skb_header_cloned(skb)) + return pskb_expand_head(skb, 0, 0, pri); + + return 0; +} + /** * skb_header_release - release reference to header * @skb: buffer to operate on @@ -2457,7 +2485,7 @@ static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, static inline struct page *dev_alloc_pages(unsigned int order) { - return __dev_alloc_pages(GFP_ATOMIC, order); + return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order); } /** @@ -2475,7 +2503,7 @@ static inline struct page *__dev_alloc_page(gfp_t gfp_mask) static inline struct page *dev_alloc_page(void) { - return __dev_alloc_page(GFP_ATOMIC); + return dev_alloc_pages(0); } /** @@ -2819,6 +2847,18 @@ static inline int skb_linearize_cow(struct sk_buff *skb) __skb_linearize(skb) : 0; } +static __always_inline void +__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len, + unsigned int off) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->csum = csum_block_sub(skb->csum, + csum_partial(start, len, 0), off); + else if (skb->ip_summed == CHECKSUM_PARTIAL && + skb_checksum_start_offset(skb) < 0) + skb->ip_summed = CHECKSUM_NONE; +} + /** * skb_postpull_rcsum - update checksum for received skb after pull * @skb: buffer to update @@ -2829,34 +2869,55 @@ static inline int skb_linearize_cow(struct sk_buff *skb) * update the CHECKSUM_COMPLETE checksum, or set ip_summed to * CHECKSUM_NONE so that it can be recomputed from scratch. */ - static inline void skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len) { - if (skb->ip_summed == CHECKSUM_COMPLETE) - skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); - else if (skb->ip_summed == CHECKSUM_PARTIAL && - skb_checksum_start_offset(skb) < 0) - skb->ip_summed = CHECKSUM_NONE; + __skb_postpull_rcsum(skb, start, len, 0); } -unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); +static __always_inline void +__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len, + unsigned int off) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->csum = csum_block_add(skb->csum, + csum_partial(start, len, 0), off); +} +/** + * skb_postpush_rcsum - update checksum for received skb after push + * @skb: buffer to update + * @start: start of data after push + * @len: length of data pushed + * + * After doing a push on a received packet, you need to call this to + * update the CHECKSUM_COMPLETE checksum. + */ static inline void skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len) { - /* For performing the reverse operation to skb_postpull_rcsum(), - * we can instead of ... - * - * skb->csum = csum_add(skb->csum, csum_partial(start, len, 0)); - * - * ... just use this equivalent version here to save a few - * instructions. 
Feeding csum of 0 in csum_partial() and later - * on adding skb->csum is equivalent to feed skb->csum in the - * first place. - */ - if (skb->ip_summed == CHECKSUM_COMPLETE) - skb->csum = csum_partial(start, len, skb->csum); + __skb_postpush_rcsum(skb, start, len, 0); +} + +unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); + +/** + * skb_push_rcsum - push skb and update receive checksum + * @skb: buffer to update + * @len: length of data pushed + * + * This function performs an skb_push on the packet and updates + * the CHECKSUM_COMPLETE checksum. It should be used on + * receive path processing instead of skb_push unless you know + * that the checksum difference is zero (e.g., a valid IP header) + * or you are setting ip_summed to CHECKSUM_NONE. + */ +static inline unsigned char *skb_push_rcsum(struct sk_buff *skb, + unsigned int len) +{ + skb_push(skb, len); + skb_postpush_rcsum(skb, skb->data, len); + return skb->data; } /** @@ -2949,7 +3010,12 @@ int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, struct iov_iter *from, int len); int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); void skb_free_datagram(struct sock *sk, struct sk_buff *skb); -void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb); +void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len); +static inline void skb_free_datagram_locked(struct sock *sk, + struct sk_buff *skb) +{ + __skb_free_datagram_locked(sk, skb, 0); +} int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); @@ -2972,11 +3038,14 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); void skb_scrub_packet(struct sk_buff *skb, bool xnet); unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); +bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu); struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); struct sk_buff *skb_vlan_untag(struct sk_buff *skb); int skb_ensure_writable(struct sk_buff *skb, int write_len); int skb_vlan_pop(struct sk_buff *skb); int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); +struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, + gfp_t gfp); static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len) { @@ -3584,7 +3653,10 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb) * Keeps track of level of encapsulation of network headers. */ struct skb_gso_cb { - int mac_offset; + union { + int mac_offset; + int data_offset; + }; int encap_level; __wsum csum; __u16 csum_start; diff --git a/include/linux/slab.h b/include/linux/slab.h index 508bd827e6dc..4293808d8cfb 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -155,6 +155,18 @@ void kfree(const void *); void kzfree(const void *); size_t ksize(const void *); +#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR +const char *__check_heap_object(const void *ptr, unsigned long n, + struct page *page); +#else +static inline const char *__check_heap_object(const void *ptr, + unsigned long n, + struct page *page) +{ + return NULL; +} +#endif + /* * Some archs want to perform DMA into kmalloc caches and need a guaranteed * alignment larger than the alignment of a 64-bit integer.
@@ -315,8 +327,8 @@ static __always_inline int kmalloc_index(size_t size) } #endif /* !CONFIG_SLOB */ -void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment; -void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment; +void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc; +void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc; void kmem_cache_free(struct kmem_cache *, void *); /* @@ -339,8 +351,8 @@ static __always_inline void kfree_bulk(size_t size, void **p) } #ifdef CONFIG_NUMA -void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment; -void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment; +void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc; +void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc; #else static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) { @@ -354,12 +366,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f #endif #ifdef CONFIG_TRACING -extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment; +extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc; #ifdef CONFIG_NUMA extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, - int node, size_t size) __assume_slab_alignment; + int node, size_t size) __assume_slab_alignment __malloc; #else static __always_inline void * kmem_cache_alloc_node_trace(struct kmem_cache *s, @@ -392,10 +404,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, } #endif /* CONFIG_TRACING */ -extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment; +extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc; #ifdef CONFIG_TRACING -extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment; +extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc; #else static __always_inline void * kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) @@ -565,6 +577,8 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) { if (size != 0 && n > SIZE_MAX / size) return NULL; + if (__builtin_constant_p(n) && __builtin_constant_p(size)) + return kmalloc(n * size, flags); return __kmalloc(n * size, flags); } diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 9edbbf352340..4ad2c5a26399 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -80,11 +80,16 @@ struct kmem_cache { struct kasan_cache kasan_info; #endif +#ifdef CONFIG_SLAB_FREELIST_RANDOM + unsigned int *random_seq; +#endif + struct kmem_cache_node *node[MAX_NUMNODES]; }; static inline void *nearest_obj(struct kmem_cache *cache, struct page *page, - void *x) { + void *x) +{ void *object = x - (x - page->s_mem) % cache->size; void *last_object = page->s_mem + (cache->num - 1) * cache->size; diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 665cd0cd18b8..75f56c2ef2d4 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -99,6 +99,15 @@ struct kmem_cache { */ int remote_node_defrag_ratio; #endif + +#ifdef CONFIG_SLAB_FREELIST_RANDOM + unsigned int *random_seq; +#endif + +#ifdef CONFIG_KASAN + 
struct kasan_cache kasan_info; +#endif + struct kmem_cache_node *node[MAX_NUMNODES]; }; @@ -111,34 +120,20 @@ static inline void sysfs_slab_remove(struct kmem_cache *s) } #endif - -/** - * virt_to_obj - returns address of the beginning of object. - * @s: object's kmem_cache - * @slab_page: address of slab page - * @x: address within object memory range - * - * Returns address of the beginning of object - */ -static inline void *virt_to_obj(struct kmem_cache *s, - const void *slab_page, - const void *x) -{ - return (void *)x - ((x - slab_page) % s->size); -} - void object_err(struct kmem_cache *s, struct page *page, u8 *object, char *reason); +void *fixup_red_left(struct kmem_cache *s, void *p); + static inline void *nearest_obj(struct kmem_cache *cache, struct page *page, void *x) { void *object = x - (x - page_address(page)) % cache->size; void *last_object = page_address(page) + (page->objects - 1) * cache->size; - if (unlikely(object > last_object)) - return last_object; - else - return object; + void *result = (unlikely(object > last_object)) ? last_object : object; + + result = fixup_red_left(cache, result); + return result; } #endif /* _LINUX_SLUB_DEF_H */ diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h index 76199b75d584..e302c447e057 100644 --- a/include/linux/smc91x.h +++ b/include/linux/smc91x.h @@ -1,6 +1,16 @@ #ifndef __SMC91X_H__ #define __SMC91X_H__ +/* + * These bits define which access sizes a platform can support, rather + * than the maximal access size. So, if your platform can do 16-bit + * and 32-bit accesses to the SMC91x device, but not 8-bit, set both + * SMC91X_USE_16BIT and SMC91X_USE_32BIT. + * + * The SMC91x driver requires at least one of SMC91X_USE_8BIT or + * SMC91X_USE_16BIT to be supported - just setting SMC91X_USE_32BIT is + * an invalid configuration. 
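 *
 * For example (an illustrative sketch, not part of this header), board
 * code for a platform that can do 8-bit and 16-bit accesses would set:
 *
 *	static struct smc91x_platdata board_smc91x_data = {
 *		.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT,
 *	};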
+ */ #define SMC91X_USE_8BIT (1 << 0) #define SMC91X_USE_16BIT (1 << 1) #define SMC91X_USE_32BIT (1 << 2) diff --git a/include/linux/smp.h b/include/linux/smp.h index c4414074bd88..eccae4690f41 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -196,4 +196,9 @@ extern void arch_enable_nonboot_cpus_end(void); void smp_setup_processor_id(void); +/* SMP core functions */ +int smpcfd_prepare_cpu(unsigned int cpu); +int smpcfd_dead_cpu(unsigned int cpu); +int smpcfd_dying_cpu(unsigned int cpu); + #endif /* __LINUX_SMP_H */ diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h index d0cb6d189a0a..cbb0f06c41b2 100644 --- a/include/linux/soc/qcom/smd.h +++ b/include/linux/soc/qcom/smd.h @@ -26,6 +26,8 @@ struct qcom_smd_device { struct qcom_smd_channel *channel; }; +typedef int (*qcom_smd_cb_t)(struct qcom_smd_channel *, const void *, size_t); + /** * struct qcom_smd_driver - smd driver struct * @driver: underlying device driver @@ -42,16 +44,71 @@ struct qcom_smd_driver { int (*probe)(struct qcom_smd_device *dev); void (*remove)(struct qcom_smd_device *dev); - int (*callback)(struct qcom_smd_device *, const void *, size_t); + qcom_smd_cb_t callback; }; +#if IS_ENABLED(CONFIG_QCOM_SMD) + int qcom_smd_driver_register(struct qcom_smd_driver *drv); void qcom_smd_driver_unregister(struct qcom_smd_driver *drv); +struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *channel, + const char *name, + qcom_smd_cb_t cb); +void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel); +void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data); +int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len); + + +#else + +static inline int qcom_smd_driver_register(struct qcom_smd_driver *drv) { + return -ENXIO; } + +static inline void qcom_smd_driver_unregister(struct qcom_smd_driver *drv) { + /* This shouldn't be possible */ + WARN_ON(1); } + +static inline struct qcom_smd_channel * +qcom_smd_open_channel(struct qcom_smd_channel *channel, + const char *name, + qcom_smd_cb_t cb) { + /* This shouldn't be possible */ + WARN_ON(1); + return NULL; } + +static inline void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel) { + /* This shouldn't be possible */ + WARN_ON(1); + return NULL; } + +static inline void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data) { + /* This shouldn't be possible */ + WARN_ON(1); } + +static inline int qcom_smd_send(struct qcom_smd_channel *channel, + const void *data, int len) { + /* This shouldn't be possible */ + WARN_ON(1); + return -ENXIO; } + +#endif + #define module_qcom_smd_driver(__smd_driver) \ module_driver(__smd_driver, qcom_smd_driver_register, \ qcom_smd_driver_unregister) -int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len); #endif diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h index f35e1512fcaa..7b88697929e9 100644 --- a/include/linux/soc/qcom/smem_state.h +++ b/include/linux/soc/qcom/smem_state.h @@ -1,12 +1,17 @@ #ifndef __QCOM_SMEM_STATE__ #define __QCOM_SMEM_STATE__ +#include <linux/errno.h> + +struct device_node; struct qcom_smem_state; struct qcom_smem_state_ops { int (*update_bits)(void *, u32, u32); }; +#ifdef CONFIG_QCOM_SMEM_STATE + struct qcom_smem_state *qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit); void qcom_smem_state_put(struct qcom_smem_state *); @@ -15,4 +20,34 @@ int qcom_smem_state_update_bits(struct qcom_smem_state *state, u32 mask, u32 val struct qcom_smem_state
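Before the smem_state declarations continue below, a minimal consumer of the reworked SMD interface above, built around the new qcom_smd_cb_t callback type (sketch with hypothetical names, not code from this patch):

	static int example_smd_callback(struct qcom_smd_channel *channel,
					const void *data, size_t count)
	{
		/* consume one inbound SMD message */
		return 0;
	}

	static int example_smd_probe(struct qcom_smd_device *sdev)
	{
		/* say hello on the channel we were probed for */
		return qcom_smd_send(sdev->channel, "hello", 5);
	}

	static struct qcom_smd_driver example_smd_driver = {
		.probe = example_smd_probe,
		.callback = example_smd_callback,
		.driver = {
			.name = "example-smd",
		},
	};
	module_qcom_smd_driver(example_smd_driver);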
*qcom_smem_state_register(struct device_node *of_node, const struct qcom_smem_state_ops *ops, void *data); void qcom_smem_state_unregister(struct qcom_smem_state *state); +#else + +static inline struct qcom_smem_state *qcom_smem_state_get(struct device *dev, + const char *con_id, unsigned *bit) +{ + return ERR_PTR(-EINVAL); +} + +static inline void qcom_smem_state_put(struct qcom_smem_state *state) +{ +} + +static inline int qcom_smem_state_update_bits(struct qcom_smem_state *state, + u32 mask, u32 value) +{ + return -EINVAL; +} + +static inline struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node, + const struct qcom_smem_state_ops *ops, void *data) +{ + return ERR_PTR(-EINVAL); +} + +static inline void qcom_smem_state_unregister(struct qcom_smem_state *state) +{ +} + +#endif + #endif diff --git a/include/linux/soc/qcom/wcnss_ctrl.h b/include/linux/soc/qcom/wcnss_ctrl.h new file mode 100644 index 000000000000..a37bc5538f19 --- /dev/null +++ b/include/linux/soc/qcom/wcnss_ctrl.h @@ -0,0 +1,8 @@ +#ifndef __WCNSS_CTRL_H__ +#define __WCNSS_CTRL_H__ + +#include <linux/soc/qcom/smd.h> + +struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb); + +#endif diff --git a/include/linux/soc/renesas/rcar-sysc.h b/include/linux/soc/renesas/rcar-sysc.h new file mode 100644 index 000000000000..7b8b280c181b --- /dev/null +++ b/include/linux/soc/renesas/rcar-sysc.h @@ -0,0 +1,16 @@ +#ifndef __LINUX_SOC_RENESAS_RCAR_SYSC_H__ +#define __LINUX_SOC_RENESAS_RCAR_SYSC_H__ + +#include <linux/types.h> + +struct rcar_sysc_ch { + u16 chan_offs; + u8 chan_bit; + u8 isr_bit; +}; + +int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch); +int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch); +void rcar_sysc_init(phys_addr_t base, u32 syscier); + +#endif /* __LINUX_SOC_RENESAS_RCAR_SYSC_H__ */ diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h index 4018b48f2b3b..a0596ca0e80a 100644 --- a/include/linux/sock_diag.h +++ b/include/linux/sock_diag.h @@ -36,6 +36,9 @@ enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk) { switch (sk->sk_family) { case AF_INET: + if (sk->sk_type == SOCK_RAW) + return SKNLGRP_NONE; + switch (sk->sk_protocol) { case IPPROTO_TCP: return SKNLGRP_INET_TCP_DESTROY; @@ -45,6 +48,9 @@ enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk) return SKNLGRP_NONE; } case AF_INET6: + if (sk->sk_type == SOCK_RAW) + return SKNLGRP_NONE; + switch (sk->sk_protocol) { case IPPROTO_TCP: return SKNLGRP_INET6_TCP_DESTROY; diff --git a/include/linux/socket.h b/include/linux/socket.h index 73bf6c6a833b..b5cc5a6d7011 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -201,8 +201,9 @@ struct ucred { #define AF_NFC 39 /* NFC sockets */ #define AF_VSOCK 40 /* vSockets */ #define AF_KCM 41 /* Kernel Connection Multiplexor*/ +#define AF_QIPCRTR 42 /* Qualcomm IPC Router */ -#define AF_MAX 42 /* For now.. */ +#define AF_MAX 43 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -249,6 +250,7 @@ struct ucred { #define PF_NFC AF_NFC #define PF_VSOCK AF_VSOCK #define PF_KCM AF_KCM +#define PF_QIPCRTR AF_QIPCRTR #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. 
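For the new AF_QIPCRTR family above, a userspace consumer would eventually open a datagram socket against it (sketch; assumes the libc headers have not yet picked up the constant, hence the fallback define):

	#include <sys/socket.h>

	#ifndef AF_QIPCRTR
	#define AF_QIPCRTR 42	/* matches the kernel define above */
	#endif

	int example_open_qrtr(void)
	{
		/* IPC router sockets are datagram-oriented */
		return socket(AF_QIPCRTR, SOCK_DGRAM, 0);
	}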
*/ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 857a9a1d82b5..072cb2aa2413 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -312,8 +312,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @flags: other constraints relevant to this driver * @max_transfer_size: function that returns the max transfer size for * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used. + * @io_mutex: mutex for physical bus access * @bus_lock_spinlock: spinlock for SPI bus locking - * @bus_lock_mutex: mutex for SPI bus locking + * @bus_lock_mutex: mutex for exclusion of multiple callers * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use * @setup: updates the device mode and clocking records used by a * device's SPI controller; protocol code may call this. This @@ -372,6 +373,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @unprepare_message: undo any work done by prepare_message(). * @spi_flash_read: to support spi-controller hardware that provides * an accelerated interface to read from flash devices. + * @flash_read_supported: spi device supports flash read * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS * number. Any individual value may be -ENOENT for CS lines that * are not GPIOs (driven by the SPI controller itself). @@ -445,6 +447,9 @@ struct spi_master { */ size_t (*max_transfer_size)(struct spi_device *spi); + /* I/O mutex */ + struct mutex io_mutex; + /* lock and mutex for SPI bus locking */ spinlock_t bus_lock_spinlock; struct mutex bus_lock_mutex; @@ -529,6 +534,7 @@ struct spi_master { struct spi_message *message); int (*spi_flash_read)(struct spi_device *spi, struct spi_flash_read_message *msg); + bool (*flash_read_supported)(struct spi_device *spi); /* * These hooks are for drivers that use a generic implementation @@ -1141,6 +1147,8 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) * @opcode_nbits: number of lines to send opcode * @addr_nbits: number of lines to send address * @data_nbits: number of lines for data + * @rx_sg: Scatterlist for receive data read from flash + * @cur_msg_mapped: message has been mapped for DMA */ struct spi_flash_read_message { void *buf; @@ -1153,12 +1161,16 @@ struct spi_flash_read_message { u8 opcode_nbits; u8 addr_nbits; u8 data_nbits; + struct sg_table rx_sg; + bool cur_msg_mapped; }; /* SPI core interface for flash read support */ static inline bool spi_flash_read_supported(struct spi_device *spi) { - return spi->master->spi_flash_read ? true : false; + return spi->master->spi_flash_read && + (!spi->master->flash_read_supported || + spi->master->flash_read_supported(spi)); } int spi_flash_read(struct spi_device *spi, diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index 8b3ac0d718eb..0d9848de677d 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -6,6 +6,7 @@ #endif #include <asm/processor.h> /* for cpu_relax() */ +#include <asm/barrier.h> /* * include/linux/spinlock_up.h - UP-debug version of spinlocks.
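Stepping back to the spi.h changes above: a flash driver gates its accelerated path on the extended spi_flash_read_supported() check, roughly in the style of m25p80 (hypothetical sketch, not from this patch):

	static int example_flash_read(struct spi_device *spi, void *buf,
				      loff_t from, size_t len)
	{
		struct spi_flash_read_message msg = {
			.buf		= buf,
			.from		= from,
			.len		= len,
			.read_opcode	= 0x03,	/* basic NOR READ */
			.addr_width	= 3,
			.dummy_bytes	= 0,
			.opcode_nbits	= 1,
			.addr_nbits	= 1,
			.data_nbits	= 1,
		};

		/* honours both ->spi_flash_read and the new per-device hook */
		if (!spi_flash_read_supported(spi))
			return -EOPNOTSUPP;

		return spi_flash_read(spi, &msg);
	}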
@@ -25,6 +26,11 @@ #ifdef CONFIG_DEBUG_SPINLOCK #define arch_spin_is_locked(x) ((x)->slock == 0) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) +{ + smp_cond_load_acquire(&lock->slock, VAL); +} + static inline void arch_spin_lock(arch_spinlock_t *lock) { lock->slock = 0; @@ -67,6 +73,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) #else /* DEBUG_SPINLOCK */ #define arch_spin_is_locked(lock) ((void)(lock), 0) +#define arch_spin_unlock_wait(lock) do { barrier(); (void)(lock); } while (0) /* for sched/core.c and kernel_lock.c: */ # define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0) # define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0) @@ -79,7 +86,4 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) #define arch_read_can_lock(lock) (((void)(lock), 1)) #define arch_write_can_lock(lock) (((void)(lock), 1)) -#define arch_spin_unlock_wait(lock) \ - do { cpu_relax(); } while (arch_spin_is_locked(lock)) - #endif /* __LINUX_SPINLOCK_UP_H */ diff --git a/include/linux/stm.h b/include/linux/stm.h index 1a79ed8e43da..8369d8a8cabd 100644 --- a/include/linux/stm.h +++ b/include/linux/stm.h @@ -50,6 +50,8 @@ struct stm_device; * @sw_end: last STP master available to software * @sw_nchannels: number of STP channels per master * @sw_mmiosz: size of one channel's IO space, for mmap, optional + * @hw_override: masters in the STP stream will not match the ones + * assigned by software, but are up to the STM hardware * @packet: callback that sends an STP packet * @mmio_addr: mmap callback, optional * @link: called when a new stm_source gets linked to us, optional @@ -85,6 +87,7 @@ struct stm_data { unsigned int sw_end; unsigned int sw_nchannels; unsigned int sw_mmiosz; + unsigned int hw_override; ssize_t (*packet)(struct stm_data *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index e6bc30a42a74..705840e0438f 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -135,7 +135,12 @@ struct plat_stmmacenet_data { void (*bus_setup)(void __iomem *ioaddr); int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); + void (*suspend)(struct platform_device *pdev, void *priv); + void (*resume)(struct platform_device *pdev, void *priv); void *bsp_priv; struct stmmac_axi *axi; + int has_gmac4; + bool tso_en; + int mac_port_sel_speed; }; #endif diff --git a/include/linux/string.h b/include/linux/string.h index d3993a79a325..26b6f6a66f83 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -119,7 +119,7 @@ char *strreplace(char *s, char old, char new); extern void kfree_const(const void *x); -extern char *kstrdup(const char *s, gfp_t gfp); +extern char *kstrdup(const char *s, gfp_t gfp) __malloc; extern const char *kstrdup_const(const char *s, gfp_t gfp); extern char *kstrndup(const char *s, size_t len, gfp_t gfp); extern void *kmemdup(const void *src, size_t len, gfp_t gfp); diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h index dabe643eb5fa..5ce9538f290e 100644 --- a/include/linux/string_helpers.h +++ b/include/linux/string_helpers.h @@ -3,6 +3,8 @@ #include <linux/types.h> +struct file; + /* Descriptions of the types of units to * print in */ enum string_size_units { @@ -68,4 +70,8 @@ static inline int string_escape_str_any_np(const char *src, char *dst, return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only); } +char 
*kstrdup_quotable(const char *src, gfp_t gfp); +char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp); +char *kstrdup_quotable_file(struct file *file, gfp_t gfp); + #endif diff --git a/include/linux/stringhash.h b/include/linux/stringhash.h new file mode 100644 index 000000000000..7c2d95170d01 --- /dev/null +++ b/include/linux/stringhash.h @@ -0,0 +1,78 @@ +#ifndef __LINUX_STRINGHASH_H +#define __LINUX_STRINGHASH_H + +#include <linux/compiler.h> /* For __pure */ +#include <linux/types.h> /* For u32, u64 */ +#include <linux/hash.h> + +/* + * Routines for hashing strings of bytes to a 32-bit hash value. + * + * These hash functions are NOT GUARANTEED STABLE between kernel + * versions, architectures, or even repeated boots of the same kernel. + * (E.g. they may depend on boot-time hardware detection or be + * deliberately randomized.) + * + * They are also not intended to be secure against collisions caused by + * malicious inputs; much slower hash functions are required for that. + * + * They are optimized for pathname components, meaning short strings. + * Even if a majority of files have longer names, the dynamic profile of + * pathname components skews short due to short directory names. + * (E.g. /usr/lib/libsesquipedalianism.so.3.141.) + */ + +/* + * Version 1: one byte at a time. Example of use: + * + * unsigned long hash = init_name_hash(salt); + * while (*p) + * hash = partial_name_hash(tolower(*p++), hash); + * hash = end_name_hash(hash); + * + * Although this is designed for bytes, fs/hfsplus/unicode.c + * abuses it to hash 16-bit values. + */ + +/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */ +#define init_name_hash(salt) (unsigned long)(salt) + +/* partial hash update function. Assume roughly 4 bits per character */ +static inline unsigned long +partial_name_hash(unsigned long c, unsigned long prevhash) +{ + return (prevhash + (c << 4) + (c >> 4)) * 11; +} + +/* + * Finally: cut down the number of bits to an int value (and try to avoid + * losing bits). This also has the property (wanted by the dcache) + * that the msbits make a good hash table index. + */ +static inline unsigned long end_name_hash(unsigned long hash) +{ + return __hash_32((unsigned int)hash); +} + +/* + * Version 2: One word (32 or 64 bits) at a time. + * If CONFIG_DCACHE_WORD_ACCESS is defined (meaning <asm/word-at-a-time.h> + * exists, which describes major Linux platforms like x86 and ARM), then + * this computes a different hash function much faster. + * + * If not set, this falls back to a wrapper around the preceding. + */ +extern unsigned int __pure full_name_hash(const void *salt, const char *, unsigned int); + +/* + * A hash_len is a u64 with the hash of a string in the low + * half and the length in the high half.
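As a quick illustration of the interfaces above (sketch; NULL is a valid salt when no randomization is wanted):

	static inline unsigned int example_bucket(const char *name, unsigned int len)
	{
		/* per the note above, the msbits make the best table index */
		return full_name_hash(NULL, name, len) >> (32 - 8);	/* 256 buckets */
	}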
+ */ +#define hashlen_hash(hashlen) ((u32)(hashlen)) +#define hashlen_len(hashlen) ((u32)((hashlen) >> 32)) +#define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash)) + +/* Return the "hash_len" (hash and length) of a null-terminated string */ +extern u64 __pure hashlen_string(const void *salt, const char *name); + +#endif /* __LINUX_STRINGHASH_H */ diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 6a241a277249..4ccf184e971f 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -37,7 +37,6 @@ struct rpcsec_gss_info; /* auth_cred ac_flags bits */ enum { - RPC_CRED_NO_CRKEY_TIMEOUT = 0, /* underlying cred has no key timeout */ RPC_CRED_KEY_EXPIRE_SOON = 1, /* underlying cred key will expire soon */ RPC_CRED_NOTIFY_TIMEOUT = 2, /* notify generic cred when underlying key will expire soon */ @@ -82,6 +81,9 @@ struct rpc_cred { #define RPCAUTH_CRED_MAGIC 0x0f4aa4f0 +/* rpc_auth au_flags */ +#define RPCAUTH_AUTH_NO_CRKEY_TIMEOUT 0x0001 /* underlying cred has no key timeout */ + /* * Client authentication handle */ @@ -107,6 +109,9 @@ struct rpc_auth { /* per-flavor data */ }; +/* rpc_auth au_flags */ +#define RPCAUTH_AUTH_DATATOUCH 0x00000002 + struct rpc_auth_create_args { rpc_authflavor_t pseudoflavor; const char *target_name; @@ -127,7 +132,7 @@ struct rpc_authops { void (*destroy)(struct rpc_auth *); struct rpc_cred * (*lookup_cred)(struct rpc_auth *, struct auth_cred *, int); - struct rpc_cred * (*crcreate)(struct rpc_auth*, struct auth_cred *, int); + struct rpc_cred * (*crcreate)(struct rpc_auth*, struct auth_cred *, int, gfp_t); int (*list_pseudoflavors)(rpc_authflavor_t *, int); rpc_authflavor_t (*info2flavor)(struct rpcsec_gss_info *); int (*flavor2info)(rpc_authflavor_t, @@ -167,6 +172,7 @@ void rpc_destroy_authunix(void); struct rpc_cred * rpc_lookup_cred(void); struct rpc_cred * rpc_lookup_cred_nonblock(void); +struct rpc_cred * rpc_lookup_generic_cred(struct auth_cred *, int, gfp_t); struct rpc_cred * rpc_lookup_machine_cred(const char *service_name); int rpcauth_register(const struct rpc_authops *); int rpcauth_unregister(const struct rpc_authops *); @@ -178,7 +184,7 @@ rpc_authflavor_t rpcauth_get_pseudoflavor(rpc_authflavor_t, int rpcauth_get_gssinfo(rpc_authflavor_t, struct rpcsec_gss_info *); int rpcauth_list_flavors(rpc_authflavor_t *, int); -struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int); +struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int, gfp_t); void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *); struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int); struct rpc_cred * rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int); @@ -195,15 +201,34 @@ void rpcauth_destroy_credcache(struct rpc_auth *); void rpcauth_clear_credcache(struct rpc_cred_cache *); int rpcauth_key_timeout_notify(struct rpc_auth *, struct rpc_cred *); -bool rpcauth_cred_key_to_expire(struct rpc_cred *); +bool rpcauth_cred_key_to_expire(struct rpc_auth *, struct rpc_cred *); char * rpcauth_stringify_acceptor(struct rpc_cred *); static inline struct rpc_cred * get_rpccred(struct rpc_cred *cred) { - atomic_inc(&cred->cr_count); + if (cred != NULL) + atomic_inc(&cred->cr_count); return cred; } +/** + * get_rpccred_rcu - get a reference to a cred using rcu-protected pointer + * @cred: cred of which to take a reference + * + * In some cases, we may have a pointer to a credential to which we + * want
to take a reference, but don't already have one. Because these + * objects are freed using RCU, we can access the cr_count while it's + * on its way to destruction and only take a reference if it's not already + * zero. + */ +static inline struct rpc_cred * +get_rpccred_rcu(struct rpc_cred *cred) +{ + if (atomic_inc_not_zero(&cred->cr_count)) + return cred; + return NULL; +} + #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_AUTH_H */ diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index ed03c9f7f908..62a60eeacb0a 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -78,8 +78,6 @@ struct cache_detail { struct hlist_head * hash_table; rwlock_t hash_lock; - atomic_t inuse; /* active user-space update or lookup */ - char *name; void (*cache_put)(struct kref *); diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 9a7ddbaf116e..5c02b0691587 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -137,8 +137,6 @@ struct rpc_create_args { #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9) struct rpc_clnt *rpc_create(struct rpc_create_args *args); -struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, - struct rpc_xprt *xprt); struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, const struct rpc_program *, u32); struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); @@ -176,6 +174,7 @@ void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); int rpc_protocol(struct rpc_clnt *); struct net * rpc_net_ns(struct rpc_clnt *); size_t rpc_max_payload(struct rpc_clnt *); +size_t rpc_max_bc_payload(struct rpc_clnt *); unsigned long rpc_get_timeout(struct rpc_clnt *clnt); void rpc_force_rebind(struct rpc_clnt *); size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); @@ -196,6 +195,8 @@ int rpc_clnt_add_xprt(struct rpc_clnt *, struct xprt_create *, struct rpc_xprt *, void *), void *data); +void rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, + unsigned long timeo); const char *rpc_proc_name(const struct rpc_task *task); #endif /* __KERNEL__ */ diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h index 1f911ccb2a75..68ec78c1aa48 100644 --- a/include/linux/sunrpc/gss_api.h +++ b/include/linux/sunrpc/gss_api.h @@ -73,6 +73,7 @@ u32 gss_delete_sec_context( rpc_authflavor_t gss_svc_to_pseudoflavor(struct gss_api_mech *, u32 qop, u32 service); u32 gss_pseudoflavor_to_service(struct gss_api_mech *, u32 pseudoflavor); +bool gss_pseudoflavor_to_datatouch(struct gss_api_mech *, u32 pseudoflavor); char *gss_service_to_auth_domain_name(struct gss_api_mech *, u32 service); struct pf_desc { @@ -81,6 +82,7 @@ struct pf_desc { u32 service; char *name; char *auth_domain_name; + bool datatouch; }; /* Different mechanisms (e.g., krb5 or spkm3) may implement gss-api, and diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h index 807371357160..59cbf16eaeb5 100644 --- a/include/linux/sunrpc/msg_prot.h +++ b/include/linux/sunrpc/msg_prot.h @@ -158,9 +158,9 @@ typedef __be32 rpc_fraghdr; /* * Note that RFC 1833 does not put any size restrictions on the - * netid string, but all currently defined netid's fit in 4 bytes. + * netid string, but all currently defined netid's fit in 5 bytes. 
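Returning to get_rpccred_rcu() above: the intended calling pattern is an RCU read-side lookup that may legitimately come back empty-handed (sketch, hypothetical names):

	static struct rpc_cred *example_cred_lookup(struct rpc_cred __rcu **credp)
	{
		struct rpc_cred *cred;

		rcu_read_lock();
		/* NULL if the cred's refcount already hit zero */
		cred = get_rpccred_rcu(rcu_dereference(*credp));
		rcu_read_unlock();

		return cred;
	}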
*/ -#define RPCBIND_MAXNETIDLEN (4u) +#define RPCBIND_MAXNETIDLEN (5u) /* * Universal addresses are introduced in RFC 1833 and further spelled diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 05a1809c44d9..817af0b4385e 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -230,6 +230,10 @@ void rpc_wake_up_queued_task(struct rpc_wait_queue *, struct rpc_task *); void rpc_wake_up(struct rpc_wait_queue *); struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *); +struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq, + struct rpc_wait_queue *, + bool (*)(struct rpc_task *, void *), + void *); struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *, bool (*)(struct rpc_task *, void *), void *); @@ -247,6 +251,7 @@ void rpc_show_tasks(struct net *); int rpc_init_mempool(void); void rpc_destroy_mempool(void); extern struct workqueue_struct *rpciod_workqueue; +extern struct workqueue_struct *xprtiod_workqueue; void rpc_prepare_task(struct rpc_task *task); static inline int rpc_wait_for_completion_task(struct rpc_task *task) diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 7ca44fb5b675..7321ae933867 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -268,6 +268,7 @@ struct svc_rqst { * cache pages */ #define RQ_VICTIM (5) /* about to be shut down */ #define RQ_BUSY (6) /* request is busy */ +#define RQ_DATA (7) /* request has data */ unsigned long rq_flags; /* flags field */ void * rq_argp; /* decoded arguments */ diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 3081339968c3..d6917b896d3a 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -199,7 +199,7 @@ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, struct xdr_buf *rcvbuf); /* svc_rdma_marshal.c */ -extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg *, struct svc_rqst *); +extern int svc_rdma_xdr_decode_req(struct xdr_buf *); extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *, struct rpcrdma_msg *, enum rpcrdma_errcode, __be32 *); diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index b7dabc4baafd..ab02a457da1f 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -25,7 +25,6 @@ struct svc_xprt_ops { void (*xpo_detach)(struct svc_xprt *); void (*xpo_free)(struct svc_xprt *); int (*xpo_secure_port)(struct svc_rqst *); - void (*xpo_adjust_wspace)(struct svc_xprt *); }; struct svc_xprt_class { @@ -69,6 +68,7 @@ struct svc_xprt { struct svc_serv *xpt_server; /* service for transport */ atomic_t xpt_reserved; /* space on outq that is rsvd */ + atomic_t xpt_nr_rqsts; /* Number of requests */ struct mutex xpt_mutex; /* to serialize sending data */ spinlock_t xpt_lock; /* protects sk_deferred * and xpt_auth_cache */ @@ -84,6 +84,7 @@ struct svc_xprt { struct net *xpt_net; struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */ + struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */ }; static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h index c00f53a4ccdd..d03932055328 100644 --- a/include/linux/sunrpc/svcauth.h +++ b/include/linux/sunrpc/svcauth.h @@ -16,6 +16,7 @@ #include <linux/sunrpc/cache.h> #include <linux/sunrpc/gss_api.h> #include <linux/hash.h> +#include <linux/stringhash.h> #include <linux/cred.h> struct svc_cred { @@ -165,41 +166,18 @@ extern int 
svcauth_unix_set_client(struct svc_rqst *rqstp); extern int unix_gid_cache_create(struct net *net); extern void unix_gid_cache_destroy(struct net *net); -static inline unsigned long hash_str(char *name, int bits) +/* + * The <stringhash.h> functions are good enough that we don't need to + * use hash_32() on them; just extracting the high bits is enough. + */ +static inline unsigned long hash_str(char const *name, int bits) { - unsigned long hash = 0; - unsigned long l = 0; - int len = 0; - unsigned char c; - do { - if (unlikely(!(c = *name++))) { - c = (char)len; len = -1; - } - l = (l << 8) | c; - len++; - if ((len & (BITS_PER_LONG/8-1))==0) - hash = hash_long(hash^l, BITS_PER_LONG); - } while (len); - return hash >> (BITS_PER_LONG - bits); + return hashlen_hash(hashlen_string(NULL, name)) >> (32 - bits); } -static inline unsigned long hash_mem(char *buf, int length, int bits) +static inline unsigned long hash_mem(char const *buf, int length, int bits) { - unsigned long hash = 0; - unsigned long l = 0; - int len = 0; - unsigned char c; - do { - if (len == length) { - c = (char)len; len = -1; - } else - c = *buf++; - l = (l << 8) | c; - len++; - if ((len & (BITS_PER_LONG/8-1))==0) - hash = hash_long(hash^l, BITS_PER_LONG); - } while (len); - return hash >> (BITS_PER_LONG - bits); + return full_name_hash(NULL, buf, length) >> (32 - bits); } #endif /* __KERNEL__ */ diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index fb0d212e0d3a..a16070dd03ee 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -142,6 +142,7 @@ struct rpc_xprt_ops { int (*bc_setup)(struct rpc_xprt *xprt, unsigned int min_reqs); int (*bc_up)(struct svc_serv *serv, struct net *net); + size_t (*bc_maxpayload)(struct rpc_xprt *xprt); void (*bc_free_rqst)(struct rpc_rqst *rqst); void (*bc_destroy)(struct rpc_xprt *xprt, unsigned int max_reqs); @@ -217,7 +218,8 @@ struct rpc_xprt { struct work_struct task_cleanup; struct timer_list timer; unsigned long last_used, - idle_timeout; + idle_timeout, + max_reconnect_timeout; /* * Send stuff @@ -296,6 +298,7 @@ struct xprt_create { size_t addrlen; const char *servername; struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ + struct rpc_xprt_switch *bc_xps; unsigned int flags; }; diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h index 767190b01363..39267dc3486a 100644 --- a/include/linux/sunrpc/xprtrdma.h +++ b/include/linux/sunrpc/xprtrdma.h @@ -52,7 +52,9 @@ #define RPCRDMA_DEF_SLOT_TABLE (128U) #define RPCRDMA_MAX_SLOT_TABLE (256U) -#define RPCRDMA_DEF_INLINE (1024) /* default inline max */ +#define RPCRDMA_MIN_INLINE (1024) /* min inline thresh */ +#define RPCRDMA_DEF_INLINE (1024) /* default inline thresh */ +#define RPCRDMA_MAX_INLINE (3068) /* max inline thresh */ /* Memory registration strategies, by number. * This is part of a kernel / user space API. Do not remove. 
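One note on the svcauth helpers above: callers keep their old shape and simply pick how many of the top bits they want, e.g. (sketch; 6 bits mirrors the bucket count net/sunrpc/svcauth.c has historically used):

	static unsigned long example_domain_bucket(const char *name)
	{
		return hash_str(name, 6);	/* index into 64 hash chains */
	}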
*/ diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 0ece4ba06f06..bef3fb0abb8f 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -80,6 +80,7 @@ struct sock_xprt { #define TCP_RPC_REPLY (1UL << 6) #define XPRT_SOCK_CONNECTING 1U +#define XPRT_SOCK_DATA_READY (2) #endif /* __KERNEL__ */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 8b6ec7ef0854..7693e39b14fe 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -18,12 +18,11 @@ static inline void pm_set_vt_switch(int do_switch) #endif #ifdef CONFIG_VT_CONSOLE_SLEEP -extern int pm_prepare_console(void); +extern void pm_prepare_console(void); extern void pm_restore_console(void); #else -static inline int pm_prepare_console(void) +static inline void pm_prepare_console(void) { - return 0; } static inline void pm_restore_console(void) diff --git a/include/linux/swap.h b/include/linux/swap.h index ad220359f1b0..4a529c984a3f 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -157,15 +157,6 @@ enum { #define SWAP_CLUSTER_MAX 32UL #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX -/* - * Ratio between zone->managed_pages and the "gap" that above the per-zone - * "high_wmark". While balancing nodes, We allow kswapd to shrink zones that - * do not meet the (high_wmark + gap) watermark, even which already met the - * high_wmark, in order to provide better per-zone lru behavior. We are ok to - * spend not more than 1% of the memory for this zone balancing "gap". - */ -#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100 - #define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */ #define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */ #define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */ @@ -266,6 +257,7 @@ static inline void workingset_node_pages_inc(struct radix_tree_node *node) static inline void workingset_node_pages_dec(struct radix_tree_node *node) { + VM_BUG_ON(!workingset_node_pages(node)); node->count--; } @@ -281,6 +273,7 @@ static inline void workingset_node_shadows_inc(struct radix_tree_node *node) static inline void workingset_node_shadows_dec(struct radix_tree_node *node) { + VM_BUG_ON(!workingset_node_shadows(node)); node->count -= 1U << RADIX_TREE_COUNT_SHIFT; } @@ -316,6 +309,8 @@ extern void lru_cache_add_active_or_unevictable(struct page *page, struct vm_area_struct *vma); /* linux/mm/vmscan.c */ +extern unsigned long zone_reclaimable_pages(struct zone *zone); +extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat); extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); extern int __isolate_lru_page(struct page *page, isolate_mode_t mode); @@ -323,9 +318,9 @@ extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, unsigned long nr_pages, gfp_t gfp_mask, bool may_swap); -extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, +extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem, gfp_t gfp_mask, bool noswap, - struct zone *zone, + pg_data_t *pgdat, unsigned long *nr_scanned); extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; @@ -333,13 +328,14 @@ extern int remove_mapping(struct address_space *mapping, struct page *page); extern unsigned long vm_total_pages; #ifdef CONFIG_NUMA -extern int zone_reclaim_mode; +extern int node_reclaim_mode; extern int sysctl_min_unmapped_ratio; extern int sysctl_min_slab_ratio; -extern int 
zone_reclaim(struct zone *, gfp_t, unsigned int); +extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int); #else -#define zone_reclaim_mode 0 -static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order) +#define node_reclaim_mode 0 +static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask, + unsigned int order) { return 0; } diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 017fced60242..5f81f8a187f2 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -6,7 +6,6 @@ #include <linux/types.h> struct device; -struct dma_attrs; struct page; struct scatterlist; @@ -68,10 +67,10 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs); + unsigned long attrs); extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs); + unsigned long attrs); extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, @@ -83,12 +82,13 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, extern int swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, - enum dma_data_direction dir, struct dma_attrs *attrs); + enum dma_data_direction dir, + unsigned long attrs); extern void swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs); + unsigned long attrs); extern void swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h new file mode 100644 index 000000000000..c6ffe8b0725c --- /dev/null +++ b/include/linux/sync_file.h @@ -0,0 +1,57 @@ +/* + * include/linux/sync_file.h + * + * Copyright (C) 2012 Google, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_SYNC_FILE_H +#define _LINUX_SYNC_FILE_H + +#include <linux/types.h> +#include <linux/kref.h> +#include <linux/ktime.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/fence.h> + +struct sync_file_cb { + struct fence_cb cb; + struct fence *fence; + struct sync_file *sync_file; +}; + +/** + * struct sync_file - sync file to export to the userspace + * @file: file representing this fence + * @kref: reference count on fence. + * @name: name of sync_file. 
Useful for debugging + * @sync_file_list: membership in global file list + * @num_fences: number of sync_pts in the fence + * @wq: wait queue for fence signaling + * @status: 0: signaled, >0: active, <0: error + * @cbs: sync_pts callback information + */ +struct sync_file { + struct file *file; + struct kref kref; + char name[32]; +#ifdef CONFIG_DEBUG_FS + struct list_head sync_file_list; +#endif + int num_fences; + + wait_queue_head_t wq; + atomic_t status; + + struct sync_file_cb cbs[]; +}; + +struct sync_file *sync_file_create(struct fence *fence); + +#endif /* _LINUX_SYNC_FILE_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index d795472c54d8..d02239022bd0 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -371,10 +371,10 @@ asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese, size_t sigsetsize); asmlinkage long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t __user *uinfo); -asmlinkage long sys_kill(int pid, int sig); -asmlinkage long sys_tgkill(int tgid, int pid, int sig); -asmlinkage long sys_tkill(int pid, int sig); -asmlinkage long sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo); +asmlinkage long sys_kill(pid_t pid, int sig); +asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig); +asmlinkage long sys_tkill(pid_t pid, int sig); +asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo); asmlinkage long sys_sgetmask(void); asmlinkage long sys_ssetmask(int newmask); asmlinkage long sys_signal(int sig, __sighandler_t handler); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index fa7bc29925c9..a4f7203a9017 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -28,6 +28,7 @@ #include <uapi/linux/sysctl.h> /* For the /proc/sys support */ +struct completion; struct ctl_table; struct nsproxy; struct ctl_table_root; @@ -41,6 +42,8 @@ extern int proc_dostring(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int proc_dointvec(struct ctl_table *, int, void __user *, size_t *, loff_t *); +extern int proc_douintvec(struct ctl_table *, int, + void __user *, size_t *, loff_t *); extern int proc_dointvec_minmax(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int proc_dointvec_jiffies(struct ctl_table *, int, diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 1b8a5a7876ce..ee517bef0db0 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -335,11 +335,14 @@ struct thermal_genl_event { * @get_trend: a pointer to a function that reads the sensor temperature trend. * @set_emul_temp: a pointer to a function that sets sensor emulated * temperature. + * @set_trip_temp: a pointer to a function that sets the trip temperature on + * hardware. */ struct thermal_zone_of_device_ops { int (*get_temp)(void *, int *); int (*get_trend)(void *, long *); int (*set_emul_temp)(void *, int); + int (*set_trip_temp)(void *, int, int); }; /** diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index b4c2a485b28a..2b5b10eed74f 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -105,46 +105,30 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag) #define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) -#if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK -/* - * An arch can define its own version of set_restore_sigmask() to get the - * job done however works, with or without TIF_RESTORE_SIGMASK.
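Circling back to the new proc_douintvec() handler above: it accepts the full 0..UINT_MAX range, where proc_dointvec only handles signed int values. A hypothetical table entry using it (sketch, not from this patch):

	static unsigned int example_threshold;

	static struct ctl_table example_table[] = {
		{
			.procname	= "example_threshold",
			.data		= &example_threshold,
			.maxlen		= sizeof(unsigned int),
			.mode		= 0644,
			.proc_handler	= proc_douintvec,
		},
		{ }
	};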
- */ -#define HAVE_SET_RESTORE_SIGMASK 1 - -/** - * set_restore_sigmask() - make sure saved_sigmask processing gets done - * - * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code - * will run before returning to user mode, to process the flag. For - * all callers, TIF_SIGPENDING is already set or it's no harm to set - * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the - * arch code will notice on return to user mode, in case those bits - * are scarce. We set TIF_SIGPENDING here to ensure that the arch - * signal code always gets run when TIF_RESTORE_SIGMASK is set. - */ -static inline void set_restore_sigmask(void) -{ - set_thread_flag(TIF_RESTORE_SIGMASK); - WARN_ON(!test_thread_flag(TIF_SIGPENDING)); -} -static inline void clear_restore_sigmask(void) +#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES +static inline int arch_within_stack_frames(const void * const stack, + const void * const stackend, + const void *obj, unsigned long len) { - clear_thread_flag(TIF_RESTORE_SIGMASK); + return 0; } -static inline bool test_restore_sigmask(void) -{ - return test_thread_flag(TIF_RESTORE_SIGMASK); -} -static inline bool test_and_clear_restore_sigmask(void) +#endif + +#ifdef CONFIG_HARDENED_USERCOPY +extern void __check_object_size(const void *ptr, unsigned long n, + bool to_user); + +static __always_inline void check_object_size(const void *ptr, unsigned long n, + bool to_user) { - return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK); + if (!__builtin_constant_p(n)) + __check_object_size(ptr, n, to_user); } -#endif /* TIF_RESTORE_SIGMASK && !HAVE_SET_RESTORE_SIGMASK */ - -#ifndef HAVE_SET_RESTORE_SIGMASK -#error "no set_restore_sigmask() provided and default one won't work" -#endif +#else +static inline void check_object_size(const void *ptr, unsigned long n, + bool to_user) +{ } +#endif /* CONFIG_HARDENED_USERCOPY */ #endif /* __KERNEL__ */ diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h index 0a0d56834c8e..f2293028ab9d 100644 --- a/include/linux/ti_wilink_st.h +++ b/include/linux/ti_wilink_st.h @@ -71,7 +71,7 @@ struct st_proto_s { enum proto_type type; long (*recv) (void *, struct sk_buff *); unsigned char (*match_packet) (const unsigned char *data); - void (*reg_complete_cb) (void *, char data); + void (*reg_complete_cb) (void *, int data); long (*write) (struct sk_buff *skb); void *priv_data; diff --git a/include/linux/time.h b/include/linux/time.h index 297f09f23896..4cea09d94208 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -205,7 +205,20 @@ struct tm { int tm_yday; }; -void time_to_tm(time_t totalsecs, int offset, struct tm *result); +void time64_to_tm(time64_t totalsecs, int offset, struct tm *result); + +/** + * time_to_tm - converts the calendar time to local broken-down time + * + * @totalsecs: the number of seconds elapsed since 00:00:00 on January 1, 1970, + * Coordinated Universal Time (UTC). + * @offset: offset seconds to add to totalsecs. 
+ * @result: pointer to struct tm variable to receive broken-down time + */ +static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result) +{ + time64_to_tm(totalsecs, offset, result); +} /** * timespec_to_ns - Convert timespec to nanoseconds diff --git a/include/linux/time64.h b/include/linux/time64.h index 367d5af899e8..7e5d2fa9ac46 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -65,7 +65,6 @@ static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec * # define timespec64_equal timespec_equal # define timespec64_compare timespec_compare # define set_normalized_timespec64 set_normalized_timespec -# define timespec64_add_safe timespec_add_safe # define timespec64_add timespec_add # define timespec64_sub timespec_sub # define timespec64_valid timespec_valid @@ -134,15 +133,6 @@ static inline int timespec64_compare(const struct timespec64 *lhs, const struct extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec); -/* - * timespec64_add_safe assumes both values are positive and checks for - * overflow. It will return TIME_T_MAX if the returned value would be - * smaller then either of the arguments. - */ -extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs, - const struct timespec64 rhs); - - static inline struct timespec64 timespec64_add(struct timespec64 lhs, struct timespec64 rhs) { @@ -224,4 +214,11 @@ static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns) #endif +/* + * timespec64_add_safe assumes both values are positive and checks for + * overflow. It will return TIME64_MAX in case of overflow. + */ +extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs, + const struct timespec64 rhs); + #endif /* _LINUX_TIME64_H */ diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 96f37bee3bc1..816b7543f81b 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -1,6 +1,8 @@ #ifndef _LINUX_TIMEKEEPING_H #define _LINUX_TIMEKEEPING_H +#include <asm-generic/errno-base.h> + /* Included from linux/ktime.h */ void timekeeping_init(void); @@ -11,8 +13,22 @@ extern int timekeeping_suspended; */ extern void do_gettimeofday(struct timeval *tv); extern int do_settimeofday64(const struct timespec64 *ts); -extern int do_sys_settimeofday(const struct timespec *tv, - const struct timezone *tz); +extern int do_sys_settimeofday64(const struct timespec64 *tv, + const struct timezone *tz); +static inline int do_sys_settimeofday(const struct timespec *tv, + const struct timezone *tz) +{ + struct timespec64 ts64; + + if (!tv) + return do_sys_settimeofday64(NULL, tz); + + if (!timespec_valid(tv)) + return -EINVAL; + + ts64 = timespec_to_timespec64(*tv); + return do_sys_settimeofday64(&ts64, tz); +} /* * Kernel time accessors diff --git a/include/linux/timer.h b/include/linux/timer.h index 61aa61dc410c..51d601f192d4 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -19,7 +19,6 @@ struct timer_list { void (*function)(unsigned long); unsigned long data; u32 flags; - int slack; #ifdef CONFIG_TIMER_STATS int start_pid; @@ -58,11 +57,14 @@ struct timer_list { * workqueue locking issues. It's not meant for executing random crap * with interrupts disabled. Abuse is monitored! 
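On the time.h change further up: new code is expected to call time64_to_tm() directly with a time64_t, which stays correct past 2038 (minimal sketch, hypothetical helper):

	static void example_log_utc_date(void)
	{
		struct tm tm;

		time64_to_tm(ktime_get_real_seconds(), 0, &tm);
		pr_info("UTC date: %04ld-%02d-%02d\n",
			tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
	}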
*/ -#define TIMER_CPUMASK 0x0007FFFF -#define TIMER_MIGRATING 0x00080000 +#define TIMER_CPUMASK 0x0003FFFF +#define TIMER_MIGRATING 0x00040000 #define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING) -#define TIMER_DEFERRABLE 0x00100000 +#define TIMER_DEFERRABLE 0x00080000 +#define TIMER_PINNED 0x00100000 #define TIMER_IRQSAFE 0x00200000 +#define TIMER_ARRAYSHIFT 22 +#define TIMER_ARRAYMASK 0xFFC00000 #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \ .entry = { .next = TIMER_ENTRY_STATIC }, \ @@ -70,7 +72,6 @@ struct timer_list { .expires = (_expires), \ .data = (_data), \ .flags = (_flags), \ - .slack = -1, \ __TIMER_LOCKDEP_MAP_INITIALIZER( \ __FILE__ ":" __stringify(__LINE__)) \ } @@ -78,9 +79,15 @@ struct timer_list { #define TIMER_INITIALIZER(_function, _expires, _data) \ __TIMER_INITIALIZER((_function), (_expires), (_data), 0) +#define TIMER_PINNED_INITIALIZER(_function, _expires, _data) \ + __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_PINNED) + #define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) \ __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE) +#define TIMER_PINNED_DEFERRED_INITIALIZER(_function, _expires, _data) \ + __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE | TIMER_PINNED) + #define DEFINE_TIMER(_name, _function, _expires, _data) \ struct timer_list _name = \ TIMER_INITIALIZER(_function, _expires, _data) @@ -124,8 +131,12 @@ static inline void init_timer_on_stack_key(struct timer_list *timer, #define init_timer(timer) \ __init_timer((timer), 0) +#define init_timer_pinned(timer) \ + __init_timer((timer), TIMER_PINNED) #define init_timer_deferrable(timer) \ __init_timer((timer), TIMER_DEFERRABLE) +#define init_timer_pinned_deferrable(timer) \ + __init_timer((timer), TIMER_DEFERRABLE | TIMER_PINNED) #define init_timer_on_stack(timer) \ __init_timer_on_stack((timer), 0) @@ -145,10 +156,20 @@ static inline void init_timer_on_stack_key(struct timer_list *timer, #define setup_timer(timer, fn, data) \ __setup_timer((timer), (fn), (data), 0) +#define setup_pinned_timer(timer, fn, data) \ + __setup_timer((timer), (fn), (data), TIMER_PINNED) +#define setup_deferrable_timer(timer, fn, data) \ + __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE) +#define setup_pinned_deferrable_timer(timer, fn, data) \ + __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED) #define setup_timer_on_stack(timer, fn, data) \ __setup_timer_on_stack((timer), (fn), (data), 0) +#define setup_pinned_timer_on_stack(timer, fn, data) \ + __setup_timer_on_stack((timer), (fn), (data), TIMER_PINNED) #define setup_deferrable_timer_on_stack(timer, fn, data) \ __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE) +#define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \ + __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED) /** * timer_pending - is a timer pending? 
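To show the new pinned variants above in use: a driver that needs its timeout to stay on the arming CPU now marks the timer up front instead of using the soon-to-be-removed mod_timer_pinned() (sketch, hypothetical names; the callback signature is the unsigned long style current at this point in the tree):

	static struct timer_list example_timer;

	static void example_timeout(unsigned long data)
	{
		/* runs on the CPU that armed the timer */
	}

	static void example_start(void)
	{
		setup_pinned_timer(&example_timer, example_timeout, 0);
		mod_timer(&example_timer, jiffies + HZ);
	}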
@@ -169,12 +190,7 @@ extern void add_timer_on(struct timer_list *timer, int cpu); extern int del_timer(struct timer_list * timer); extern int mod_timer(struct timer_list *timer, unsigned long expires); extern int mod_timer_pending(struct timer_list *timer, unsigned long expires); -extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires); - -extern void set_timer_slack(struct timer_list *time, int slack_hz); -#define TIMER_NOT_PINNED 0 -#define TIMER_PINNED 1 /* * The jiffies value which is added to now, when there is no timer * in the timer wheel: @@ -257,4 +273,10 @@ unsigned long __round_jiffies_up_relative(unsigned long j, int cpu); unsigned long round_jiffies_up(unsigned long j); unsigned long round_jiffies_up_relative(unsigned long j); +#ifdef CONFIG_HOTPLUG_CPU +int timers_dead_cpu(unsigned int cpu); +#else +#define timers_dead_cpu NULL +#endif + #endif diff --git a/include/linux/topology.h b/include/linux/topology.h index afce69296ac0..cb0775e1ee4b 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -54,7 +54,7 @@ int arch_update_cpu_topology(void); /* * If the distance between nodes in a system is larger than RECLAIM_DISTANCE * (in whatever arch specific measurement units returned by node_distance()) - * and zone_reclaim_mode is enabled then the VM will only call zone_reclaim() + * and node_reclaim_mode is enabled then the VM will only call node_reclaim() * on nodes within this distance. */ #define RECLAIM_DISTANCE 30 diff --git a/include/linux/torture.h b/include/linux/torture.h index 7759fc3c622d..6685a73736a2 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -50,6 +50,10 @@ do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0) /* Definitions for online/offline exerciser. 
*/ +bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes, + unsigned long *sum_offl, int *min_offl, int *max_offl); +bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes, + unsigned long *sum_onl, int *min_onl, int *max_onl); int torture_onoff_init(long ooholdoff, long oointerval); void torture_onoff_stats(void); bool torture_onoff_failures(void); diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 706e63eea080..da158f06e0b2 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -33,7 +33,12 @@ struct tpm_chip; struct trusted_key_payload; struct trusted_key_options; +enum TPM_OPS_FLAGS { + TPM_OPS_AUTO_STARTUP = BIT(0), +}; + struct tpm_class_ops { + unsigned int flags; const u8 req_complete_mask; const u8 req_complete_val; bool (*req_canceled)(struct tpm_chip *chip, u8 status); diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 0810f81b6db2..be007610ceb0 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -154,21 +154,6 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer, struct trace_event_file *trace_file, int type, unsigned long len, unsigned long flags, int pc); -struct ring_buffer_event * -trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer, - int type, unsigned long len, - unsigned long flags, int pc); -void trace_buffer_unlock_commit(struct trace_array *tr, - struct ring_buffer *buffer, - struct ring_buffer_event *event, - unsigned long flags, int pc); -void trace_buffer_unlock_commit_regs(struct trace_array *tr, - struct ring_buffer *buffer, - struct ring_buffer_event *event, - unsigned long flags, int pc, - struct pt_regs *regs); -void trace_current_buffer_discard_commit(struct ring_buffer *buffer, - struct ring_buffer_event *event); void tracing_record_cmdline(struct task_struct *tsk); @@ -229,7 +214,6 @@ enum { TRACE_EVENT_FL_NO_SET_FILTER_BIT, TRACE_EVENT_FL_IGNORE_ENABLE_BIT, TRACE_EVENT_FL_WAS_ENABLED_BIT, - TRACE_EVENT_FL_USE_CALL_FILTER_BIT, TRACE_EVENT_FL_TRACEPOINT_BIT, TRACE_EVENT_FL_KPROBE_BIT, TRACE_EVENT_FL_UPROBE_BIT, @@ -244,7 +228,6 @@ enum { * WAS_ENABLED - Set and stays set when an event was ever enabled * (used for module unloading, if a module event is enabled, * it is best to clear the buffers that used it). 
- * USE_CALL_FILTER - For trace internal events, don't use file filter * TRACEPOINT - Event is a tracepoint * KPROBE - Event is a kprobe * UPROBE - Event is a uprobe @@ -255,7 +238,6 @@ enum { TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT), - TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT), TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT), TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT), TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT), @@ -407,16 +389,12 @@ enum event_trigger_type { ETT_SNAPSHOT = (1 << 1), ETT_STACKTRACE = (1 << 2), ETT_EVENT_ENABLE = (1 << 3), + ETT_EVENT_HIST = (1 << 4), + ETT_HIST_ENABLE = (1 << 5), }; extern int filter_match_preds(struct event_filter *filter, void *rec); -extern int filter_check_discard(struct trace_event_file *file, void *rec, - struct ring_buffer *buffer, - struct ring_buffer_event *event); -extern int call_filter_check_discard(struct trace_event_call *call, void *rec, - struct ring_buffer *buffer, - struct ring_buffer_event *event); extern enum event_trigger_type event_triggers_call(struct trace_event_file *file, void *rec); extern void event_triggers_post_call(struct trace_event_file *file, @@ -450,100 +428,6 @@ trace_trigger_soft_disabled(struct trace_event_file *file) return false; } -/* - * Helper function for event_trigger_unlock_commit{_regs}(). - * If there are event triggers attached to this event that requires - * filtering against its fields, then they wil be called as the - * entry already holds the field information of the current event. - * - * It also checks if the event should be discarded or not. - * It is to be discarded if the event is soft disabled and the - * event was only recorded to process triggers, or if the event - * filter is active and this event did not match the filters. - * - * Returns true if the event is discarded, false otherwise. - */ -static inline bool -__event_trigger_test_discard(struct trace_event_file *file, - struct ring_buffer *buffer, - struct ring_buffer_event *event, - void *entry, - enum event_trigger_type *tt) -{ - unsigned long eflags = file->flags; - - if (eflags & EVENT_FILE_FL_TRIGGER_COND) - *tt = event_triggers_call(file, entry); - - if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags)) - ring_buffer_discard_commit(buffer, event); - else if (!filter_check_discard(file, entry, buffer, event)) - return false; - - return true; -} - -/** - * event_trigger_unlock_commit - handle triggers and finish event commit - * @file: The file pointer assoctiated to the event - * @buffer: The ring buffer that the event is being written to - * @event: The event meta data in the ring buffer - * @entry: The event itself - * @irq_flags: The state of the interrupts at the start of the event - * @pc: The state of the preempt count at the start of the event. - * - * This is a helper function to handle triggers that require data - * from the event itself. It also tests the event against filters and - * if the event is soft disabled and should be discarded. 
- */ -static inline void -event_trigger_unlock_commit(struct trace_event_file *file, - struct ring_buffer *buffer, - struct ring_buffer_event *event, - void *entry, unsigned long irq_flags, int pc) -{ - enum event_trigger_type tt = ETT_NONE; - - if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) - trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc); - - if (tt) - event_triggers_post_call(file, tt, entry); -} - -/** - * event_trigger_unlock_commit_regs - handle triggers and finish event commit - * @file: The file pointer assoctiated to the event - * @buffer: The ring buffer that the event is being written to - * @event: The event meta data in the ring buffer - * @entry: The event itself - * @irq_flags: The state of the interrupts at the start of the event - * @pc: The state of the preempt count at the start of the event. - * - * This is a helper function to handle triggers that require data - * from the event itself. It also tests the event against filters and - * if the event is soft disabled and should be discarded. - * - * Same as event_trigger_unlock_commit() but calls - * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit(). - */ -static inline void -event_trigger_unlock_commit_regs(struct trace_event_file *file, - struct ring_buffer *buffer, - struct ring_buffer_event *event, - void *entry, unsigned long irq_flags, int pc, - struct pt_regs *regs) -{ - enum event_trigger_type tt = ETT_NONE; - - if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) - trace_buffer_unlock_commit_regs(file->tr, buffer, event, - irq_flags, pc, regs); - - if (tt) - event_triggers_post_call(file, tt, entry); -} - #ifdef CONFIG_BPF_EVENTS unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx); #else @@ -569,6 +453,7 @@ extern int trace_define_field(struct trace_event_call *call, const char *type, int is_signed, int filter_type); extern int trace_add_event_call(struct trace_event_call *call); extern int trace_remove_event_call(struct trace_event_call *call); +extern int trace_event_get_offsets(struct trace_event_call *call); #define is_signed_type(type) (((type)(-1)) < (type)1) @@ -605,15 +490,20 @@ extern void perf_trace_del(struct perf_event *event, int flags); extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str); extern void ftrace_profile_free_filter(struct perf_event *event); -extern void *perf_trace_buf_prepare(int size, unsigned short type, - struct pt_regs **regs, int *rctxp); +void perf_trace_buf_update(void *record, u16 type); +void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp); + +void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx, + struct trace_event_call *call, u64 count, + struct pt_regs *regs, struct hlist_head *head, + struct task_struct *task); static inline void -perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, +perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type, u64 count, struct pt_regs *regs, void *head, struct task_struct *task) { - perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task); + perf_tp_event(type, count, raw_data, size, regs, head, rctx, task); } #endif diff --git a/include/linux/tty.h b/include/linux/tty.h index 3b09f235db66..40144f382516 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -228,7 +228,8 @@ struct tty_port { int count; /* Usage count */ wait_queue_head_t open_wait; /* Open waiters */ wait_queue_head_t delta_msr_wait; /* Modem status change */ - unsigned 
long flags; /* TTY flags ASY_*/ + unsigned long flags; /* User TTY flags ASYNC_ */ + unsigned long iflags; /* Internal flags TTY_PORT_ */ unsigned char console:1, /* port is a console */ low_latency:1; /* optional: tune for latency */ struct mutex mutex; /* Locking */ @@ -242,6 +243,18 @@ struct tty_port { struct kref kref; /* Ref counter */ }; +/* tty_port::iflags bits -- use atomic bit ops */ +#define TTY_PORT_INITIALIZED 0 /* device is initialized */ +#define TTY_PORT_SUSPENDED 1 /* device is suspended */ +#define TTY_PORT_ACTIVE 2 /* device is open */ + +/* + * uart drivers: use the uart_port::status field and the UPSTAT_* defines + * for s/w-based flow control steering and carrier detection status + */ +#define TTY_PORT_CTS_FLOW 3 /* h/w flow control enabled */ +#define TTY_PORT_CHECK_CD 4 /* carrier detect enabled */ + /* * Where all of the state associated with a tty is kept while the tty * is open. Since the termios state should be kept even if the tty @@ -338,7 +351,6 @@ struct tty_file_private { #define TTY_OTHER_CLOSED 2 /* Other side (if any) has closed */ #define TTY_EXCLUSIVE 3 /* Exclusive open mode */ #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */ -#define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */ #define TTY_LDISC_OPEN 11 /* Line discipline is open */ #define TTY_PTY_LOCK 16 /* pty private */ #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ @@ -360,6 +372,16 @@ static inline void tty_set_flow_change(struct tty_struct *tty, int val) smp_mb(); } +static inline bool tty_io_error(struct tty_struct *tty) +{ + return test_bit(TTY_IO_ERROR, &tty->flags); +} + +static inline bool tty_throttled(struct tty_struct *tty) +{ + return test_bit(TTY_THROTTLED, &tty->flags); +} + #ifdef CONFIG_TTY extern void console_init(void); extern void tty_kref_put(struct tty_struct *tty); @@ -371,6 +393,7 @@ extern void proc_clear_tty(struct task_struct *p); extern struct tty_struct *get_current_tty(void); /* tty_io.c */ extern int __init tty_init(void); +extern const char *tty_name(const struct tty_struct *tty); #else static inline void console_init(void) { } @@ -391,6 +414,8 @@ static inline struct tty_struct *get_current_tty(void) /* tty_io.c */ static inline int __init tty_init(void) { return 0; } +static inline const char *tty_name(const struct tty_struct *tty) +{ return "(none)"; } #endif extern struct ktermios tty_std_termios; @@ -415,7 +440,6 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty) return tty; } -extern const char *tty_name(const struct tty_struct *tty); extern const char *tty_driver_name(const struct tty_struct *tty); extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); extern int __tty_check_change(struct tty_struct *tty, int sig); @@ -457,6 +481,7 @@ extern void tty_buffer_init(struct tty_port *port); extern void tty_buffer_set_lock_subclass(struct tty_port *port); extern bool tty_buffer_restart_work(struct tty_port *port); extern bool tty_buffer_cancel_work(struct tty_port *port); +extern void tty_buffer_flush_work(struct tty_port *port); extern speed_t tty_termios_baud_rate(struct ktermios *termios); extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); extern void tty_termios_encode_baud_rate(struct ktermios *termios, @@ -537,7 +562,67 @@ static inline struct tty_port *tty_port_get(struct tty_port *port) /* If the cts flow control is enabled, return true. 
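
The new TTY_PORT_* values above are bit numbers rather than masks because tty_port::iflags is updated with atomic bit ops, letting independent flags be flipped without taking a lock; a userspace approximation using compiler atomics (demo_* names are stand-ins, not the kernel implementation):

	#include <stdio.h>

	#define DEMO_INITIALIZED 0
	#define DEMO_SUSPENDED   1
	#define DEMO_ACTIVE      2

	static void demo_set_bit(int nr, unsigned long *addr)
	{
		__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
	}

	static void demo_clear_bit(int nr, unsigned long *addr)
	{
		__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
	}

	static int demo_test_bit(int nr, const unsigned long *addr)
	{
		return (__atomic_load_n(addr, __ATOMIC_RELAXED) >> nr) & 1;
	}

	int main(void)
	{
		unsigned long iflags = 0;

		demo_set_bit(DEMO_ACTIVE, &iflags);	/* like tty_port_set_active(port, true) */
		printf("active=%d\n", demo_test_bit(DEMO_ACTIVE, &iflags));
		demo_clear_bit(DEMO_ACTIVE, &iflags);	/* like tty_port_set_active(port, false) */
		printf("active=%d\n", demo_test_bit(DEMO_ACTIVE, &iflags));
		return 0;
	}
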
*/ static inline bool tty_port_cts_enabled(struct tty_port *port) { - return port->flags & ASYNC_CTS_FLOW; + return test_bit(TTY_PORT_CTS_FLOW, &port->iflags); +} + +static inline void tty_port_set_cts_flow(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_CTS_FLOW, &port->iflags); + else + clear_bit(TTY_PORT_CTS_FLOW, &port->iflags); +} + +static inline bool tty_port_active(struct tty_port *port) +{ + return test_bit(TTY_PORT_ACTIVE, &port->iflags); +} + +static inline void tty_port_set_active(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_ACTIVE, &port->iflags); + else + clear_bit(TTY_PORT_ACTIVE, &port->iflags); +} + +static inline bool tty_port_check_carrier(struct tty_port *port) +{ + return test_bit(TTY_PORT_CHECK_CD, &port->iflags); +} + +static inline void tty_port_set_check_carrier(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_CHECK_CD, &port->iflags); + else + clear_bit(TTY_PORT_CHECK_CD, &port->iflags); +} + +static inline bool tty_port_suspended(struct tty_port *port) +{ + return test_bit(TTY_PORT_SUSPENDED, &port->iflags); +} + +static inline void tty_port_set_suspended(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_SUSPENDED, &port->iflags); + else + clear_bit(TTY_PORT_SUSPENDED, &port->iflags); +} + +static inline bool tty_port_initialized(struct tty_port *port) +{ + return test_bit(TTY_PORT_INITIALIZED, &port->iflags); +} + +static inline void tty_port_set_initialized(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_INITIALIZED, &port->iflags); + else + clear_bit(TTY_PORT_INITIALIZED, &port->iflags); } extern struct tty_struct *tty_port_tty_get(struct tty_port *port); diff --git a/include/linux/types.h b/include/linux/types.h index 70dd3dfde631..baf718324f4a 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -156,7 +156,6 @@ typedef u32 dma_addr_t; typedef unsigned __bitwise__ gfp_t; typedef unsigned __bitwise__ fmode_t; -typedef unsigned __bitwise__ oom_flags_t; #ifdef CONFIG_PHYS_ADDR_T_64BIT typedef u64 phys_addr_t; diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h index df89c9bcba7d..d3a2bb712af3 100644 --- a/include/linux/u64_stats_sync.h +++ b/include/linux/u64_stats_sync.h @@ -89,6 +89,20 @@ static inline void u64_stats_update_end(struct u64_stats_sync *syncp) #endif } +static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + raw_write_seqcount_begin(&syncp->seq); +#endif +} + +static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + raw_write_seqcount_end(&syncp->seq); +#endif +} + static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 349557825428..f30c187ed785 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -114,8 +114,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); #ifndef user_access_begin #define user_access_begin() do { } while (0) #define user_access_end() do { } while (0) -#define unsafe_get_user(x, ptr) __get_user(x, ptr) -#define unsafe_put_user(x, ptr) __put_user(x, ptr) +#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0) +#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0) #endif #endif /* 
__LINUX_UACCESS_H__ */ diff --git a/include/linux/udp.h b/include/linux/udp.h index 87c094961bd5..d1fd8cd39478 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -71,6 +71,14 @@ struct udp_sock { */ int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); void (*encap_destroy)(struct sock *sk); + + /* GRO functions for UDP socket */ + struct sk_buff ** (*gro_receive)(struct sock *sk, + struct sk_buff **head, + struct sk_buff *skb); + int (*gro_complete)(struct sock *sk, + struct sk_buff *skb, + int nhoff); }; static inline struct udp_sock *udp_sk(const struct sock *sk) @@ -98,11 +106,11 @@ static inline bool udp_get_no_check6_rx(struct sock *sk) return udp_sk(sk)->no_check6_rx; } -#define udp_portaddr_for_each_entry(__sk, node, list) \ - hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node) +#define udp_portaddr_for_each_entry(__sk, list) \ + hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node) -#define udp_portaddr_for_each_entry_rcu(__sk, node, list) \ - hlist_nulls_for_each_entry_rcu(__sk, node, list, __sk_common.skc_portaddr_node) +#define udp_portaddr_for_each_entry_rcu(__sk, list) \ + hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node) #define IS_UDPLITE(__sk) (udp_sk(__sk)->pcflag) diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h index 03835522dfcb..25e9d9216340 100644 --- a/include/linux/uidgid.h +++ b/include/linux/uidgid.h @@ -177,12 +177,12 @@ static inline gid_t from_kgid_munged(struct user_namespace *to, kgid_t kgid) static inline bool kuid_has_mapping(struct user_namespace *ns, kuid_t uid) { - return true; + return uid_valid(uid); } static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid) { - return true; + return gid_valid(gid); } #endif /* CONFIG_USER_NS */ diff --git a/include/linux/uio.h b/include/linux/uio.h index 1b5d1cd796e2..75b4aaf31a9d 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -76,7 +76,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page, struct iov_iter *i, unsigned long offset, size_t bytes); void iov_iter_advance(struct iov_iter *i, size_t bytes); int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes); -int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes); +#define iov_iter_fault_in_multipages_readable iov_iter_fault_in_readable size_t iov_iter_single_seg_count(const struct iov_iter *i); size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); diff --git a/include/linux/usb.h b/include/linux/usb.h index 6a9a0c28415d..eba1f10e8cfd 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -374,13 +374,12 @@ struct usb_bus { int devnum_next; /* Next open device number in * round-robin allocation */ + struct mutex devnum_next_mutex; /* devnum_next mutex */ struct usb_devmap devmap; /* device address allocation map */ struct usb_device *root_hub; /* Root hub */ struct usb_bus *hs_companion; /* Companion EHCI bus, if any */ - struct mutex usb_address0_mutex; /* unaddressed device mutex */ - int bandwidth_allocated; /* on this bus: how much of the time * reserved for periodic (intr/iso) * requests is used, on average? 
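
The uaccess.h hunk above changes unsafe_get_user()/unsafe_put_user() to take an error label, so a run of accesses can branch to one handler instead of checking a return value after each access; a self-contained sketch of the calling convention (demo_fetch() is a made-up stand-in for __get_user()):

	#include <stdio.h>

	/* model a faulting user pointer with NULL */
	static int demo_fetch(int *dst, const int *src)
	{
		if (!src)
			return -1;
		*dst = *src;
		return 0;
	}

	#define unsafe_demo_get(x, ptr, err)		\
	do {						\
		if (demo_fetch(&(x), (ptr)))		\
			goto err;			\
	} while (0)

	int main(void)
	{
		int v = 42, x = 0;

		unsafe_demo_get(x, &v, efault);		/* succeeds, x becomes 42 */
		printf("x=%d\n", x);
		unsafe_demo_get(x, NULL, efault);	/* fails, jumps to the label */
		return 0;

	efault:
		printf("fault handled at the error label\n");
		return 1;
	}
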
@@ -720,7 +719,7 @@ extern void usb_enable_ltm(struct usb_device *udev); static inline bool usb_device_supports_ltm(struct usb_device *udev) { - if (udev->speed != USB_SPEED_SUPER || !udev->bos || !udev->bos->ss_cap) + if (udev->speed < USB_SPEED_SUPER || !udev->bos || !udev->bos->ss_cap) return false; return udev->bos->ss_cap->bmAttributes & USB_LTM_SUPPORT; } @@ -1069,7 +1068,7 @@ struct usbdrv_wrap { * for interfaces bound to this driver. * @soft_unbind: if set to 1, the USB core will not kill URBs and disable * endpoints before calling the driver's disconnect method. - * @disable_hub_initiated_lpm: if set to 0, the USB core will not allow hubs + * @disable_hub_initiated_lpm: if set to 1, the USB core will not allow hubs * to initiate lower power link state transitions when an idle timeout * occurs. Device-initiated USB 3.0 link PM will still be allowed. * @@ -1569,7 +1568,7 @@ static inline void usb_fill_bulk_urb(struct urb *urb, * Initializes an interrupt urb with the proper information needed to submit * it to a device. * - * Note that High Speed and SuperSpeed interrupt endpoints use a logarithmic + * Note that High Speed and SuperSpeed(+) interrupt endpoints use a logarithmic * encoding of the endpoint interval, and express polling intervals in * microframes (eight per millisecond) rather than in frames (one per * millisecond). @@ -1595,7 +1594,7 @@ static inline void usb_fill_int_urb(struct urb *urb, urb->complete = complete_fn; urb->context = context; - if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) { + if (dev->speed == USB_SPEED_HIGH || dev->speed >= USB_SPEED_SUPER) { /* make sure interval is within allowed range */ interval = clamp(interval, 1, 16); diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index 966889a20ea3..e479033bd782 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h @@ -180,11 +180,11 @@ struct ehci_regs { * PORTSCx */ /* HOSTPC: offset 0x84 */ - u32 hostpc[1]; /* HOSTPC extension */ + u32 hostpc[0]; /* HOSTPC extension */ #define HOSTPC_PHCD (1<<22) /* Phy clock disable */ #define HOSTPC_PSPD (3<<25) /* Port speed detection */ - u32 reserved5[16]; + u32 reserved5[17]; /* USBMODE_EX: offset 0xc8 */ u32 usbmode_ex; /* USB Device mode extension */ diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 5d4e151c49bf..612dbdfa388e 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -25,6 +25,8 @@ #include <linux/workqueue.h> #include <linux/usb/ch9.h> +#define UDC_TRACE_STR_MAX 512 + struct usb_ep; /** @@ -228,307 +230,49 @@ struct usb_ep { /*-------------------------------------------------------------------------*/ -/** - * usb_ep_set_maxpacket_limit - set maximum packet size limit for endpoint - * @ep:the endpoint being configured - * @maxpacket_limit:value of maximum packet size limit - * - * This function should be used only in UDC drivers to initialize endpoint - * (usually in the probe function).
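
For the logarithmic interval encoding mentioned in the usb_fill_int_urb() comment above, the clamped exponent translates to a polling period of 2^(bInterval-1) microframes of 125 us each; a small program tabulating that relationship:

	#include <stdio.h>

	int main(void)
	{
		/*
		 * High-/super-speed interrupt endpoints: bInterval is an
		 * exponent, clamped to 1..16 as in the hunk above, and one
		 * microframe is 125 us (eight per millisecond).
		 */
		for (int binterval = 1; binterval <= 16; binterval++) {
			unsigned long uframes = 1UL << (binterval - 1);

			printf("bInterval=%2d -> %6lu microframes = %9.3f ms\n",
			       binterval, uframes, uframes * 0.125);
		}
		return 0;
	}
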
- */ +#if IS_ENABLED(CONFIG_USB_GADGET) +void usb_ep_set_maxpacket_limit(struct usb_ep *ep, unsigned maxpacket_limit); +int usb_ep_enable(struct usb_ep *ep); +int usb_ep_disable(struct usb_ep *ep); +struct usb_request *usb_ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags); +void usb_ep_free_request(struct usb_ep *ep, struct usb_request *req); +int usb_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags); +int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req); +int usb_ep_set_halt(struct usb_ep *ep); +int usb_ep_clear_halt(struct usb_ep *ep); +int usb_ep_set_wedge(struct usb_ep *ep); +int usb_ep_fifo_status(struct usb_ep *ep); +void usb_ep_fifo_flush(struct usb_ep *ep); +#else static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep, - unsigned maxpacket_limit) -{ - ep->maxpacket_limit = maxpacket_limit; - ep->maxpacket = maxpacket_limit; -} - -/** - * usb_ep_enable - configure endpoint, making it usable - * @ep:the endpoint being configured. may not be the endpoint named "ep0". - * drivers discover endpoints through the ep_list of a usb_gadget. - * - * When configurations are set, or when interface settings change, the driver - * will enable or disable the relevant endpoints. while it is enabled, an - * endpoint may be used for i/o until the driver receives a disconnect() from - * the host or until the endpoint is disabled. - * - * the ep0 implementation (which calls this routine) must ensure that the - * hardware capabilities of each endpoint match the descriptor provided - * for it. for example, an endpoint named "ep2in-bulk" would be usable - * for interrupt transfers as well as bulk, but it likely couldn't be used - * for iso transfers or for endpoint 14. some endpoints are fully - * configurable, with more generic names like "ep-a". (remember that for - * USB, "in" means "towards the USB master".) - * - * returns zero, or a negative error code. - */ + unsigned maxpacket_limit) +{ } static inline int usb_ep_enable(struct usb_ep *ep) -{ - int ret; - - if (ep->enabled) - return 0; - - ret = ep->ops->enable(ep, ep->desc); - if (ret) - return ret; - - ep->enabled = true; - - return 0; -} - -/** - * usb_ep_disable - endpoint is no longer usable - * @ep:the endpoint being unconfigured. may not be the endpoint named "ep0". - * - * no other task may be using this endpoint when this is called. - * any pending and uncompleted requests will complete with status - * indicating disconnect (-ESHUTDOWN) before this call returns. - * gadget drivers must call usb_ep_enable() again before queueing - * requests to the endpoint. - * - * returns zero, or a negative error code. - */ +{ return 0; } static inline int usb_ep_disable(struct usb_ep *ep) -{ - int ret; - - if (!ep->enabled) - return 0; - - ret = ep->ops->disable(ep); - if (ret) - return ret; - - ep->enabled = false; - - return 0; -} - -/** - * usb_ep_alloc_request - allocate a request object to use with this endpoint - * @ep:the endpoint to be used with the request - * @gfp_flags:GFP_* flags to use - * - * Request objects must be allocated with this call, since they normally - * need controller-specific setup and may even need endpoint-specific - * resources such as allocation of DMA descriptors. - * Requests may be submitted with usb_ep_queue(), and receive a single - * completion callback. Free requests with usb_ep_free_request(), when - * they are no longer needed. - * - * Returns the request, or null if one could not be allocated.
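
The gadget.h hunk above swaps the big inline bodies for out-of-line prototypes under IS_ENABLED(CONFIG_USB_GADGET), with empty static inline stubs otherwise, so call sites build unchanged either way; the idiom in miniature (DEMO_FEATURE is an invented stand-in for the Kconfig test):

	#include <stdio.h>

	#define DEMO_FEATURE 0	/* flip to 1 for the prototype branch
				   (then a real definition must be linked in) */

	#if DEMO_FEATURE
	int demo_op(int arg);	/* implemented elsewhere when enabled */
	#else
	static inline int demo_op(int arg)
	{ return 0; }		/* stub keeps callers compiling */
	#endif

	int main(void)
	{
		printf("demo_op() -> %d\n", demo_op(5));
		return 0;
	}
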
- */ +{ return 0; } static inline struct usb_request *usb_ep_alloc_request(struct usb_ep *ep, - gfp_t gfp_flags) -{ - return ep->ops->alloc_request(ep, gfp_flags); -} - -/** - * usb_ep_free_request - frees a request object - * @ep:the endpoint associated with the request - * @req:the request being freed - * - * Reverses the effect of usb_ep_alloc_request(). - * Caller guarantees the request is not queued, and that it will - * no longer be requeued (or otherwise used). - */ + gfp_t gfp_flags) +{ return NULL; } static inline void usb_ep_free_request(struct usb_ep *ep, - struct usb_request *req) -{ - ep->ops->free_request(ep, req); -} - -/** - * usb_ep_queue - queues (submits) an I/O request to an endpoint. - * @ep:the endpoint associated with the request - * @req:the request being submitted - * @gfp_flags: GFP_* flags to use in case the lower level driver couldn't - * pre-allocate all necessary memory with the request. - * - * This tells the device controller to perform the specified request through - * that endpoint (reading or writing a buffer). When the request completes, - * including being canceled by usb_ep_dequeue(), the request's completion - * routine is called to return the request to the driver. Any endpoint - * (except control endpoints like ep0) may have more than one transfer - * request queued; they complete in FIFO order. Once a gadget driver - * submits a request, that request may not be examined or modified until it - * is given back to that driver through the completion callback. - * - * Each request is turned into one or more packets. The controller driver - * never merges adjacent requests into the same packet. OUT transfers - * will sometimes use data that's already buffered in the hardware. - * Drivers can rely on the fact that the first byte of the request's buffer - * always corresponds to the first byte of some USB packet, for both - * IN and OUT transfers. - * - * Bulk endpoints can queue any amount of data; the transfer is packetized - * automatically. The last packet will be short if the request doesn't fill it - * out completely. Zero length packets (ZLPs) should be avoided in portable - * protocols since not all usb hardware can successfully handle zero length - * packets. (ZLPs may be explicitly written, and may be implicitly written if - * the request 'zero' flag is set.) Bulk endpoints may also be used - * for interrupt transfers; but the reverse is not true, and some endpoints - * won't support every interrupt transfer. (Such as 768 byte packets.) - * - * Interrupt-only endpoints are less functional than bulk endpoints, for - * example by not supporting queueing or not handling buffers that are - * larger than the endpoint's maxpacket size. They may also treat data - * toggle differently. - * - * Control endpoints ... after getting a setup() callback, the driver queues - * one response (even if it would be zero length). That enables the - * status ack, after transferring data as specified in the response. Setup - * functions may return negative error codes to generate protocol stalls. - * (Note that some USB device controllers disallow protocol stall responses - * in some cases.) When control responses are deferred (the response is - * written after the setup callback returns), then usb_ep_set_halt() may be - * used on ep0 to trigger protocol stalls. Depending on the controller, - * it may not be possible to trigger a status-stage protocol stall when the - * data stage is over, that is, from within the response's completion - * routine. 
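
A userspace model of the queueing contract usb_ep_queue() documents above: multiple requests may be outstanding, they complete in FIFO order, and each is handed back through its completion callback (demo_* types are illustrative only):

	#include <stdio.h>
	#include <stddef.h>

	struct demo_req {
		const char *buf;
		void (*complete)(struct demo_req *req, int status);
		struct demo_req *next;
	};

	static struct demo_req *head, *tail;

	static void demo_queue(struct demo_req *req)
	{
		req->next = NULL;
		if (tail)
			tail->next = req;
		else
			head = req;
		tail = req;
	}

	static void demo_complete_all(int status)
	{
		while (head) {
			struct demo_req *req = head;

			head = req->next;
			if (!head)
				tail = NULL;
			req->complete(req, status);	/* give the request back */
		}
	}

	static void on_done(struct demo_req *req, int status)
	{
		printf("completed \"%s\" status=%d\n", req->buf, status);
	}

	int main(void)
	{
		struct demo_req a = { "first", on_done }, b = { "second", on_done };

		demo_queue(&a);
		demo_queue(&b);
		demo_complete_all(0);	/* FIFO order: first, then second */
		return 0;
	}
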
- * - * For periodic endpoints, like interrupt or isochronous ones, the usb host - * arranges to poll once per interval, and the gadget driver usually will - * have queued some data to transfer at that time. - * - * Returns zero, or a negative error code. Endpoints that are not enabled - * report errors; errors will also be - * reported when the usb peripheral is disconnected. - */ -static inline int usb_ep_queue(struct usb_ep *ep, - struct usb_request *req, gfp_t gfp_flags) -{ - if (WARN_ON_ONCE(!ep->enabled && ep->address)) - return -ESHUTDOWN; - - return ep->ops->queue(ep, req, gfp_flags); -} - -/** - * usb_ep_dequeue - dequeues (cancels, unlinks) an I/O request from an endpoint - * @ep:the endpoint associated with the request - * @req:the request being canceled - * - * If the request is still active on the endpoint, it is dequeued and its - * completion routine is called (with status -ECONNRESET); else a negative - * error code is returned. This is guaranteed to happen before the call to - * usb_ep_dequeue() returns. - * - * Note that some hardware can't clear out write fifos (to unlink the request - * at the head of the queue) except as part of disconnecting from usb. Such - * restrictions prevent drivers from supporting configuration changes, - * even to configuration zero (a "chapter 9" requirement). - */ + struct usb_request *req) +{ } +static inline int usb_ep_queue(struct usb_ep *ep, struct usb_request *req, + gfp_t gfp_flags) +{ return 0; } static inline int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req) -{ - return ep->ops->dequeue(ep, req); -} - -/** - * usb_ep_set_halt - sets the endpoint halt feature. - * @ep: the non-isochronous endpoint being stalled - * - * Use this to stall an endpoint, perhaps as an error report. - * Except for control endpoints, - * the endpoint stays halted (will not stream any data) until the host - * clears this feature; drivers may need to empty the endpoint's request - * queue first, to make sure no inappropriate transfers happen. - * - * Note that while an endpoint CLEAR_FEATURE will be invisible to the - * gadget driver, a SET_INTERFACE will not be. To reset endpoints for the - * current altsetting, see usb_ep_clear_halt(). When switching altsettings, - * it's simplest to use usb_ep_enable() or usb_ep_disable() for the endpoints. - * - * Returns zero, or a negative error code. On success, this call sets - * underlying hardware state that blocks data transfers. - * Attempts to halt IN endpoints will fail (returning -EAGAIN) if any - * transfer requests are still queued, or if the controller hardware - * (usually a FIFO) still holds bytes that the host hasn't collected. - */ +{ return 0; } static inline int usb_ep_set_halt(struct usb_ep *ep) -{ - return ep->ops->set_halt(ep, 1); -} - -/** - * usb_ep_clear_halt - clears endpoint halt, and resets toggle - * @ep:the bulk or interrupt endpoint being reset - * - * Use this when responding to the standard usb "set interface" request, - * for endpoints that aren't reconfigured, after clearing any other state - * in the endpoint's i/o queue. - * - * Returns zero, or a negative error code. On success, this call clears - * the underlying hardware state reflecting endpoint halt and data toggle. - * Note that some hardware can't support this request (like pxa2xx_udc), - * and accordingly can't correctly implement interface altsettings. 
- */ +{ return 0; } static inline int usb_ep_clear_halt(struct usb_ep *ep) -{ - return ep->ops->set_halt(ep, 0); -} - -/** - * usb_ep_set_wedge - sets the halt feature and ignores clear requests - * @ep: the endpoint being wedged - * - * Use this to stall an endpoint and ignore CLEAR_FEATURE(HALT_ENDPOINT) - * requests. If the gadget driver clears the halt status, it will - * automatically unwedge the endpoint. - * - * Returns zero on success, else negative errno. - */ -static inline int -usb_ep_set_wedge(struct usb_ep *ep) -{ - if (ep->ops->set_wedge) - return ep->ops->set_wedge(ep); - else - return ep->ops->set_halt(ep, 1); -} - -/** - * usb_ep_fifo_status - returns number of bytes in fifo, or error - * @ep: the endpoint whose fifo status is being checked. - * - * FIFO endpoints may have "unclaimed data" in them in certain cases, - * such as after aborted transfers. Hosts may not have collected all - * the IN data written by the gadget driver (and reported by a request - * completion). The gadget driver may not have collected all the data - * written OUT to it by the host. Drivers that need precise handling for - * fault reporting or recovery may need to use this call. - * - * This returns the number of such bytes in the fifo, or a negative - * errno if the endpoint doesn't use a FIFO or doesn't support such - * precise handling. - */ +{ return 0; } +static inline int usb_ep_set_wedge(struct usb_ep *ep) +{ return 0; } static inline int usb_ep_fifo_status(struct usb_ep *ep) -{ - if (ep->ops->fifo_status) - return ep->ops->fifo_status(ep); - else - return -EOPNOTSUPP; -} - -/** - * usb_ep_fifo_flush - flushes contents of a fifo - * @ep: the endpoint whose fifo is being flushed. - * - * This call may be used to flush the "unclaimed data" that may exist in - * an endpoint fifo after abnormal transaction terminations. The call - * must never be used except when endpoint is not being used for any - * protocol translation. - */ +{ return 0; } static inline void usb_ep_fifo_flush(struct usb_ep *ep) -{ - if (ep->ops->fifo_flush) - ep->ops->fifo_flush(ep); -} - +{ } +#endif /* USB_GADGET */ /*-------------------------------------------------------------------------*/ @@ -582,6 +326,7 @@ struct usb_gadget_ops { * @dev: Driver model state for this abstract device. * @out_epnum: last used out ep number * @in_epnum: last used in ep number + * @mA: last set mA value * @otg_caps: OTG capabilities of this gadget. * @sg_supported: true if we can handle scatter-gather * @is_otg: True if the USB device port uses a Mini-AB jack, so that the @@ -638,6 +383,7 @@ struct usb_gadget { struct device dev; unsigned out_epnum; unsigned in_epnum; + unsigned mA; struct usb_otg_caps *otg_caps; unsigned sg_supported:1; @@ -760,251 +506,44 @@ static inline int gadget_is_otg(struct usb_gadget *g) #endif } -/** - * usb_gadget_frame_number - returns the current frame number - * @gadget: controller that reports the frame number - * - * Returns the usb frame number, normally eleven bits from a SOF packet, - * or negative errno if this device doesn't support this capability. 
- */ -static inline int usb_gadget_frame_number(struct usb_gadget *gadget) -{ - return gadget->ops->get_frame(gadget); -} +/*-------------------------------------------------------------------------*/ -/** - * usb_gadget_wakeup - tries to wake up the host connected to this gadget - * @gadget: controller used to wake up the host - * - * Returns zero on success, else negative error code if the hardware - * doesn't support such attempts, or its support has not been enabled - * by the usb host. Drivers must return device descriptors that report - * their ability to support this, or hosts won't enable it. - * - * This may also try to use SRP to wake the host and start enumeration, - * even if OTG isn't otherwise in use. OTG devices may also start - * remote wakeup even when hosts don't explicitly enable it. - */ +#if IS_ENABLED(CONFIG_USB_GADGET) +int usb_gadget_frame_number(struct usb_gadget *gadget); +int usb_gadget_wakeup(struct usb_gadget *gadget); +int usb_gadget_set_selfpowered(struct usb_gadget *gadget); +int usb_gadget_clear_selfpowered(struct usb_gadget *gadget); +int usb_gadget_vbus_connect(struct usb_gadget *gadget); +int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA); +int usb_gadget_vbus_disconnect(struct usb_gadget *gadget); +int usb_gadget_connect(struct usb_gadget *gadget); +int usb_gadget_disconnect(struct usb_gadget *gadget); +int usb_gadget_deactivate(struct usb_gadget *gadget); +int usb_gadget_activate(struct usb_gadget *gadget); +#else +static inline int usb_gadget_frame_number(struct usb_gadget *gadget) +{ return 0; } static inline int usb_gadget_wakeup(struct usb_gadget *gadget) -{ - if (!gadget->ops->wakeup) - return -EOPNOTSUPP; - return gadget->ops->wakeup(gadget); -} - -/** - * usb_gadget_set_selfpowered - sets the device selfpowered feature. - * @gadget:the device being declared as self-powered - * - * this affects the device status reported by the hardware driver - * to reflect that it now has a local power supply. - * - * returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_set_selfpowered(struct usb_gadget *gadget) -{ - if (!gadget->ops->set_selfpowered) - return -EOPNOTSUPP; - return gadget->ops->set_selfpowered(gadget, 1); -} - -/** - * usb_gadget_clear_selfpowered - clear the device selfpowered feature. - * @gadget:the device being declared as bus-powered - * - * this affects the device status reported by the hardware driver. - * some hardware may not support bus-powered operation, in which - * case this feature's value can never change. - * - * returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_clear_selfpowered(struct usb_gadget *gadget) -{ - if (!gadget->ops->set_selfpowered) - return -EOPNOTSUPP; - return gadget->ops->set_selfpowered(gadget, 0); -} - -/** - * usb_gadget_vbus_connect - Notify controller that VBUS is powered - * @gadget:The device which now has VBUS power. - * Context: can sleep - * - * This call is used by a driver for an external transceiver (or GPIO) - * that detects a VBUS power session starting. Common responses include - * resuming the controller, activating the D+ (or D-) pullup to let the - * host detect that a USB device is attached, and starting to draw power - * (8mA or possibly more, especially after SET_CONFIGURATION). - * - * Returns zero on success, else negative errno. 
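
The removed inlines above all follow one dispatch pattern: an ops-table method may be absent, and the wrapper returns -EOPNOTSUPP instead of dereferencing a NULL pointer; sketched here with invented names:

	#include <stdio.h>
	#include <errno.h>

	struct demo_ops {
		int (*wakeup)(void);	/* optional method, may be NULL */
	};

	static int demo_wakeup(struct demo_ops *ops)
	{
		if (!ops->wakeup)
			return -EOPNOTSUPP;
		return ops->wakeup();
	}

	static int real_wakeup(void)
	{
		return 0;
	}

	int main(void)
	{
		struct demo_ops with = { .wakeup = real_wakeup }, without = { 0 };

		printf("with op:    %d\n", demo_wakeup(&with));		/* 0 */
		printf("without op: %d\n", demo_wakeup(&without));	/* -95 on Linux */
		return 0;
	}
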
- */ +{ return 0; } static inline int usb_gadget_vbus_connect(struct usb_gadget *gadget) -{ - if (!gadget->ops->vbus_session) - return -EOPNOTSUPP; - return gadget->ops->vbus_session(gadget, 1); -} - -/** - * usb_gadget_vbus_draw - constrain controller's VBUS power usage - * @gadget:The device whose VBUS usage is being described - * @mA:How much current to draw, in milliAmperes. This should be twice - * the value listed in the configuration descriptor bMaxPower field. - * - * This call is used by gadget drivers during SET_CONFIGURATION calls, - * reporting how much power the device may consume. For example, this - * could affect how quickly batteries are recharged. - * - * Returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) -{ - if (!gadget->ops->vbus_draw) - return -EOPNOTSUPP; - return gadget->ops->vbus_draw(gadget, mA); -} - -/** - * usb_gadget_vbus_disconnect - notify controller about VBUS session end - * @gadget:the device whose VBUS supply is being described - * Context: can sleep - * - * This call is used by a driver for an external transceiver (or GPIO) - * that detects a VBUS power session ending. Common responses include - * reversing everything done in usb_gadget_vbus_connect(). - * - * Returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_vbus_disconnect(struct usb_gadget *gadget) -{ - if (!gadget->ops->vbus_session) - return -EOPNOTSUPP; - return gadget->ops->vbus_session(gadget, 0); -} - -/** - * usb_gadget_connect - software-controlled connect to USB host - * @gadget:the peripheral being connected - * - * Enables the D+ (or potentially D-) pullup. The host will start - * enumerating this gadget when the pullup is active and a VBUS session - * is active (the link is powered). This pullup is always enabled unless - * usb_gadget_disconnect() has been used to disable it. - * - * Returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_connect(struct usb_gadget *gadget) -{ - int ret; - - if (!gadget->ops->pullup) - return -EOPNOTSUPP; - - if (gadget->deactivated) { - /* - * If gadget is deactivated we only save new state. - * Gadget will be connected automatically after activation. - */ - gadget->connected = true; - return 0; - } - - ret = gadget->ops->pullup(gadget, 1); - if (!ret) - gadget->connected = 1; - return ret; -} - -/** - * usb_gadget_disconnect - software-controlled disconnect from USB host - * @gadget:the peripheral being disconnected - * - * Disables the D+ (or potentially D-) pullup, which the host may see - * as a disconnect (when a VBUS session is active). Not all systems - * support software pullup controls. - * - * Returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_disconnect(struct usb_gadget *gadget) -{ - int ret; - - if (!gadget->ops->pullup) - return -EOPNOTSUPP; - - if (gadget->deactivated) { - /* - * If gadget is deactivated we only save new state. - * Gadget will stay disconnected after activation. 
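
A small model of the connected/deactivated bookkeeping visible in the removed usb_gadget_connect()/usb_gadget_disconnect() bodies above: while the gadget is deactivated, connect and disconnect only record intent, and activation replays it (plain booleans stand in for the gadget fields):

	#include <stdio.h>
	#include <stdbool.h>

	static bool connected, deactivated, pullup;

	static void demo_connect(void)
	{
		connected = true;
		if (!deactivated)
			pullup = true;	/* ops->pullup(gadget, 1) in the kernel */
	}

	static void demo_deactivate(void)
	{
		pullup = false;
		deactivated = true;	/* 'connected' is remembered for later */
	}

	static void demo_activate(void)
	{
		deactivated = false;
		if (connected)
			pullup = true;	/* replay the saved connect */
	}

	int main(void)
	{
		demo_deactivate();
		demo_connect();		/* only records the wish */
		printf("pullup while deactivated: %d\n", pullup);	/* 0 */
		demo_activate();
		printf("pullup after activate: %d\n", pullup);		/* 1 */
		return 0;
	}
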
- */ - gadget->connected = false; - return 0; - } - - ret = gadget->ops->pullup(gadget, 0); - if (!ret) - gadget->connected = 0; - return ret; -} - -/** - * usb_gadget_deactivate - deactivate function which is not ready to work - * @gadget: the peripheral being deactivated - * - * This routine may be used during the gadget driver bind() call to prevent - * the peripheral from ever being visible to the USB host, unless later - * usb_gadget_activate() is called. For example, user mode components may - * need to be activated before the system can talk to hosts. - * - * Returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_deactivate(struct usb_gadget *gadget) -{ - int ret; - - if (gadget->deactivated) - return 0; - - if (gadget->connected) { - ret = usb_gadget_disconnect(gadget); - if (ret) - return ret; - /* - * If gadget was being connected before deactivation, we want - * to reconnect it in usb_gadget_activate(). - */ - gadget->connected = true; - } - gadget->deactivated = true; - - return 0; -} - -/** - * usb_gadget_activate - activate function which is not ready to work - * @gadget: the peripheral being activated - * - * This routine activates gadget which was previously deactivated with - * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed. - * - * Returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_activate(struct usb_gadget *gadget) -{ - if (!gadget->deactivated) - return 0; - - gadget->deactivated = false; - - /* - * If gadget has been connected before deactivation, or became connected - * while it was being deactivated, we call usb_gadget_connect(). - */ - if (gadget->connected) - return usb_gadget_connect(gadget); - - return 0; -} +{ return 0; } +#endif /* CONFIG_USB_GADGET */ /*-------------------------------------------------------------------------*/ @@ -1034,6 +573,8 @@ static inline int usb_gadget_activate(struct usb_gadget *gadget) * @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL, * this driver will be bound to any available UDC. * @pending: UDC core private data used for deferred probe of this driver. + * @match_existing_only: If udc is not found, return an error and don't add this + * gadget driver to list of pending driver * * Devices are disabled till a gadget driver successfully bind()s, which * means the driver will handle setup() requests needed to enumerate (and @@ -1097,6 +638,7 @@ struct usb_gadget_driver { char *udc_name; struct list_head pending; + unsigned match_existing_only:1; }; @@ -1223,9 +765,13 @@ int usb_otg_descriptor_init(struct usb_gadget *gadget, /* utility to simplify map/unmap of usb_requests to/from DMA */ +extern int usb_gadget_map_request_by_dev(struct device *dev, + struct usb_request *req, int is_in); extern int usb_gadget_map_request(struct usb_gadget *gadget, struct usb_request *req, int is_in); +extern void usb_gadget_unmap_request_by_dev(struct device *dev, + struct usb_request *req, int is_in); extern void usb_gadget_unmap_request(struct usb_gadget *gadget, struct usb_request *req, int is_in); diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index b98f831dcda3..66fc13705ab7 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -181,6 +181,7 @@ struct usb_hcd { * bandwidth_mutex should be dropped after a successful control message * to the device, or resetting the bandwidth after a failed attempt. 
*/ + struct mutex *address0_mutex; struct mutex *bandwidth_mutex; struct usb_hcd *shared_hcd; struct usb_hcd *primary_hcd; diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h deleted file mode 100644 index 8c8f6854c993..000000000000 --- a/include/linux/usb/msm_hsusb.h +++ /dev/null @@ -1,200 +0,0 @@ -/* linux/include/asm-arm/arch-msm/hsusb.h - * - * Copyright (C) 2008 Google, Inc. - * Author: Brian Swetland <swetland@google.com> - * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#ifndef __ASM_ARCH_MSM_HSUSB_H -#define __ASM_ARCH_MSM_HSUSB_H - -#include <linux/extcon.h> -#include <linux/types.h> -#include <linux/usb/otg.h> -#include <linux/clk.h> - -/** - * OTG control - * - * OTG_NO_CONTROL Id/VBUS notifications not required. Useful in host - * only configuration. - * OTG_PHY_CONTROL Id/VBUS notifications come from the USB PHY. - * OTG_PMIC_CONTROL Id/VBUS notifications come from PMIC hardware. - * OTG_USER_CONTROL Id/VBUS notifications come from the user via sysfs. - * - */ -enum otg_control_type { - OTG_NO_CONTROL = 0, - OTG_PHY_CONTROL, - OTG_PMIC_CONTROL, - OTG_USER_CONTROL, -}; - -/** - * PHY used in - * - * INVALID_PHY Unsupported PHY - * CI_45NM_INTEGRATED_PHY Chipidea 45nm integrated PHY - * SNPS_28NM_INTEGRATED_PHY Synopsys 28nm integrated PHY - * - */ -enum msm_usb_phy_type { - INVALID_PHY = 0, - CI_45NM_INTEGRATED_PHY, - SNPS_28NM_INTEGRATED_PHY, -}; - -#define IDEV_CHG_MAX 1500 -#define IUNIT 100 - -/** - * Different states involved in USB charger detection. - * - * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection - * process is not yet started. - * USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact. - * USB_CHG_STATE_DCD_DONE Data pin contact is detected. - * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects - * between SDP and DCP/CDP). - * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects - * between DCP and CDP). - * USB_CHG_STATE_DETECTED USB charger type is determined. - * - */ -enum usb_chg_state { - USB_CHG_STATE_UNDEFINED = 0, - USB_CHG_STATE_WAIT_FOR_DCD, - USB_CHG_STATE_DCD_DONE, - USB_CHG_STATE_PRIMARY_DONE, - USB_CHG_STATE_SECONDARY_DONE, - USB_CHG_STATE_DETECTED, -}; - -/** - * USB charger types - * - * USB_INVALID_CHARGER Invalid USB charger. - * USB_SDP_CHARGER Standard downstream port. Refers to a downstream port - * on USB2.0 compliant host/hub. - * USB_DCP_CHARGER Dedicated charger port (AC charger/ Wall charger). - * USB_CDP_CHARGER Charging downstream port. Enumeration can happen and - * IDEV_CHG_MAX can be drawn irrespective of USB state. - * - */ -enum usb_chg_type { - USB_INVALID_CHARGER = 0, - USB_SDP_CHARGER, - USB_DCP_CHARGER, - USB_CDP_CHARGER, -}; - -/** - * struct msm_otg_platform_data - platform device data - * for msm_otg driver. - * @phy_init_seq: PHY configuration sequence values. Value of -1 is reserved as - * "do not overwrite default value at this address". - * @phy_init_sz: PHY configuration sequence size. - * @vbus_power: VBUS power on/off routine.
- * @power_budget: VBUS power budget in mA (0 will be treated as 500mA). - * @mode: Supported mode (OTG/peripheral/host). - * @otg_control: OTG switch controlled by user/Id pin - */ -struct msm_otg_platform_data { - int *phy_init_seq; - int phy_init_sz; - void (*vbus_power)(bool on); - unsigned power_budget; - enum usb_dr_mode mode; - enum otg_control_type otg_control; - enum msm_usb_phy_type phy_type; - void (*setup_gpio)(enum usb_otg_state state); -}; - -/** - * struct msm_usb_cable - structure for external connector cable - * state tracking - * @nb: hold event notification callback - * @extcon: used for notification registration - */ -struct msm_usb_cable { - struct notifier_block nb; - struct extcon_dev *extcon; -}; - -/** - * struct msm_otg: OTG driver data. Shared by HCD and DCD. - * @otg: USB OTG Transceiver structure. - * @pdata: otg device platform data. - * @irq: IRQ number assigned for HSUSB controller. - * @clk: clock struct of usb_hs_clk. - * @pclk: clock struct of usb_hs_pclk. - * @core_clk: clock struct of usb_hs_core_clk. - * @regs: ioremapped register base address. - * @inputs: OTG state machine inputs(Id, SessValid etc). - * @sm_work: OTG state machine work. - * @in_lpm: indicates low power mode (LPM) state. - * @async_int: Async interrupt arrived. - * @cur_power: The amount of mA available from downstream port. - * @chg_work: Charger detection work. - * @chg_state: The state of charger detection process. - * @chg_type: The type of charger attached. - * @dcd_retries: The retry count used to track Data contact - * detection process. - * @manual_pullup: true if VBUS is not routed to USB controller/phy - * and controller driver therefore enables pull-up explicitly before - * starting controller using usbcmd run/stop bit. - * @vbus: VBUS signal state tracking, using extcon framework - * @id: ID signal state tracking, using extcon framework - * @switch_gpio: Descriptor for GPIO used to control external Dual - * SPDT USB Switch.
- * @reboot: Used to inform the driver to route USB D+/D- line to Device - * connector - */ -struct msm_otg { - struct usb_phy phy; - struct msm_otg_platform_data *pdata; - int irq; - struct clk *clk; - struct clk *pclk; - struct clk *core_clk; - void __iomem *regs; -#define ID 0 -#define B_SESS_VLD 1 - unsigned long inputs; - struct work_struct sm_work; - atomic_t in_lpm; - int async_int; - unsigned cur_power; - int phy_number; - struct delayed_work chg_work; - enum usb_chg_state chg_state; - enum usb_chg_type chg_type; - u8 dcd_retries; - struct regulator *v3p3; - struct regulator *v1p8; - struct regulator *vddcx; - - struct reset_control *phy_rst; - struct reset_control *link_rst; - int vdd_levels[3]; - - bool manual_pullup; - - struct msm_usb_cable vbus; - struct msm_usb_cable id; - - struct gpio_desc *switch_gpio; - struct notifier_block reboot; -}; - -#endif diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h index 0b3da40a525e..d315c8907869 100644 --- a/include/linux/usb/musb.h +++ b/include/linux/usb/musb.h @@ -142,10 +142,11 @@ enum musb_vbus_id_status { }; #if IS_ENABLED(CONFIG_USB_MUSB_HDRC) -void musb_mailbox(enum musb_vbus_id_status status); +int musb_mailbox(enum musb_vbus_id_status status); #else -static inline void musb_mailbox(enum musb_vbus_id_status status) +static inline int musb_mailbox(enum musb_vbus_id_status status) { + return 0; } #endif diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h index de3237fce6b2..5ff9032ee1b4 100644 --- a/include/linux/usb/of.h +++ b/include/linux/usb/of.h @@ -12,7 +12,7 @@ #include <linux/usb/phy.h> #if IS_ENABLED(CONFIG_OF) -enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *phy_np); +enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0); bool of_usb_host_tpl_support(struct device_node *np); int of_usb_update_otg_caps(struct device_node *np, struct usb_otg_caps *otg_caps); @@ -20,7 +20,7 @@ struct device_node *usb_of_get_child_node(struct device_node *parent, int portnum); #else static inline enum usb_dr_mode -of_usb_get_dr_mode_by_phy(struct device_node *phy_np) +of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0) { return USB_DR_MODE_UNKNOWN; } diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h index 24198e16f849..7a0350535cb1 100644 --- a/include/linux/usb/otg-fsm.h +++ b/include/linux/usb/otg-fsm.h @@ -72,37 +72,113 @@ enum otg_fsm_timer { NUM_OTG_FSM_TIMERS, }; -/* OTG state machine according to the OTG spec */ +/** + * struct otg_fsm - OTG state machine according to the OTG spec + * + * OTG hardware Inputs + * + * Common inputs for A and B device + * @id: TRUE for B-device, FALSE for A-device. 
+ * @adp_change: TRUE when current ADP measurement (n) value, compared to the + * ADP measurement taken at n-2, differs by more than CADP_THR + * @power_up: TRUE when the OTG device first powers up its USB system and + * ADP measurement taken if ADP capable + * + * A-Device state inputs + * @a_srp_det: TRUE if the A-device detects SRP + * @a_vbus_vld: TRUE when VBUS voltage is in regulation + * @b_conn: TRUE if the A-device detects connection from the B-device + * @a_bus_resume: TRUE when the B-device detects that the A-device is signaling + * a resume (K state) + * B-Device state inputs + * @a_bus_suspend: TRUE when the B-device detects that the A-device has put the + * bus into suspend + * @a_conn: TRUE if the B-device detects a connection from the A-device + * @b_se0_srp: TRUE when the line has been at SE0 for more than the minimum + * time before generating SRP + * @b_ssend_srp: TRUE when the VBUS has been below VOTG_SESS_VLD for more than + * the minimum time before generating SRP + * @b_sess_vld: TRUE when the B-device detects that the voltage on VBUS is + * above VOTG_SESS_VLD + * @test_device: TRUE when the B-device switches to B-Host and detects an OTG + * test device. This must be set by host/hub driver + * + * Application inputs (A-Device) + * @a_bus_drop: TRUE when A-device application needs to power down the bus + * @a_bus_req: TRUE when A-device application wants to use the bus. + * FALSE to suspend the bus + * + * Application inputs (B-Device) + * @b_bus_req: TRUE during the time that the Application running on the + * B-device wants to use the bus + * + * Auxiliary inputs (OTG v1.3 only. Obsolete now.) + * @a_sess_vld: TRUE if the A-device detects that VBUS is above VA_SESS_VLD + * @b_bus_suspend: TRUE when the A-device detects that the B-device has put + * the bus into suspend + * @b_bus_resume: TRUE when the A-device detects that the B-device is signaling + * resume on the bus + * + * OTG Output status. Read only for users. Updated by OTG FSM helpers defined + * in this file + * + * Outputs for Both A and B device + * @drv_vbus: TRUE when A-device is driving VBUS + * @loc_conn: TRUE when the local device has signaled that it is connected + * to the bus + * @loc_sof: TRUE when the local device is generating activity on the bus + * @adp_prb: TRUE when the local device is in the process of doing + * ADP probing + * + * Outputs for B-device state + * @adp_sns: TRUE when the B-device is in the process of carrying out + * ADP sensing + * @data_pulse: TRUE when the B-device is performing data line pulsing + * + * Internal Variables + * + * a_set_b_hnp_en: TRUE when the A-device has successfully set the + * b_hnp_enable bit in the B-device. + * Unused as OTG fsm uses otg->host->b_hnp_enable instead + * b_srp_done: TRUE when the B-device has completed initiating SRP + * b_hnp_enable: TRUE when the B-device has accepted the + * SetFeature(b_hnp_enable) request. + * Unused as OTG fsm uses otg->gadget->b_hnp_enable instead + * a_clr_err: Asserted (by the application?)
to clear a_vbus_err due to an overcurrent condition and causes the A-device to transition + * to a_wait_vfall */ struct otg_fsm { /* Input */ int id; int adp_change; int power_up; - int test_device; - int a_bus_drop; - int a_bus_req; int a_srp_det; int a_vbus_vld; int b_conn; int a_bus_resume; int a_bus_suspend; int a_conn; - int b_bus_req; int b_se0_srp; int b_ssend_srp; int b_sess_vld; + int test_device; + int a_bus_drop; + int a_bus_req; + int b_bus_req; + /* Auxiliary inputs */ int a_sess_vld; int b_bus_resume; int b_bus_suspend; /* Output */ - int data_pulse; int drv_vbus; int loc_conn; int loc_sof; int adp_prb; int adp_sns; + int data_pulse; /* Internal variables */ int a_set_b_hnp_en; @@ -110,7 +186,7 @@ struct otg_fsm { int b_hnp_enable; int a_clr_err; - /* Informative variables */ + /* Informative variables. All unused as of now */ int a_bus_drop_inf; int a_bus_req_inf; int a_clr_err_inf; @@ -134,6 +210,7 @@ struct otg_fsm { struct mutex lock; u8 *host_req_flag; struct delayed_work hnp_polling_work; + bool state_changed; }; struct otg_fsm_ops { diff --git a/include/linux/usb/xhci_pdriver.h b/include/linux/usb/xhci_pdriver.h deleted file mode 100644 index 376654b5b0f7..000000000000 --- a/include/linux/usb/xhci_pdriver.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - */ - -#ifndef __USB_CORE_XHCI_PDRIVER_H -#define __USB_CORE_XHCI_PDRIVER_H - -/** - * struct usb_xhci_pdata - platform_data for generic xhci platform driver - * - * @usb3_lpm_capable: determines if this xhci platform supports USB3 - * LPM capability - * - */ -struct usb_xhci_pdata { - unsigned usb3_lpm_capable:1; -}; - -#endif /* __USB_CORE_XHCI_PDRIVER_H */ diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 8297e5b341d8..9217169c64cb 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -72,6 +72,7 @@ extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t, loff_t *); extern int proc_setgroups_show(struct seq_file *m, void *v); extern bool userns_may_setgroups(const struct user_namespace *ns); +extern bool current_in_userns(const struct user_namespace *target_ns); #else static inline struct user_namespace *get_user_ns(struct user_namespace *ns) @@ -100,6 +101,11 @@ static inline bool userns_may_setgroups(const struct user_namespace *ns) { return true; } + +static inline bool current_in_userns(const struct user_namespace *target_ns) +{ + return true; +} #endif #endif /* _LINUX_USER_H */ diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index 587480ad41b7..dd66a952e8cd 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -27,8 +27,7 @@ #define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) #define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS) -extern int handle_userfault(struct vm_area_struct *vma, unsigned long address, - unsigned int flags, unsigned long reason); +extern int handle_userfault(struct fault_env *fe, unsigned long reason); extern ssize_t mcopy_atomic(struct
mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len); @@ -56,10 +55,7 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma) #else /* CONFIG_USERFAULTFD */ /* mm helpers */ -static inline int handle_userfault(struct vm_area_struct *vma, - unsigned long address, - unsigned int flags, - unsigned long reason) +static inline int handle_userfault(struct fault_env *fe, unsigned long reason) { return VM_FAULT_SIGBUS; } diff --git a/include/linux/uuid.h b/include/linux/uuid.h index 6df2509033d7..2d095fc60204 100644 --- a/include/linux/uuid.h +++ b/include/linux/uuid.h @@ -1,7 +1,7 @@ /* * UUID/GUID definition * - * Copyright (C) 2010, Intel Corp. + * Copyright (C) 2010, 2016 Intel Corp. * Huang Ying <ying.huang@intel.com> * * This program is free software; you can redistribute it and/or @@ -12,16 +12,17 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _LINUX_UUID_H_ #define _LINUX_UUID_H_ #include <uapi/linux/uuid.h> +/* + * The length of a UUID string ("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee") + * not including trailing NUL. + */ +#define UUID_STRING_LEN 36 static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2) { @@ -33,7 +34,17 @@ static inline int uuid_be_cmp(const uuid_be u1, const uuid_be u2) return memcmp(&u1, &u2, sizeof(uuid_be)); } +void generate_random_uuid(unsigned char uuid[16]); + extern void uuid_le_gen(uuid_le *u); extern void uuid_be_gen(uuid_be *u); +bool __must_check uuid_is_valid(const char *uuid); + +extern const u8 uuid_le_index[16]; +extern const u8 uuid_be_index[16]; + +int uuid_le_to_bin(const char *uuid, uuid_le *u); +int uuid_be_to_bin(const char *uuid, uuid_be *u); + #endif diff --git a/include/linux/verification.h b/include/linux/verification.h new file mode 100644 index 000000000000..a10549a6c7cd --- /dev/null +++ b/include/linux/verification.h @@ -0,0 +1,49 @@ +/* Signature verification + * + * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_VERIFICATION_H +#define _LINUX_VERIFICATION_H + +/* + * The use to which an asymmetric key is being put. 
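
Given the UUID_STRING_LEN comment in the uuid.h hunk above, a plausible userspace sketch of the kind of check uuid_is_valid() declares: 36 characters, hyphens at offsets 8, 13, 18 and 23, hex digits elsewhere (the kernel's actual implementation may differ):

	#include <ctype.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	#define UUID_STRING_LEN 36	/* as defined in the hunk above */

	static bool demo_uuid_is_valid(const char *uuid)
	{
		if (strlen(uuid) != UUID_STRING_LEN)
			return false;
		for (int i = 0; i < UUID_STRING_LEN; i++) {
			if (i == 8 || i == 13 || i == 18 || i == 23) {
				if (uuid[i] != '-')
					return false;
			} else if (!isxdigit((unsigned char)uuid[i])) {
				return false;
			}
		}
		return true;
	}

	int main(void)
	{
		printf("%d\n", demo_uuid_is_valid("c1d1fa74-8b44-4cf5-a3ea-b2a55ff1d3fd"));	/* 1 */
		printf("%d\n", demo_uuid_is_valid("not-a-uuid"));				/* 0 */
		return 0;
	}
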
+ */ +enum key_being_used_for { + VERIFYING_MODULE_SIGNATURE, + VERIFYING_FIRMWARE_SIGNATURE, + VERIFYING_KEXEC_PE_SIGNATURE, + VERIFYING_KEY_SIGNATURE, + VERIFYING_KEY_SELF_SIGNATURE, + VERIFYING_UNSPECIFIED_SIGNATURE, + NR__KEY_BEING_USED_FOR +}; +extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR]; + +#ifdef CONFIG_SYSTEM_DATA_VERIFICATION + +struct key; + +extern int verify_pkcs7_signature(const void *data, size_t len, + const void *raw_pkcs7, size_t pkcs7_len, + struct key *trusted_keys, + enum key_being_used_for usage, + int (*view_content)(void *ctx, + const void *data, size_t len, + size_t asn1hdrlen), + void *ctx); + +#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION +extern int verify_pefile_signature(const void *pebuf, unsigned pelen, + struct key *trusted_keys, + enum key_being_used_for usage); +#endif + +#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ +#endif /* _LINUX_VERIFY_PEFILE_H */ diff --git a/include/linux/verify_pefile.h b/include/linux/verify_pefile.h deleted file mode 100644 index da2049b5161c..000000000000 --- a/include/linux/verify_pefile.h +++ /dev/null @@ -1,22 +0,0 @@ -/* Signed PE file verification - * - * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ - -#ifndef _LINUX_VERIFY_PEFILE_H -#define _LINUX_VERIFY_PEFILE_H - -#include <crypto/public_key.h> - -extern int verify_pefile_signature(const void *pebuf, unsigned pelen, - struct key *trusted_keyring, - enum key_being_used_for usage, - bool *_trusted); - -#endif /* _LINUX_VERIFY_PEFILE_H */ diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index b39a5f3153bd..960bedbdec87 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h @@ -165,6 +165,7 @@ int vga_switcheroo_unlock_ddc(struct pci_dev *pdev); int vga_switcheroo_process_delayed_switch(void); +bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev); enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev); void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic); @@ -188,6 +189,7 @@ static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(v static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; } static inline int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) { return -ENODEV; } static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } +static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { return false; } static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {} diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 6e6cb0c9d7cb..26c155bb639b 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -149,6 +149,19 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev, return __virtio_test_bit(vdev, fbit); } +/** + * virtio_has_iommu_quirk - determine whether this device has the iommu quirk + * @vdev: the device + */ +static inline bool virtio_has_iommu_quirk(const struct virtio_device 
*vdev) +{ + /* + * Note the reverse polarity of the quirk feature (compared to most + * other features), this is for compatibility with legacy systems. + */ + return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); +} + static inline struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, vq_callback_t *c, const char *n) diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h new file mode 100644 index 000000000000..1c912f85e041 --- /dev/null +++ b/include/linux/virtio_net.h @@ -0,0 +1,101 @@ +#ifndef _LINUX_VIRTIO_NET_H +#define _LINUX_VIRTIO_NET_H + +#include <linux/if_vlan.h> +#include <uapi/linux/virtio_net.h> + +static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, + const struct virtio_net_hdr *hdr, + bool little_endian) +{ + unsigned short gso_type = 0; + + if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { + switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { + case VIRTIO_NET_HDR_GSO_TCPV4: + gso_type = SKB_GSO_TCPV4; + break; + case VIRTIO_NET_HDR_GSO_TCPV6: + gso_type = SKB_GSO_TCPV6; + break; + case VIRTIO_NET_HDR_GSO_UDP: + gso_type = SKB_GSO_UDP; + break; + default: + return -EINVAL; + } + + if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) + gso_type |= SKB_GSO_TCP_ECN; + + if (hdr->gso_size == 0) + return -EINVAL; + } + + if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { + u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start); + u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset); + + if (!skb_partial_csum_set(skb, start, off)) + return -EINVAL; + } + + if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { + u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); + + skb_shinfo(skb)->gso_size = gso_size; + skb_shinfo(skb)->gso_type = gso_type; + + /* Header must be checked, and gso_segs computed. */ + skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; + skb_shinfo(skb)->gso_segs = 0; + } + + return 0; +} + +static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, + struct virtio_net_hdr *hdr, + bool little_endian) +{ + memset(hdr, 0, sizeof(*hdr)); + + if (skb_is_gso(skb)) { + struct skb_shared_info *sinfo = skb_shinfo(skb); + + /* This is a hint as to how much should be linear. 
*/ + hdr->hdr_len = __cpu_to_virtio16(little_endian, + skb_headlen(skb)); + hdr->gso_size = __cpu_to_virtio16(little_endian, + sinfo->gso_size); + if (sinfo->gso_type & SKB_GSO_TCPV4) + hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; + else if (sinfo->gso_type & SKB_GSO_TCPV6) + hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; + else if (sinfo->gso_type & SKB_GSO_UDP) + hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP; + else + return -EINVAL; + if (sinfo->gso_type & SKB_GSO_TCP_ECN) + hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; + } else + hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; + if (skb_vlan_tag_present(skb)) + hdr->csum_start = __cpu_to_virtio16(little_endian, + skb_checksum_start_offset(skb) + VLAN_HLEN); + else + hdr->csum_start = __cpu_to_virtio16(little_endian, + skb_checksum_start_offset(skb)); + hdr->csum_offset = __cpu_to_virtio16(little_endian, + skb->csum_offset); + } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { + hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID; + } /* else everything is zero */ + + return 0; +} + +#endif /* _LINUX_VIRTIO_NET_H */ diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h new file mode 100644 index 000000000000..9638bfeb0d1f --- /dev/null +++ b/include/linux/virtio_vsock.h @@ -0,0 +1,154 @@ +#ifndef _LINUX_VIRTIO_VSOCK_H +#define _LINUX_VIRTIO_VSOCK_H + +#include <uapi/linux/virtio_vsock.h> +#include <linux/socket.h> +#include <net/sock.h> +#include <net/af_vsock.h> + +#define VIRTIO_VSOCK_DEFAULT_MIN_BUF_SIZE 128 +#define VIRTIO_VSOCK_DEFAULT_BUF_SIZE (1024 * 256) +#define VIRTIO_VSOCK_DEFAULT_MAX_BUF_SIZE (1024 * 256) +#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4) +#define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL +#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64) + +enum { + VSOCK_VQ_RX = 0, /* for host to guest data */ + VSOCK_VQ_TX = 1, /* for guest to host data */ + VSOCK_VQ_EVENT = 2, + VSOCK_VQ_MAX = 3, +}; + +/* Per-socket state (accessed via vsk->trans) */ +struct virtio_vsock_sock { + struct vsock_sock *vsk; + + /* Protected by lock_sock(sk_vsock(trans->vsk)) */ + u32 buf_size; + u32 buf_size_min; + u32 buf_size_max; + + spinlock_t tx_lock; + spinlock_t rx_lock; + + /* Protected by tx_lock */ + u32 tx_cnt; + u32 buf_alloc; + u32 peer_fwd_cnt; + u32 peer_buf_alloc; + + /* Protected by rx_lock */ + u32 fwd_cnt; + u32 rx_bytes; + struct list_head rx_queue; +}; + +struct virtio_vsock_pkt { + struct virtio_vsock_hdr hdr; + struct work_struct work; + struct list_head list; + void *buf; + u32 len; + u32 off; + bool reply; +}; + +struct virtio_vsock_pkt_info { + u32 remote_cid, remote_port; + struct msghdr *msg; + u32 pkt_len; + u16 type; + u16 op; + u32 flags; + bool reply; +}; + +struct virtio_transport { + /* This must be the first field */ + struct vsock_transport transport; + + /* Takes ownership of the packet */ + int (*send_pkt)(struct virtio_vsock_pkt *pkt); +}; + +ssize_t +virtio_transport_stream_dequeue(struct vsock_sock *vsk, + struct msghdr *msg, + size_t len, + int type); +int +virtio_transport_dgram_dequeue(struct vsock_sock *vsk, + struct msghdr *msg, + size_t len, int flags); + +s64 virtio_transport_stream_has_data(struct vsock_sock *vsk); +s64 virtio_transport_stream_has_space(struct vsock_sock *vsk); + +int virtio_transport_do_socket_init(struct vsock_sock *vsk, + struct vsock_sock *psk); +u64 virtio_transport_get_buffer_size(struct vsock_sock *vsk); +u64 virtio_transport_get_min_buffer_size(struct vsock_sock *vsk); +u64
virtio_transport_get_max_buffer_size(struct vsock_sock *vsk); +void virtio_transport_set_buffer_size(struct vsock_sock *vsk, u64 val); +void virtio_transport_set_min_buffer_size(struct vsock_sock *vsk, u64 val); +void virtio_transport_set_max_buffer_size(struct vsock_sock *vs, u64 val); +int +virtio_transport_notify_poll_in(struct vsock_sock *vsk, + size_t target, + bool *data_ready_now); +int +virtio_transport_notify_poll_out(struct vsock_sock *vsk, + size_t target, + bool *space_available_now); + +int virtio_transport_notify_recv_init(struct vsock_sock *vsk, + size_t target, struct vsock_transport_recv_notify_data *data); +int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk, + size_t target, struct vsock_transport_recv_notify_data *data); +int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk, + size_t target, struct vsock_transport_recv_notify_data *data); +int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk, + size_t target, ssize_t copied, bool data_read, + struct vsock_transport_recv_notify_data *data); +int virtio_transport_notify_send_init(struct vsock_sock *vsk, + struct vsock_transport_send_notify_data *data); +int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk, + struct vsock_transport_send_notify_data *data); +int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk, + struct vsock_transport_send_notify_data *data); +int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk, + ssize_t written, struct vsock_transport_send_notify_data *data); + +u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk); +bool virtio_transport_stream_is_active(struct vsock_sock *vsk); +bool virtio_transport_stream_allow(u32 cid, u32 port); +int virtio_transport_dgram_bind(struct vsock_sock *vsk, + struct sockaddr_vm *addr); +bool virtio_transport_dgram_allow(u32 cid, u32 port); + +int virtio_transport_connect(struct vsock_sock *vsk); + +int virtio_transport_shutdown(struct vsock_sock *vsk, int mode); + +void virtio_transport_release(struct vsock_sock *vsk); + +ssize_t +virtio_transport_stream_enqueue(struct vsock_sock *vsk, + struct msghdr *msg, + size_t len); +int +virtio_transport_dgram_enqueue(struct vsock_sock *vsk, + struct sockaddr_vm *remote_addr, + struct msghdr *msg, + size_t len); + +void virtio_transport_destruct(struct vsock_sock *vsk); + +void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt); +void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt); +void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt); +u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted); +void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit); + +#endif /* _LINUX_VIRTIO_VSOCK_H */ diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index ec084321fe09..4d6ec58a8d45 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -23,21 +23,23 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, FOR_ALL_ZONES(PGALLOC), + FOR_ALL_ZONES(ALLOCSTALL), + FOR_ALL_ZONES(PGSCAN_SKIP), PGFREE, PGACTIVATE, PGDEACTIVATE, PGFAULT, PGMAJFAULT, PGLAZYFREED, - FOR_ALL_ZONES(PGREFILL), - FOR_ALL_ZONES(PGSTEAL_KSWAPD), - FOR_ALL_ZONES(PGSTEAL_DIRECT), - FOR_ALL_ZONES(PGSCAN_KSWAPD), - FOR_ALL_ZONES(PGSCAN_DIRECT), + PGREFILL, + PGSTEAL_KSWAPD, + PGSTEAL_DIRECT, + PGSCAN_KSWAPD, + PGSCAN_DIRECT, PGSCAN_DIRECT_THROTTLE, #ifdef CONFIG_NUMA PGSCAN_ZONE_RECLAIM_FAILED, #endif PGINODESTEAL, SLABS_SCANNED, 
KSWAPD_INODESTEAL, KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY, - PAGEOUTRUN, ALLOCSTALL, PGROTATED, + PAGEOUTRUN, PGROTATED, DROP_PAGECACHE, DROP_SLAB, #ifdef CONFIG_NUMA_BALANCING NUMA_PTE_UPDATES, @@ -70,6 +72,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, THP_FAULT_FALLBACK, THP_COLLAPSE_ALLOC, THP_COLLAPSE_ALLOC_FAILED, + THP_FILE_ALLOC, + THP_FILE_MAPPED, THP_SPLIT_PAGE, THP_SPLIT_PAGE_FAILED, THP_DEFERRED_SPLIT_PAGE, @@ -100,4 +104,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, NR_VM_EVENT_ITEMS }; +#ifndef CONFIG_TRANSPARENT_HUGEPAGE +#define THP_FILE_ALLOC ({ BUILD_BUG(); 0; }) +#define THP_FILE_MAPPED ({ BUILD_BUG(); 0; }) +#endif + #endif /* VM_EVENT_ITEM_H_INCLUDED */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index d1f1d338af20..3d9d786a943c 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -4,10 +4,12 @@ #include <linux/spinlock.h> #include <linux/init.h> #include <linux/list.h> +#include <linux/llist.h> #include <asm/page.h> /* pgprot_t */ #include <linux/rbtree.h> struct vm_area_struct; /* vma defining user mapping in mm_types.h */ +struct notifier_block; /* in notifier.h */ /* bits in flags of vmalloc's vm_struct below */ #define VM_IOREMAP 0x00000001 /* ioremap() and friends */ @@ -44,7 +46,7 @@ struct vmap_area { unsigned long flags; struct rb_node rb_node; /* address sorted rbtree */ struct list_head list; /* address sorted list */ - struct list_head purge_list; /* "lazy purge" list */ + struct llist_node purge_list; /* "lazy purge" list */ struct vm_struct *vm; struct rcu_head rcu_head; }; @@ -187,4 +189,7 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) #define VMALLOC_TOTAL 0UL #endif +int register_vmap_purge_notifier(struct notifier_block *nb); +int unregister_vmap_purge_notifier(struct notifier_block *nb); + #endif /* _LINUX_VMALLOC_H */ diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 73fae8c4a5fb..613771909b6e 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -101,25 +101,42 @@ static inline void vm_events_fold_cpu(int cpu) #define count_vm_vmacache_event(x) do {} while (0) #endif -#define __count_zone_vm_events(item, zone, delta) \ - __count_vm_events(item##_NORMAL - ZONE_NORMAL + \ - zone_idx(zone), delta) +#define __count_zid_vm_events(item, zid, delta) \ + __count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta) /* - * Zone based page accounting with per cpu differentials. + * Zone and node-based page accounting with per cpu differentials. 
*/ -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; +extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS]; +extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS]; static inline void zone_page_state_add(long x, struct zone *zone, enum zone_stat_item item) { atomic_long_add(x, &zone->vm_stat[item]); - atomic_long_add(x, &vm_stat[item]); + atomic_long_add(x, &vm_zone_stat[item]); +} + +static inline void node_page_state_add(long x, struct pglist_data *pgdat, + enum node_stat_item item) +{ + atomic_long_add(x, &pgdat->vm_stat[item]); + atomic_long_add(x, &vm_node_stat[item]); } static inline unsigned long global_page_state(enum zone_stat_item item) { - long x = atomic_long_read(&vm_stat[item]); + long x = atomic_long_read(&vm_zone_stat[item]); +#ifdef CONFIG_SMP + if (x < 0) + x = 0; +#endif + return x; +} + +static inline unsigned long global_node_page_state(enum node_stat_item item) +{ + long x = atomic_long_read(&vm_node_stat[item]); #ifdef CONFIG_SMP if (x < 0) x = 0; @@ -160,39 +177,70 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone, return x; } -#ifdef CONFIG_NUMA +static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat, + enum node_stat_item item) +{ + long x = atomic_long_read(&pgdat->vm_stat[item]); -extern unsigned long node_page_state(int node, enum zone_stat_item item); -extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp); +#ifdef CONFIG_SMP + int cpu; + for_each_online_cpu(cpu) + x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item]; -#else + if (x < 0) + x = 0; +#endif + return x; +} -#define node_page_state(node, item) global_page_state(item) -#define zone_statistics(_zl, _z, gfp) do { } while (0) +#ifdef CONFIG_NUMA +extern unsigned long sum_zone_node_page_state(int node, + enum zone_stat_item item); +extern unsigned long node_page_state(struct pglist_data *pgdat, + enum node_stat_item item); +#else +#define sum_zone_node_page_state(node, item) global_page_state(item) +#define node_page_state(node, item) global_node_page_state(item) #endif /* CONFIG_NUMA */ #define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d) #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d)) +#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d) +#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d)) #ifdef CONFIG_SMP void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long); void __inc_zone_page_state(struct page *, enum zone_stat_item); void __dec_zone_page_state(struct page *, enum zone_stat_item); +void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long); +void __inc_node_page_state(struct page *, enum node_stat_item); +void __dec_node_page_state(struct page *, enum node_stat_item); + void mod_zone_page_state(struct zone *, enum zone_stat_item, long); void inc_zone_page_state(struct page *, enum zone_stat_item); void dec_zone_page_state(struct page *, enum zone_stat_item); -extern void inc_zone_state(struct zone *, enum zone_stat_item); +void mod_node_page_state(struct pglist_data *, enum node_stat_item, long); +void inc_node_page_state(struct page *, enum node_stat_item); +void dec_node_page_state(struct page *, enum node_stat_item); + +extern void inc_node_state(struct pglist_data *, enum node_stat_item); extern void __inc_zone_state(struct zone *, enum zone_stat_item); +extern void __inc_node_state(struct pglist_data *, enum node_stat_item); extern void dec_zone_state(struct 
zone *, enum zone_stat_item); extern void __dec_zone_state(struct zone *, enum zone_stat_item); +extern void __dec_node_state(struct pglist_data *, enum node_stat_item); void quiet_vmstat(void); void cpu_vm_stats_fold(int cpu); void refresh_zone_stat_thresholds(void); +struct ctl_table; +int vmstat_refresh(struct ctl_table *, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); + void drain_zonestat(struct zone *zone, struct per_cpu_pageset *); int calculate_pressure_threshold(struct zone *zone); @@ -211,16 +259,34 @@ static inline void __mod_zone_page_state(struct zone *zone, zone_page_state_add(delta, zone, item); } +static inline void __mod_node_page_state(struct pglist_data *pgdat, + enum node_stat_item item, int delta) +{ + node_page_state_add(delta, pgdat, item); +} + static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item) { atomic_long_inc(&zone->vm_stat[item]); - atomic_long_inc(&vm_stat[item]); + atomic_long_inc(&vm_zone_stat[item]); +} + +static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) +{ + atomic_long_inc(&pgdat->vm_stat[item]); + atomic_long_inc(&vm_node_stat[item]); } static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item) { atomic_long_dec(&zone->vm_stat[item]); - atomic_long_dec(&vm_stat[item]); + atomic_long_dec(&vm_zone_stat[item]); +} + +static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) +{ + atomic_long_dec(&pgdat->vm_stat[item]); + atomic_long_dec(&vm_node_stat[item]); } static inline void __inc_zone_page_state(struct page *page, @@ -229,12 +295,26 @@ static inline void __inc_zone_page_state(struct page *page, __inc_zone_state(page_zone(page), item); } +static inline void __inc_node_page_state(struct page *page, + enum node_stat_item item) +{ + __inc_node_state(page_pgdat(page), item); +} + + static inline void __dec_zone_page_state(struct page *page, enum zone_stat_item item) { __dec_zone_state(page_zone(page), item); } +static inline void __dec_node_page_state(struct page *page, + enum node_stat_item item) +{ + __dec_node_state(page_pgdat(page), item); +} + + /* * We only use atomic operations to update counters. So there is no need to * disable interrupts. 
@@ -243,7 +323,12 @@ static inline void __dec_zone_page_state(struct page *page, #define dec_zone_page_state __dec_zone_page_state #define mod_zone_page_state __mod_zone_page_state +#define inc_node_page_state __inc_node_page_state +#define dec_node_page_state __dec_node_page_state +#define mod_node_page_state __mod_node_page_state + #define inc_zone_state __inc_zone_state +#define inc_node_state __inc_node_state #define dec_zone_state __dec_zone_state #define set_pgdat_percpu_threshold(pgdat, callback) { } diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 8d7634247fb4..6abd24f258bc 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -45,7 +45,7 @@ void poke_blanked_console(void); int con_font_op(struct vc_data *vc, struct console_font_op *op); int con_set_cmap(unsigned char __user *cmap); int con_get_cmap(unsigned char __user *cmap); -void scrollback(struct vc_data *vc, int lines); +void scrollback(struct vc_data *vc); void scrollfront(struct vc_data *vc, int lines); void clear_buffer_attributes(struct vc_data *vc); void update_region(struct vc_data *vc, unsigned long start, int count); @@ -59,14 +59,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg); #ifdef CONFIG_CONSOLE_TRANSLATIONS /* consolemap.c */ -struct unimapinit; struct unipair; int con_set_trans_old(unsigned char __user * table); int con_get_trans_old(unsigned char __user * table); int con_set_trans_new(unsigned short __user * table); int con_get_trans_new(unsigned short __user * table); -int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui); +int con_clear_unimap(struct vc_data *vc); int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list); int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list); int con_set_default_unimap(struct vc_data *vc); @@ -92,7 +91,7 @@ static inline int con_get_trans_new(unsigned short __user *table) { return -EINVAL; } -static inline int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui) +static inline int con_clear_unimap(struct vc_data *vc) { return 0; } diff --git a/include/linux/vtime.h b/include/linux/vtime.h index fa2196990f84..aa9bfea8804a 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h @@ -12,11 +12,9 @@ struct task_struct; /* * vtime_accounting_cpu_enabled() definitions/declarations */ -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE +#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) static inline bool vtime_accounting_cpu_enabled(void) { return true; } -#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN) /* * Checks if vtime is enabled on some CPU. Cputime readers want to be careful * in that case and compute the tickless cputime. 
@@ -37,11 +35,9 @@ static inline bool vtime_accounting_cpu_enabled(void) return false; } -#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */ - -#ifndef CONFIG_VIRT_CPU_ACCOUNTING +#else /* !CONFIG_VIRT_CPU_ACCOUNTING */ static inline bool vtime_accounting_cpu_enabled(void) { return false; } -#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ +#endif /* @@ -64,35 +60,15 @@ extern void vtime_account_system(struct task_struct *tsk); extern void vtime_account_idle(struct task_struct *tsk); extern void vtime_account_user(struct task_struct *tsk); -#ifdef __ARCH_HAS_VTIME_ACCOUNT -extern void vtime_account_irq_enter(struct task_struct *tsk); -#else -extern void vtime_common_account_irq_enter(struct task_struct *tsk); -static inline void vtime_account_irq_enter(struct task_struct *tsk) -{ - if (vtime_accounting_cpu_enabled()) - vtime_common_account_irq_enter(tsk); -} -#endif /* __ARCH_HAS_VTIME_ACCOUNT */ - #else /* !CONFIG_VIRT_CPU_ACCOUNTING */ static inline void vtime_task_switch(struct task_struct *prev) { } static inline void vtime_account_system(struct task_struct *tsk) { } static inline void vtime_account_user(struct task_struct *tsk) { } -static inline void vtime_account_irq_enter(struct task_struct *tsk) { } #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN extern void arch_vtime_task_switch(struct task_struct *tsk); -extern void vtime_gen_account_irq_exit(struct task_struct *tsk); - -static inline void vtime_account_irq_exit(struct task_struct *tsk) -{ - if (vtime_accounting_cpu_enabled()) - vtime_gen_account_irq_exit(tsk); -} - extern void vtime_user_enter(struct task_struct *tsk); static inline void vtime_user_exit(struct task_struct *tsk) @@ -103,11 +79,6 @@ extern void vtime_guest_enter(struct task_struct *tsk); extern void vtime_guest_exit(struct task_struct *tsk); extern void vtime_init_idle(struct task_struct *tsk, int cpu); #else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */ -static inline void vtime_account_irq_exit(struct task_struct *tsk) -{ - /* On hard|softirq exit we always account to hard|softirq cputime */ - vtime_account_system(tsk); -} static inline void vtime_user_enter(struct task_struct *tsk) { } static inline void vtime_user_exit(struct task_struct *tsk) { } static inline void vtime_guest_enter(struct task_struct *tsk) { } @@ -115,6 +86,19 @@ static inline void vtime_guest_exit(struct task_struct *tsk) { } static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { } #endif +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE +extern void vtime_account_irq_enter(struct task_struct *tsk); +static inline void vtime_account_irq_exit(struct task_struct *tsk) +{ + /* On hard|softirq exit we always account to hard|softirq cputime */ + vtime_account_system(tsk); +} +#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ +static inline void vtime_account_irq_enter(struct task_struct *tsk) { } +static inline void vtime_account_irq_exit(struct task_struct *tsk) { } +#endif + + #ifdef CONFIG_IRQ_TIME_ACCOUNTING extern void irqtime_account_irq(struct task_struct *tsk); #else diff --git a/include/linux/wait.h b/include/linux/wait.h index 27d7a0ab5da3..c3ff74d764fa 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -600,6 +600,19 @@ do { \ __ret; \ }) +#define __wait_event_killable_exclusive(wq, condition) \ + ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \ + schedule()) + +#define wait_event_killable_exclusive(wq, condition) \ +({ \ + int __ret = 0; \ + might_sleep(); \ + if (!(condition)) \ + __ret = __wait_event_killable_exclusive(wq, condition); \ + __ret; \ +}) 
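/*
 * Editor's note: a minimal usage sketch (not part of this patch) for the
 * wait_event_killable_exclusive() helper added above. The names my_dev,
 * dev->wq and dev->data_ready are hypothetical, and dev->wq is assumed
 * to have been set up with init_waitqueue_head().
 */
#include <linux/wait.h>

struct my_dev {
	wait_queue_head_t wq;
	bool data_ready;
};

static int my_dev_wait_for_data(struct my_dev *dev)
{
	/*
	 * Sleep in TASK_KILLABLE as an exclusive waiter: only a fatal
	 * signal aborts the wait (returning -ERESTARTSYS), and wake_up()
	 * wakes at most one exclusive waiter, avoiding a thundering herd.
	 */
	return wait_event_killable_exclusive(dev->wq, dev->data_ready);
}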
+ #define __wait_event_freezable_exclusive(wq, condition) \ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 51732d6c9555..7047bc7f8106 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -66,7 +66,8 @@ struct watchdog_ops { * as configurable from user space. Only relevant if * max_hw_heartbeat_ms is not provided. * @min_hw_heartbeat_ms: - * Minimum time between heartbeats, in milli-seconds. + * Hardware limit for minimum time between heartbeats, + * in milli-seconds. * @max_hw_heartbeat_ms: * Hardware limit for maximum timeout, in milli-seconds. * Replaces max_timeout if specified. @@ -180,4 +181,7 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd, extern int watchdog_register_device(struct watchdog_device *); extern void watchdog_unregister_device(struct watchdog_device *); +/* devres register variant */ +int devm_watchdog_register_device(struct device *dev, struct watchdog_device *); + #endif /* ifndef _LINUX_WATCHDOG_H */ diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index ca73c503b92a..26cc1df280d6 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -625,4 +625,10 @@ void wq_watchdog_touch(int cpu); static inline void wq_watchdog_touch(int cpu) { } #endif /* CONFIG_WQ_WATCHDOG */ +#ifdef CONFIG_SMP +int workqueue_prepare_cpu(unsigned int cpu); +int workqueue_online_cpu(unsigned int cpu); +int workqueue_offline_cpu(unsigned int cpu); +#endif + #endif diff --git a/include/linux/writeback.h b/include/linux/writeback.h index d0b5ca5d4e08..fc1e16c25a29 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -320,7 +320,7 @@ void laptop_mode_timer_fn(unsigned long data); static inline void laptop_sync_completion(void) { } #endif void throttle_vm_writeout(gfp_t gfp_mask); -bool zone_dirty_ok(struct zone *zone); +bool node_dirty_ok(struct pglist_data *pgdat); int wb_domain_init(struct wb_domain *dom, gfp_t gfp); #ifdef CONFIG_CGROUP_WRITEBACK void wb_domain_exit(struct wb_domain *dom); @@ -384,4 +384,7 @@ void tag_pages_for_writeback(struct address_space *mapping, void account_page_redirty(struct page *page); +void sb_mark_inode_writeback(struct inode *inode); +void sb_clear_inode_writeback(struct inode *inode); + #endif /* WRITEBACK_H */ diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h index 760399a470bd..2bb5deb0012e 100644 --- a/include/linux/ww_mutex.h +++ b/include/linux/ww_mutex.h @@ -173,14 +173,14 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx) mutex_release(&ctx->dep_map, 0, _THIS_IP_); DEBUG_LOCKS_WARN_ON(ctx->acquired); - if (!config_enabled(CONFIG_PROVE_LOCKING)) + if (!IS_ENABLED(CONFIG_PROVE_LOCKING)) /* * lockdep will normally handle this, * but fail without anyway */ ctx->done_acquire = 1; - if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC)) + if (!IS_ENABLED(CONFIG_DEBUG_LOCK_ALLOC)) /* ensure ww_acquire_fini will still fail if called twice */ ctx->acquired = ~0U; #endif diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 4457541de3c9..94079bab9243 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -30,10 +30,11 @@ struct xattr_handler { int flags; /* fs private flags */ bool (*list)(struct dentry *dentry); int (*get)(const struct xattr_handler *, struct dentry *dentry, - const char *name, void *buffer, size_t size); + struct inode *inode, const char *name, void *buffer, + size_t size); int (*set)(const struct xattr_handler *, struct dentry *dentry, 
- const char *name, const void *buffer, size_t size, - int flags); + struct inode *inode, const char *name, const void *buffer, + size_t size, int flags); }; const char *xattr_full_name(const struct xattr_handler *, const char *); @@ -51,9 +52,10 @@ int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, i int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); int vfs_removexattr(struct dentry *, const char *); -ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size); +ssize_t generic_getxattr(struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size); ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); -int generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); +int generic_setxattr(struct dentry *dentry, struct inode *inode, + const char *name, const void *value, size_t size, int flags); int generic_removexattr(struct dentry *dentry, const char *name); ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value, size_t size, gfp_t flags); diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 34eb16098a33..57a8e98f2708 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -41,10 +41,10 @@ struct zs_pool_stats { struct zs_pool; -struct zs_pool *zs_create_pool(const char *name, gfp_t flags); +struct zs_pool *zs_create_pool(const char *name); void zs_destroy_pool(struct zs_pool *pool); -unsigned long zs_malloc(struct zs_pool *pool, size_t size); +unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags); void zs_free(struct zs_pool *pool, unsigned long obj); void *zs_map_object(struct zs_pool *pool, unsigned long handle,
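Editor's note: a short sketch, not from this patch, of the reworked zsmalloc API recorded in the hunk above, where the GFP flags move from pool creation to each individual allocation. The pool name "demo" and the 128-byte size are illustrative assumptions.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/zsmalloc.h>

static int zsmalloc_demo(void)
{
	struct zs_pool *pool;
	unsigned long handle;

	/* zs_create_pool() no longer takes a gfp_t */
	pool = zs_create_pool("demo");
	if (!pool)
		return -ENOMEM;

	/* flags are now chosen per allocation instead of per pool */
	handle = zs_malloc(pool, 128, GFP_KERNEL);
	if (!handle) {
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}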