Diffstat (limited to 'include/linux')
228 files changed, 6461 insertions, 2291 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 61a3d90f32b3..43b28f04a1f5 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -220,10 +220,6 @@ int __init acpi_table_parse_entries(char *id, unsigned long table_size, int entry_id, acpi_tbl_entry_handler handler, unsigned int max_entries); -int __init acpi_table_parse_entries(char *id, unsigned long table_size, - int entry_id, - acpi_tbl_entry_handler handler, - unsigned int max_entries); int __init acpi_table_parse_entries_array(char *id, unsigned long table_size, struct acpi_subtable_proc *proc, int proc_num, unsigned int max_entries); @@ -469,6 +465,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); #define OSC_SB_CPCV2_SUPPORT 0x00000040 #define OSC_SB_PCLPI_SUPPORT 0x00000080 #define OSC_SB_OSLPI_SUPPORT 0x00000100 +#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000 extern bool osc_sb_apei_support_acked; extern bool osc_pc_lpi_support_confirmed; diff --git a/include/linux/ahci-remap.h b/include/linux/ahci-remap.h new file mode 100644 index 000000000000..62be3a40239d --- /dev/null +++ b/include/linux/ahci-remap.h @@ -0,0 +1,28 @@ +#ifndef _LINUX_AHCI_REMAP_H +#define _LINUX_AHCI_REMAP_H + +#include <linux/sizes.h> + +#define AHCI_VSCAP 0xa4 +#define AHCI_REMAP_CAP 0x800 + +/* device class code */ +#define AHCI_REMAP_N_DCC 0x880 + +/* remap-device base relative to ahci-bar */ +#define AHCI_REMAP_N_OFFSET SZ_16K +#define AHCI_REMAP_N_SIZE SZ_16K + +#define AHCI_MAX_REMAP 3 + +static inline unsigned int ahci_remap_dcc(int i) +{ + return AHCI_REMAP_N_DCC + i * 0x80; +} + +static inline unsigned int ahci_remap_base(int i) +{ + return AHCI_REMAP_N_OFFSET + i * AHCI_REMAP_N_SIZE; +} + +#endif /* _LINUX_AHCI_REMAP_H */ diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h index 9d8031257a90..c70aac13244a 100644 --- a/include/linux/alarmtimer.h +++ b/include/linux/alarmtimer.h @@ -10,7 +10,12 @@ enum alarmtimer_type { ALARM_REALTIME, ALARM_BOOTTIME, + /* Supported types end here */ ALARM_NUMTYPE, + + /* Used for tracing information. No usable types. */ + ALARM_REALTIME_FREEZER, + ALARM_BOOTTIME_FREEZER, }; enum alarmtimer_restart { diff --git a/include/linux/amba/pl061.h b/include/linux/amba/pl061.h deleted file mode 100644 index fb83c0453489..000000000000 --- a/include/linux/amba/pl061.h +++ /dev/null @@ -1,16 +0,0 @@ -#include <linux/types.h> - -/* platform data for the PL061 GPIO driver */ - -struct pl061_platform_data { - /* number of the first GPIO */ - unsigned gpio_base; - - /* number of the first IRQ. - * If the IRQ functionality in not desired this must be set to 0. 
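The new include/linux/ahci-remap.h above is self-contained, so its two helpers can be exercised directly. A minimal sketch (the function name and pr_info output are illustrative, not from any real driver) of how a probe path might walk the possible remapped-device slots behind an AHCI BAR:

#include <linux/ahci-remap.h>
#include <linux/printk.h>

static void sketch_dump_remap_layout(void)
{
	int i;

	for (i = 0; i < AHCI_MAX_REMAP; i++) {
		/* both offsets are relative to the AHCI BAR */
		pr_info("remap slot %d: class code reg at 0x%x, 16K window at 0x%x\n",
			i, ahci_remap_dcc(i), ahci_remap_base(i));
	}
}

For slot 0 this yields 0x880 and 0x4000 (SZ_16K), matching the layout encoded in the defines.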
- */ - unsigned irq_base; - - u8 directions; /* startup directions, 1: out, 0: in */ - u8 values; /* startup values */ -}; diff --git a/include/linux/ata.h b/include/linux/ata.h index fdb180367ba1..af6859b3a93d 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -348,6 +348,7 @@ enum { ATA_LOG_DEVSLP_DETO = 0x01, ATA_LOG_DEVSLP_VALID = 0x07, ATA_LOG_DEVSLP_VALID_MASK = 0x80, + ATA_LOG_NCQ_PRIO_OFFSET = 0x09, /* NCQ send and receive log */ ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET = 0x00, @@ -940,6 +941,11 @@ static inline bool ata_id_has_ncq_non_data(const u16 *id) return id[ATA_ID_SATA_CAPABILITY_2] & BIT(5); } +static inline bool ata_id_has_ncq_prio(const u16 *id) +{ + return id[ATA_ID_SATA_CAPABILITY] & BIT(12); +} + static inline bool ata_id_has_trim(const u16 *id) { if (ata_id_major_version(id) >= 7 && diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index c357f27d5483..e850e76acaaf 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -116,6 +116,8 @@ struct bdi_writeback { struct list_head work_list; struct delayed_work dwork; /* work item used for writeback */ + unsigned long dirty_sleep; /* last wait */ + struct list_head bdi_node; /* anchored at bdi->wb_list */ #ifdef CONFIG_CGROUP_WRITEBACK @@ -136,12 +138,13 @@ struct bdi_writeback { struct backing_dev_info { struct list_head bdi_list; unsigned long ra_pages; /* max readahead in PAGE_SIZE units */ - unsigned int capabilities; /* Device capabilities */ + unsigned long io_pages; /* max allowed IO size */ congested_fn *congested_fn; /* Function pointer if device is md/dm */ void *congested_data; /* Pointer to aux data for congested func */ char *name; + unsigned int capabilities; /* Device capabilities */ unsigned int min_ratio; unsigned int max_ratio, max_prop_frac; diff --git a/include/linux/bio.h b/include/linux/bio.h index 97cb48f03dc7..7cf8a6c70a3f 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -63,6 +63,12 @@ #define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio))) /* + * Return the data direction, READ or WRITE. + */ +#define bio_data_dir(bio) \ + (op_is_write(bio_op(bio)) ? WRITE : READ) + +/* * Check whether this bio carries any data or not. A NULL bio is allowed. 
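Regarding the ata_id_has_ncq_prio() helper added to ata.h above: it tests bit 12 of IDENTIFY word 76 (ATA_ID_SATA_CAPABILITY). A hedged sketch of a support check, pairing it with the pre-existing ata_id_has_ncq() since NCQ priority is meaningless without NCQ itself (the function name is illustrative):

static bool sketch_ncq_prio_usable(const u16 *id)
{
	/* word 76: bit 8 = NCQ supported, bit 12 = NCQ priority info */
	return ata_id_has_ncq(id) && ata_id_has_ncq_prio(id);
}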
*/ static inline bool bio_has_data(struct bio *bio) @@ -70,7 +76,8 @@ static inline bool bio_has_data(struct bio *bio) if (bio && bio->bi_iter.bi_size && bio_op(bio) != REQ_OP_DISCARD && - bio_op(bio) != REQ_OP_SECURE_ERASE) + bio_op(bio) != REQ_OP_SECURE_ERASE && + bio_op(bio) != REQ_OP_WRITE_ZEROES) return true; return false; @@ -80,18 +87,8 @@ static inline bool bio_no_advance_iter(struct bio *bio) { return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE || - bio_op(bio) == REQ_OP_WRITE_SAME; -} - -static inline bool bio_is_rw(struct bio *bio) -{ - if (!bio_has_data(bio)) - return false; - - if (bio_no_advance_iter(bio)) - return false; - - return true; + bio_op(bio) == REQ_OP_WRITE_SAME || + bio_op(bio) == REQ_OP_WRITE_ZEROES; } static inline bool bio_mergeable(struct bio *bio) @@ -193,18 +190,20 @@ static inline unsigned bio_segments(struct bio *bio) struct bvec_iter iter; /* - * We special case discard/write same, because they interpret bi_size - * differently: + * We special case discard/write same/write zeroes, because they + * interpret bi_size differently: */ - if (bio_op(bio) == REQ_OP_DISCARD) - return 1; - - if (bio_op(bio) == REQ_OP_SECURE_ERASE) - return 1; - - if (bio_op(bio) == REQ_OP_WRITE_SAME) + switch (bio_op(bio)) { + case REQ_OP_DISCARD: + case REQ_OP_SECURE_ERASE: + case REQ_OP_WRITE_ZEROES: + return 0; + case REQ_OP_WRITE_SAME: return 1; + default: + break; + } bio_for_each_segment(bv, bio, iter) segs++; @@ -409,6 +408,8 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask) } +extern blk_qc_t submit_bio(struct bio *); + extern void bio_endio(struct bio *); static inline void bio_io_error(struct bio *bio) @@ -423,13 +424,15 @@ extern int bio_phys_segments(struct request_queue *, struct bio *); extern int submit_bio_wait(struct bio *bio); extern void bio_advance(struct bio *, unsigned); -extern void bio_init(struct bio *); +extern void bio_init(struct bio *bio, struct bio_vec *table, + unsigned short max_vecs); extern void bio_reset(struct bio *); void bio_chain(struct bio *, struct bio *); extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, unsigned int, unsigned int); +int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); struct rq_map_data; extern struct bio *bio_map_user_iov(struct request_queue *, const struct iov_iter *, gfp_t); diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 3bf5d33800ab..01b62e7bac74 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -581,15 +581,14 @@ static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat) /** * blkg_rwstat_add - add a value to a blkg_rwstat * @rwstat: target blkg_rwstat - * @op: REQ_OP - * @op_flags: rq_flag_bits + * @op: REQ_OP and flags * @val: value to add * * Add @val to @rwstat. The counters are chosen according to @rw. The * caller is responsible for synchronizing calls to this function. 
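bio_init() above now takes the bvec table and its capacity, instead of callers poking bi_io_vec/bi_max_vecs afterwards. A minimal sketch of an on-stack bio under the new signature, assuming a single-page synchronous read (bi_opf is assigned directly, since bio_set_op_attrs() is marked obsolete later in this patch):

static int sketch_sync_read_page(struct block_device *bdev, sector_t sector,
				 struct page *page)
{
	struct bio bio;
	struct bio_vec bvec;

	bio_init(&bio, &bvec, 1);	/* table and max_vecs set at init time */
	bio.bi_bdev = bdev;
	bio.bi_iter.bi_sector = sector;
	bio.bi_opf = REQ_OP_READ;
	bio_add_page(&bio, page, PAGE_SIZE, 0);

	return submit_bio_wait(&bio);
}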
*/ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, - int op, int op_flags, uint64_t val) + unsigned int op, uint64_t val) { struct percpu_counter *cnt; @@ -600,7 +599,7 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH); - if (op_flags & REQ_SYNC) + if (op_is_sync(op)) cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC]; else cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC]; @@ -705,9 +704,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, if (!throtl) { blkg = blkg ?: q->root_blkg; - blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_opf, + blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf, bio->bi_iter.bi_size); - blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_opf, 1); + blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1); } rcu_read_unlock(); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 535ab2e13d2e..4a2ab5d99ff7 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -3,6 +3,7 @@ #include <linux/blkdev.h> #include <linux/sbitmap.h> +#include <linux/srcu.h> struct blk_mq_tags; struct blk_flush_queue; @@ -35,6 +36,8 @@ struct blk_mq_hw_ctx { struct blk_mq_tags *tags; + struct srcu_struct queue_rq_srcu; + unsigned long queued; unsigned long run; #define BLK_MQ_MAX_DISPATCH_ORDER 7 @@ -215,18 +218,20 @@ void blk_mq_start_request(struct request *rq); void blk_mq_end_request(struct request *rq, int error); void __blk_mq_end_request(struct request *rq, int error); -void blk_mq_requeue_request(struct request *rq); -void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); -void blk_mq_cancel_requeue_work(struct request_queue *q); +void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); +void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, + bool kick_requeue_list); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); void blk_mq_abort_requeue_list(struct request_queue *q); void blk_mq_complete_request(struct request *rq, int error); +bool blk_mq_queue_stopped(struct request_queue *q); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_stop_hw_queues(struct request_queue *q); void blk_mq_start_hw_queues(struct request_queue *q); +void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void blk_mq_run_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); @@ -237,6 +242,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q); void blk_mq_freeze_queue_start(struct request_queue *q); int blk_mq_reinit_tagset(struct blk_mq_tag_set *set); +int blk_mq_map_queues(struct blk_mq_tag_set *set); void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); /* diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index cd395ecec99d..519ea2c9df61 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -17,7 +17,6 @@ struct io_context; struct cgroup_subsys_state; typedef void (bio_end_io_t) (struct bio *); -#ifdef CONFIG_BLOCK /* * main unit of I/O for the block layer and lower layers (ie drivers and * stacking drivers) @@ -88,24 +87,6 @@ struct bio { struct bio_vec bi_inline_vecs[0]; }; -#define BIO_OP_SHIFT (8 * FIELD_SIZEOF(struct bio, bi_opf) - REQ_OP_BITS) -#define bio_flags(bio) 
((bio)->bi_opf & ((1 << BIO_OP_SHIFT) - 1)) -#define bio_op(bio) ((bio)->bi_opf >> BIO_OP_SHIFT) - -#define bio_set_op_attrs(bio, op, op_flags) do { \ - if (__builtin_constant_p(op)) \ - BUILD_BUG_ON((op) + 0U >= (1U << REQ_OP_BITS)); \ - else \ - WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS)); \ - if (__builtin_constant_p(op_flags)) \ - BUILD_BUG_ON((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \ - else \ - WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \ - (bio)->bi_opf = bio_flags(bio); \ - (bio)->bi_opf |= (((op) + 0U) << BIO_OP_SHIFT); \ - (bio)->bi_opf |= (op_flags); \ -} while (0) - #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) /* @@ -119,6 +100,8 @@ struct bio { #define BIO_QUIET 6 /* Make BIO Quiet */ #define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */ #define BIO_REFFED 8 /* bio has elevated ->bi_cnt */ +#define BIO_THROTTLED 9 /* This bio has already been subjected to + * throttling rules. Don't do it again. */ /* * Flags starting here get preserved by bio_reset() - this includes @@ -142,53 +125,61 @@ struct bio { #define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS) #define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET) -#endif /* CONFIG_BLOCK */ - /* - * Request flags. For use in the cmd_flags field of struct request, and in - * bi_opf of struct bio. Note that some flags are only valid in either one. + * Operations and flags common to the bio and request structures. + * We use 8 bits for encoding the operation, and the remaining 24 for flags. + * + * The least significant bit of the operation number indicates the data + * transfer direction: + * + * - if the least significant bit is set transfers are TO the device + * - if the least significant bit is not set transfers are FROM the device + * + * If an operation does not transfer data the least significant bit has no + * meaning. */ -enum rq_flag_bits { - /* common flags */ - __REQ_FAILFAST_DEV, /* no driver retries of device errors */ +#define REQ_OP_BITS 8 +#define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1) +#define REQ_FLAG_BITS 24 + +enum req_opf { + /* read sectors from the device */ + REQ_OP_READ = 0, + /* write sectors to the device */ + REQ_OP_WRITE = 1, + /* flush the volatile write cache */ + REQ_OP_FLUSH = 2, + /* discard sectors */ + REQ_OP_DISCARD = 3, + /* get zone information */ + REQ_OP_ZONE_REPORT = 4, + /* securely erase sectors */ + REQ_OP_SECURE_ERASE = 5, + /* reset a zone write pointer */ + REQ_OP_ZONE_RESET = 6, + /* write the same sector many times */ + REQ_OP_WRITE_SAME = 7, + /* write the zero-filled sector many times */ + REQ_OP_WRITE_ZEROES = 8, + + REQ_OP_LAST, +}; + +enum req_flag_bits { + __REQ_FAILFAST_DEV = /* no driver retries of device errors */ + REQ_OP_BITS, __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ - __REQ_SYNC, /* request is sync (sync write or read) */ __REQ_META, /* metadata io request */ __REQ_PRIO, /* boost priority in cfq */ - - __REQ_NOIDLE, /* don't anticipate more IO after this one */ + __REQ_NOMERGE, /* don't touch this for merging */ + __REQ_IDLE, /* anticipate more IO after this one */ __REQ_INTEGRITY, /* I/O includes block integrity payload */ __REQ_FUA, /* forced unit access */ __REQ_PREFLUSH, /* request for cache flush */ - - /* bio only flags */ __REQ_RAHEAD, /* read ahead, can fail anytime */ - __REQ_THROTTLED, /* This bio has already been subjected to - * throttling rules. Don't do it again.
*/ - - /* request only flags */ - __REQ_SORTED, /* elevator knows about this request */ - __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ - __REQ_NOMERGE, /* don't touch this for merging */ - __REQ_STARTED, /* drive already may have started this one */ - __REQ_DONTPREP, /* don't call prep for this one */ - __REQ_QUEUED, /* uses queueing */ - __REQ_ELVPRIV, /* elevator private data attached */ - __REQ_FAILED, /* set if the request failed */ - __REQ_QUIET, /* don't worry about errors */ - __REQ_PREEMPT, /* set for "ide_preempt" requests and also - for requests for which the SCSI "quiesce" - state must be ignored. */ - __REQ_ALLOCED, /* request came from our alloc pool */ - __REQ_COPY_USER, /* contains copies of user pages */ - __REQ_FLUSH_SEQ, /* request for flush sequence */ - __REQ_IO_STAT, /* account I/O stat */ - __REQ_MIXED_MERGE, /* merge of different types, fail separately */ - __REQ_PM, /* runtime pm request */ - __REQ_HASHED, /* on IO scheduler merge hash */ - __REQ_MQ_INFLIGHT, /* track inflight for MQ */ + __REQ_BACKGROUND, /* background IO */ __REQ_NR_BITS, /* stops here */ }; @@ -198,54 +189,47 @@ enum rq_flag_bits { #define REQ_SYNC (1ULL << __REQ_SYNC) #define REQ_META (1ULL << __REQ_META) #define REQ_PRIO (1ULL << __REQ_PRIO) -#define REQ_NOIDLE (1ULL << __REQ_NOIDLE) +#define REQ_NOMERGE (1ULL << __REQ_NOMERGE) +#define REQ_IDLE (1ULL << __REQ_IDLE) #define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY) +#define REQ_FUA (1ULL << __REQ_FUA) +#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) +#define REQ_RAHEAD (1ULL << __REQ_RAHEAD) +#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) #define REQ_FAILFAST_MASK \ (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) -#define REQ_COMMON_MASK \ - (REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \ - REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE) -#define REQ_CLONE_MASK REQ_COMMON_MASK -/* This mask is used for both bio and request merge checking */ #define REQ_NOMERGE_FLAGS \ - (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ) + (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA) -#define REQ_RAHEAD (1ULL << __REQ_RAHEAD) -#define REQ_THROTTLED (1ULL << __REQ_THROTTLED) +#define bio_op(bio) \ + ((bio)->bi_opf & REQ_OP_MASK) +#define req_op(req) \ + ((req)->cmd_flags & REQ_OP_MASK) -#define REQ_SORTED (1ULL << __REQ_SORTED) -#define REQ_SOFTBARRIER (1ULL << __REQ_SOFTBARRIER) -#define REQ_FUA (1ULL << __REQ_FUA) -#define REQ_NOMERGE (1ULL << __REQ_NOMERGE) -#define REQ_STARTED (1ULL << __REQ_STARTED) -#define REQ_DONTPREP (1ULL << __REQ_DONTPREP) -#define REQ_QUEUED (1ULL << __REQ_QUEUED) -#define REQ_ELVPRIV (1ULL << __REQ_ELVPRIV) -#define REQ_FAILED (1ULL << __REQ_FAILED) -#define REQ_QUIET (1ULL << __REQ_QUIET) -#define REQ_PREEMPT (1ULL << __REQ_PREEMPT) -#define REQ_ALLOCED (1ULL << __REQ_ALLOCED) -#define REQ_COPY_USER (1ULL << __REQ_COPY_USER) -#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) -#define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ) -#define REQ_IO_STAT (1ULL << __REQ_IO_STAT) -#define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE) -#define REQ_PM (1ULL << __REQ_PM) -#define REQ_HASHED (1ULL << __REQ_HASHED) -#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) - -enum req_op { - REQ_OP_READ, - REQ_OP_WRITE, - REQ_OP_DISCARD, /* request to discard sectors */ - REQ_OP_SECURE_ERASE, /* request to securely erase sectors */ - REQ_OP_WRITE_SAME, /* write same block many times */ - REQ_OP_FLUSH, /* request for cache flush */ -}; +/* obsolete, don't use in new code */ 
+static inline void bio_set_op_attrs(struct bio *bio, unsigned op, + unsigned op_flags) +{ + bio->bi_opf = op | op_flags; +} -#define REQ_OP_BITS 3 +static inline bool op_is_write(unsigned int op) +{ + return (op & 1); +} + +/* + * Reads are always treated as synchronous, as are requests with the FUA or + * PREFLUSH flag. Other operations may be marked as synchronous using the + * REQ_SYNC flag. + */ +static inline bool op_is_sync(unsigned int op) +{ + return (op & REQ_OP_MASK) == REQ_OP_READ || + (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH)); +} typedef unsigned int blk_qc_t; #define BLK_QC_T_NONE -1U @@ -271,4 +255,20 @@ static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie) return cookie & ((1u << BLK_QC_T_SHIFT) - 1); } +struct blk_issue_stat { + u64 time; +}; + +#define BLK_RQ_STAT_BATCH 64 + +struct blk_rq_stat { + s64 mean; + u64 min; + u64 max; + s32 nr_samples; + s32 nr_batch; + u64 batch; + s64 time; +}; + #endif /* __LINUX_BLK_TYPES_H */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c47c358ba052..286b2a264383 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -24,6 +24,7 @@ #include <linux/rcupdate.h> #include <linux/percpu-refcount.h> #include <linux/scatterlist.h> +#include <linux/blkzoned.h> struct module; struct scsi_ioctl_command; @@ -37,6 +38,7 @@ struct bsg_job; struct blkcg_gq; struct blk_flush_queue; struct pr_ops; +struct rq_wb; #define BLKDEV_MIN_RQ 4 #define BLKDEV_MAX_RQ 128 /* Default maximum */ @@ -77,6 +79,55 @@ enum rq_cmd_type_bits { REQ_TYPE_DRV_PRIV, /* driver defined types from here */ }; +/* + * request flags */ +typedef __u32 __bitwise req_flags_t; + +/* elevator knows about this request */ +#define RQF_SORTED ((__force req_flags_t)(1 << 0)) +/* drive already may have started this one */ +#define RQF_STARTED ((__force req_flags_t)(1 << 1)) +/* uses tagged queueing */ +#define RQF_QUEUED ((__force req_flags_t)(1 << 2)) +/* may not be passed by ioscheduler */ +#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3)) +/* request for flush sequence */ +#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4)) +/* merge of different types, fail separately */ +#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5)) +/* track inflight for MQ */ +#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6)) +/* don't call prep for this one */ +#define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) +/* set for "ide_preempt" requests and also for requests for which the SCSI + "quiesce" state must be ignored. */ +#define RQF_PREEMPT ((__force req_flags_t)(1 << 8)) +/* contains copies of user pages */ +#define RQF_COPY_USER ((__force req_flags_t)(1 << 9)) +/* vaguely specified driver internal error. Ignored by the block layer */ +#define RQF_FAILED ((__force req_flags_t)(1 << 10)) +/* don't warn about errors */ +#define RQF_QUIET ((__force req_flags_t)(1 << 11)) +/* elevator private data attached */ +#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12)) +/* account I/O stat */ +#define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) +/* request came from our alloc pool */ +#define RQF_ALLOCED ((__force req_flags_t)(1 << 14)) +/* runtime pm request */ +#define RQF_PM ((__force req_flags_t)(1 << 15)) +/* on IO scheduler merge hash */ +#define RQF_HASHED ((__force req_flags_t)(1 << 16)) +/* IO stats tracking on */ +#define RQF_STATS ((__force req_flags_t)(1 << 17)) +/* Look at ->special_vec for the actual data payload instead of the + bio chain. 
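With the encoding introduced here, the operation sits in the low REQ_OP_BITS of bi_opf/cmd_flags and everything above is flags, so the helpers reduce to mask tests. A small self-check sketch of what they yield (illustrative only, not part of the patch):

static void sketch_opf_encoding(void)
{
	unsigned int opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;

	WARN_ON((opf & REQ_OP_MASK) != REQ_OP_WRITE); /* op in low 8 bits */
	WARN_ON(!op_is_write(opf));        /* direction is the op's LSB */
	WARN_ON(!op_is_sync(opf));         /* REQ_SYNC/REQ_FUA/REQ_PREFLUSH */
	WARN_ON(!op_is_sync(REQ_OP_READ)); /* bare reads count as sync */
}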
*/ +#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) + +/* flags that prevent us from merging requests: */ +#define RQF_NOMERGE_FLAGS \ + (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD) + #define BLK_MAX_CDB 16 /* @@ -97,7 +148,8 @@ struct request { int cpu; unsigned cmd_type; - u64 cmd_flags; + unsigned int cmd_flags; /* op and common flags */ + req_flags_t rq_flags; unsigned long atomic_flags; /* the following two fields are internal, NEVER access directly */ @@ -126,6 +178,7 @@ struct request { */ union { struct rb_node rb_node; /* sort/lookup */ + struct bio_vec special_vec; void *completion_data; }; @@ -151,6 +204,7 @@ struct request { struct gendisk *rq_disk; struct hd_struct *part; unsigned long start_time; + struct blk_issue_stat issue_stat; #ifdef CONFIG_BLK_CGROUP struct request_list *rl; /* rl this rq is alloced from */ unsigned long long start_time_ns; @@ -198,20 +252,6 @@ struct request { struct request *next_rq; }; -#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS) -#define req_op(req) ((req)->cmd_flags >> REQ_OP_SHIFT) - -#define req_set_op(req, op) do { \ - WARN_ON(op >= (1 << REQ_OP_BITS)); \ - (req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1); \ - (req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT); \ -} while (0) - -#define req_set_op_attrs(req, op, flags) do { \ - req_set_op(req, op); \ - (req)->cmd_flags |= flags; \ -} while (0) - static inline unsigned short req_get_ioprio(struct request *req) { return req->ioprio; @@ -261,6 +301,15 @@ struct blk_queue_tag { #define BLK_SCSI_MAX_CMDS (256) #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) +/* + * Zoned block device models (zoned limit). + */ +enum blk_zoned_model { + BLK_ZONED_NONE, /* Regular block device */ + BLK_ZONED_HA, /* Host-aware zoned block device */ + BLK_ZONED_HM, /* Host-managed zoned block device */ +}; + struct queue_limits { unsigned long bounce_pfn; unsigned long seg_boundary_mask; @@ -278,6 +327,7 @@ struct queue_limits { unsigned int max_discard_sectors; unsigned int max_hw_discard_sectors; unsigned int max_write_same_sectors; + unsigned int max_write_zeroes_sectors; unsigned int discard_granularity; unsigned int discard_alignment; @@ -290,8 +340,45 @@ struct queue_limits { unsigned char cluster; unsigned char discard_zeroes_data; unsigned char raid_partial_stripes_expensive; + enum blk_zoned_model zoned; }; +#ifdef CONFIG_BLK_DEV_ZONED + +struct blk_zone_report_hdr { + unsigned int nr_zones; + u8 padding[60]; +}; + +extern int blkdev_report_zones(struct block_device *bdev, + sector_t sector, struct blk_zone *zones, + unsigned int *nr_zones, gfp_t gfp_mask); +extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors, + sector_t nr_sectors, gfp_t gfp_mask); + +extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg); +extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg); + +#else /* CONFIG_BLK_DEV_ZONED */ + +static inline int blkdev_report_zones_ioctl(struct block_device *bdev, + fmode_t mode, unsigned int cmd, + unsigned long arg) +{ + return -ENOTTY; +} + +static inline int blkdev_reset_zones_ioctl(struct block_device *bdev, + fmode_t mode, unsigned int cmd, + unsigned long arg) +{ + return -ENOTTY; +} + +#endif /* CONFIG_BLK_DEV_ZONED */ + struct request_queue { /* * Together with queue_head for cacheline sharing @@ -302,6 +389,8 @@ struct request_queue { int nr_rqs[2]; /* # allocated [a]sync rqs */ int 
nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ + struct rq_wb *rq_wb; + /* * If blkcg is not used, @q->root_rl serves all requests. If blkcg * is used, root blkg allocates from @q->root_rl and all other @@ -327,6 +416,8 @@ struct request_queue { struct blk_mq_ctx __percpu *queue_ctx; unsigned int nr_queues; + unsigned int queue_depth; + /* hw dispatch queues */ struct blk_mq_hw_ctx **queue_hw_ctx; unsigned int nr_hw_queues; @@ -412,6 +503,9 @@ struct request_queue { unsigned int nr_sorted; unsigned int in_flight[2]; + + struct blk_rq_stat rq_stats[2]; + /* * Number of active block driver functions for which blk_drain_queue() * must wait. Must be incremented around functions that unlock the @@ -420,6 +514,7 @@ struct request_queue { unsigned int request_fn_active; unsigned int rq_timeout; + int poll_nsec; struct timer_list timeout; struct work_struct timeout_work; struct list_head timeout_list; @@ -505,6 +600,7 @@ struct request_queue { #define QUEUE_FLAG_FUA 24 /* device supports FUA writes */ #define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */ #define QUEUE_FLAG_DAX 26 /* device supports DAX */ +#define QUEUE_FLAG_STATS 27 /* track rq completion times */ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_STACKABLE) | \ @@ -601,7 +697,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) REQ_FAILFAST_DRIVER)) #define blk_account_rq(rq) \ - (((rq)->cmd_flags & REQ_STARTED) && \ + (((rq)->rq_flags & RQF_STARTED) && \ ((rq)->cmd_type == REQ_TYPE_FS)) #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) @@ -627,17 +723,31 @@ static inline unsigned int blk_queue_cluster(struct request_queue *q) return q->limits.cluster; } -/* - * We regard a request as sync, if either a read or a sync write - */ -static inline bool rw_is_sync(int op, unsigned int rw_flags) +static inline enum blk_zoned_model +blk_queue_zoned_model(struct request_queue *q) +{ + return q->limits.zoned; +} + +static inline bool blk_queue_is_zoned(struct request_queue *q) +{ + switch (blk_queue_zoned_model(q)) { + case BLK_ZONED_HA: + case BLK_ZONED_HM: + return true; + default: + return false; + } +} + +static inline unsigned int blk_queue_zone_size(struct request_queue *q) { - return op == REQ_OP_READ || (rw_flags & REQ_SYNC); + return blk_queue_is_zoned(q) ? 
q->limits.chunk_sectors : 0; } static inline bool rq_is_sync(struct request *rq) { - return rw_is_sync(req_op(rq), rq->cmd_flags); + return op_is_sync(rq->cmd_flags); } static inline bool blk_rl_full(struct request_list *rl, bool sync) @@ -669,8 +779,13 @@ static inline bool rq_mergeable(struct request *rq) if (req_op(rq) == REQ_OP_FLUSH) return false; + if (req_op(rq) == REQ_OP_WRITE_ZEROES) + return false; + if (rq->cmd_flags & REQ_NOMERGE_FLAGS) return false; + if (rq->rq_flags & RQF_NOMERGE_FLAGS) + return false; return true; } @@ -683,6 +798,14 @@ static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) return false; } +static inline unsigned int blk_queue_depth(struct request_queue *q) +{ + if (q->queue_depth) + return q->queue_depth; + + return q->nr_requests; +} + /* * q->prep_rq_fn return values */ @@ -790,8 +913,6 @@ extern void __blk_put_request(struct request_queue *, struct request *); extern struct request *blk_get_request(struct request_queue *, int, gfp_t); extern void blk_rq_set_block_pc(struct request *); extern void blk_requeue_request(struct request_queue *, struct request *); -extern void blk_add_request_payload(struct request *rq, struct page *page, - int offset, unsigned int len); extern int blk_lld_busy(struct request_queue *q); extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, struct bio_set *bs, gfp_t gfp_mask, @@ -824,6 +945,7 @@ extern void __blk_run_queue(struct request_queue *q); extern void __blk_run_queue_uncond(struct request_queue *q); extern void blk_run_queue(struct request_queue *); extern void blk_run_queue_async(struct request_queue *q); +extern void blk_mq_quiesce_queue(struct request_queue *q); extern int blk_rq_map_user(struct request_queue *, struct request *, struct rq_map_data *, void __user *, unsigned long, gfp_t); @@ -837,7 +959,7 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *, extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, struct request *, int, rq_end_io_fn *); -bool blk_poll(struct request_queue *q, blk_qc_t cookie); +bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie); static inline struct request_queue *bdev_get_queue(struct block_device *bdev) { @@ -888,6 +1010,9 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, if (unlikely(op == REQ_OP_WRITE_SAME)) return q->limits.max_write_same_sectors; + if (unlikely(op == REQ_OP_WRITE_ZEROES)) + return q->limits.max_write_zeroes_sectors; + return q->limits.max_sectors; } @@ -934,6 +1059,20 @@ static inline unsigned int blk_rq_count_bios(struct request *rq) } /* + * blk_rq_set_prio - associate a request with prio from ioc + * @rq: request of interest + * @ioc: target iocontext + * + * Associate request prio with ioc prio so request-based drivers + * can leverage priority information. + */ +static inline void blk_rq_set_prio(struct request *rq, struct io_context *ioc) +{ + if (ioc) + rq->ioprio = ioc->ioprio; +} + +/* * Request issue related functions.
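A sketch of the new blk_rq_set_prio() in use: a request-based driver propagating the submitter's I/O priority onto a freshly allocated request. The surrounding allocation is only a plausible context, assuming the legacy blk_get_request() path, and the function name is illustrative:

static struct request *sketch_get_prioritized_rq(struct request_queue *q)
{
	struct request *rq = blk_get_request(q, REQ_OP_READ, GFP_KERNEL);

	if (!IS_ERR(rq))
		/* inherit ioprio from the task's io_context, if it has one */
		blk_rq_set_prio(rq, current->io_context);
	return rq;
}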
*/ extern struct request *blk_peek_request(struct request_queue *q); @@ -991,6 +1130,8 @@ extern void blk_queue_max_discard_sectors(struct request_queue *q, unsigned int max_discard_sectors); extern void blk_queue_max_write_same_sectors(struct request_queue *q, unsigned int max_write_same_sectors); +extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, + unsigned int max_write_same_sectors); extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); extern void blk_queue_alignment_offset(struct request_queue *q, @@ -999,6 +1140,7 @@ extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); extern void blk_queue_io_min(struct request_queue *q, unsigned int min); extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); +extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth); extern void blk_set_default_limits(struct queue_limits *lim); extern void blk_set_stacking_limits(struct queue_limits *lim); extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, @@ -1027,6 +1169,13 @@ extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); +static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) +{ + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) + return 1; + return rq->nr_phys_segments; +} + extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); extern void blk_dump_rq_flags(struct request *, char *); extern long nr_blockdev_pages(void); @@ -1057,7 +1206,7 @@ static inline int blk_pre_runtime_suspend(struct request_queue *q) static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} static inline void blk_pre_runtime_resume(struct request_queue *q) {} static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} -extern inline void blk_set_runtime_active(struct request_queue *q) {} +static inline void blk_set_runtime_active(struct request_queue *q) {} #endif /* @@ -1078,6 +1227,7 @@ struct blk_plug { struct list_head cb_list; /* md requires an unplug callback */ }; #define BLK_MAX_REQUEST_COUNT 16 +#define BLK_PLUG_FLUSH_SIZE (128 * 1024) struct blk_plug_cb; typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); @@ -1151,6 +1301,9 @@ extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, struct bio **biop); extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct page *page); +extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, + bool discard); extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, bool discard); static inline int sb_issue_discard(struct super_block *sb, sector_t block, @@ -1354,6 +1507,46 @@ static inline unsigned int bdev_write_same(struct block_device *bdev) return 0; } +static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return q->limits.max_write_zeroes_sectors; + + return 0; +} + +static inline enum 
blk_zoned_model bdev_zoned_model(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return blk_queue_zoned_model(q); + + return BLK_ZONED_NONE; +} + +static inline bool bdev_is_zoned(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return blk_queue_is_zoned(q); + + return false; +} + +static inline unsigned int bdev_zone_size(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return blk_queue_zone_size(q); + + return 0; +} + static inline int queue_dma_alignment(struct request_queue *q) { return q ? q->dma_alignment : 511; diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index cceb72f9e29f..e417f080219a 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -118,7 +118,7 @@ static inline int blk_cmd_buf_len(struct request *rq) } extern void blk_dump_cmd(char *buf, struct request *rq); -extern void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes); +extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes); #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h new file mode 100644 index 000000000000..7b6e5d168c95 --- /dev/null +++ b/include/linux/bpf-cgroup.h @@ -0,0 +1,92 @@ +#ifndef _BPF_CGROUP_H +#define _BPF_CGROUP_H + +#include <linux/jump_label.h> +#include <uapi/linux/bpf.h> + +struct sock; +struct cgroup; +struct sk_buff; + +#ifdef CONFIG_CGROUP_BPF + +extern struct static_key_false cgroup_bpf_enabled_key; +#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key) + +struct cgroup_bpf { + /* + * Store two sets of bpf_prog pointers, one for programs that are + * pinned directly to this cgroup, and one for those that are effective + * when this cgroup is accessed. + */ + struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE]; + struct bpf_prog *effective[MAX_BPF_ATTACH_TYPE]; +}; + +void cgroup_bpf_put(struct cgroup *cgrp); +void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent); + +void __cgroup_bpf_update(struct cgroup *cgrp, + struct cgroup *parent, + struct bpf_prog *prog, + enum bpf_attach_type type); + +/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */ +void cgroup_bpf_update(struct cgroup *cgrp, + struct bpf_prog *prog, + enum bpf_attach_type type); + +int __cgroup_bpf_run_filter_skb(struct sock *sk, + struct sk_buff *skb, + enum bpf_attach_type type); + +int __cgroup_bpf_run_filter_sk(struct sock *sk, + enum bpf_attach_type type); + +/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. 
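The bdev_* zoned-model wrappers defined above make the common driver-side check a one-liner. A hedged sketch distinguishing host-managed devices, where sequential-write constraints are mandatory, from host-aware ones that also accept random writes; the policy shown is an example, not something this patch mandates:

static int sketch_check_zoned(struct block_device *bdev)
{
	if (!bdev_is_zoned(bdev))
		return 0;	/* BLK_ZONED_NONE: regular device */

	pr_info("zone size: %u sectors\n", bdev_zone_size(bdev));

	/* refuse host-managed drives if sequential writes can't be honoured */
	return bdev_zoned_model(bdev) == BLK_ZONED_HM ? -EOPNOTSUPP : 0;
}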
*/ +#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) \ + __ret = __cgroup_bpf_run_filter_skb(sk, skb, \ + BPF_CGROUP_INET_INGRESS); \ + \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled && sk && sk == skb->sk) { \ + typeof(sk) __sk = sk_to_full_sk(sk); \ + if (sk_fullsock(__sk)) \ + __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \ + BPF_CGROUP_INET_EGRESS); \ + } \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled && sk) { \ + __ret = __cgroup_bpf_run_filter_sk(sk, \ + BPF_CGROUP_INET_SOCK_CREATE); \ + } \ + __ret; \ +}) + +#else + +struct cgroup_bpf {}; +static inline void cgroup_bpf_put(struct cgroup *cgrp) {} +static inline void cgroup_bpf_inherit(struct cgroup *cgrp, + struct cgroup *parent) {} + +#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) + +#endif /* CONFIG_CGROUP_BPF */ + +#endif /* _BPF_CGROUP_H */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c201017b5730..8796ff03f472 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -216,6 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); +void bpf_prog_calc_digest(struct bpf_prog *fp); const struct bpf_func_proto *bpf_get_trace_printk_proto(void); @@ -233,13 +234,14 @@ void bpf_register_map_type(struct bpf_map_type_list *tl); struct bpf_prog *bpf_prog_get(u32 ufd); struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type); -struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i); -struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog); +struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i); +void bpf_prog_sub(struct bpf_prog *prog, int i); +struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog); void bpf_prog_put(struct bpf_prog *prog); struct bpf_map *bpf_map_get_with_uref(u32 ufd); struct bpf_map *__bpf_map_get(struct fd f); -struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref); +struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); int bpf_map_precharge_memlock(u32 pages); @@ -298,15 +300,21 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, { return ERR_PTR(-EOPNOTSUPP); } -static inline struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i) +static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, + int i) { return ERR_PTR(-EOPNOTSUPP); } +static inline void bpf_prog_sub(struct bpf_prog *prog, int i) +{ +} + static inline void bpf_prog_put(struct bpf_prog *prog) { } -static inline struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog) + +static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog) { return ERR_PTR(-EOPNOTSUPP); } @@ -319,6 +327,7 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto; extern const struct bpf_func_proto bpf_get_prandom_u32_proto; extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; +extern const struct bpf_func_proto bpf_get_numa_node_id_proto; extern const struct bpf_func_proto bpf_tail_call_proto; extern const struct bpf_func_proto bpf_ktime_get_ns_proto; extern const 
struct bpf_func_proto bpf_get_current_pid_tgid_proto; diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 7035b997aaa5..a13b031dc6b8 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -14,22 +14,16 @@ * are obviously wrong for any sort of memory access. */ #define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024) -#define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024) +#define BPF_REGISTER_MIN_RANGE -1 struct bpf_reg_state { enum bpf_reg_type type; - /* - * Used to determine if any memory access using this register will - * result in a bad access. - */ - u64 min_value, max_value; union { /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ s64 imm; /* valid when type == PTR_TO_PACKET* */ struct { - u32 id; u16 off; u16 range; }; @@ -39,6 +33,13 @@ struct bpf_reg_state { */ struct bpf_map *map_ptr; }; + u32 id; + /* Used to determine if any memory access using this register will + * result in a bad access. These two fields must be last. + * See states_equal() + */ + s64 min_value; + u64 max_value; }; enum bpf_stack_slot_type { diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index e3354b74286c..4f7d8be9ddbf 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -13,11 +13,13 @@ #define PHY_ID_BCM5241 0x0143bc30 #define PHY_ID_BCMAC131 0x0143bc70 #define PHY_ID_BCM5481 0x0143bca0 +#define PHY_ID_BCM54810 0x03625d00 #define PHY_ID_BCM5482 0x0143bcb0 #define PHY_ID_BCM5411 0x00206070 #define PHY_ID_BCM5421 0x002060e0 #define PHY_ID_BCM5464 0x002060b0 #define PHY_ID_BCM5461 0x002060c0 +#define PHY_ID_BCM54612E 0x03625e60 #define PHY_ID_BCM54616S 0x03625d10 #define PHY_ID_BCM57780 0x03625d90 @@ -55,6 +57,7 @@ #define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000 #define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 #define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 + /* Broadcom BCM7xxx specific workarounds */ #define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff) #define PHY_BRCM_7XXX_PATCH(x) ((x) & 0xff) @@ -105,11 +108,15 @@ #define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800 #define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000 +#define MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW 0x0100 #define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200 #define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000 #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007 +#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12 +#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN (1 << 8) +#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN (1 << 4) -#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000 +#define MII_BCM54XX_AUXCTL_SHDWSEL_MASK 0x0007 /* * Broadcom LED source encodings. These are used in BCM5461, BCM5481, @@ -124,6 +131,7 @@ #define BCM_LED_SRC_INTR 0x6 #define BCM_LED_SRC_QUALITY 0x7 #define BCM_LED_SRC_RCVLED 0x8 +#define BCM_LED_SRC_WIRESPEED 0x9 #define BCM_LED_SRC_MULTICOLOR1 0xa #define BCM_LED_SRC_OPENSHORT 0xb #define BCM_LED_SRC_OFF 0xe /* Tied high */ @@ -135,6 +143,14 @@ * Shadow values go into bits [14:10] of register 0x1c to select a shadow * register to access. 
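For the BCM54xx shadow-register additions in the hunk just below (BCM54XX_SHD_SCR2 and its WSPD_RTRY fields), access is the usual read-modify-write through register 0x1c. A sketch of capping the wirespeed retry limit, assuming the bcm_phy_read_shadow()/bcm_phy_write_shadow() helpers from drivers/net/phy/bcm-phy-lib; the function name is illustrative:

static int sketch_set_wspd_retry_limit(struct phy_device *phydev, u16 limit)
{
	int val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR2);

	if (val < 0)
		return val;

	val &= ~(BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK <<
		 BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT);
	val |= ((limit - BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET) &
		BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK) <<
	       BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT;
	return bcm_phy_write_shadow(phydev, BCM54XX_SHD_SCR2, val);
}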
*/ + +/* 00100: Reserved control register 2 */ +#define BCM54XX_SHD_SCR2 0x04 +#define BCM54XX_SHD_SCR2_WSPD_RTRY_DIS 0x100 +#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT 2 +#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET 2 +#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK 0x7 + /* 00101: Spare Control Register 3 */ #define BCM54XX_SHD_SCR3 0x05 #define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001 @@ -189,6 +205,12 @@ #define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */ #define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */ +/* BCM54810 Registers */ +#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL (MII_BCM54XX_EXP_SEL_ER + 0x90) +#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0) +#define BCM54810_SHD_CLK_CTL 0x3 +#define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9) + /*****************************************************************************/ /* Fast Ethernet Transceiver definitions. */ @@ -222,6 +244,9 @@ #define LPI_FEATURE_EN_DIG1000X 0x4000 /* Core register definitions*/ +#define MII_BRCM_CORE_BASE12 0x12 +#define MII_BRCM_CORE_BASE13 0x13 +#define MII_BRCM_CORE_BASE14 0x14 #define MII_BRCM_CORE_BASE1E 0x1E #define MII_BRCM_CORE_EXPB0 0xB0 #define MII_BRCM_CORE_EXPB1 0xB1 diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index a226652a5a6c..657a718c27d2 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -40,6 +40,8 @@ struct bsg_job { struct device *dev; struct request *req; + struct kref kref; + /* Transport/driver specific request/reply structs */ void *request; void *reply; @@ -67,5 +69,7 @@ void bsg_job_done(struct bsg_job *job, int result, int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name, bsg_job_fn *job_fn, int dd_job_size); void bsg_request_fn(struct request_queue *q); +void bsg_job_put(struct bsg_job *job); +int __must_check bsg_job_get(struct bsg_job *job); #endif diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index ebbacd14d450..d67ab83823ad 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -168,7 +168,12 @@ int inode_has_buffers(struct inode *); void invalidate_inode_buffers(struct inode *); int remove_inode_buffers(struct inode *inode); int sync_mapping_buffers(struct address_space *mapping); -void unmap_underlying_metadata(struct block_device *bdev, sector_t block); +void clean_bdev_aliases(struct block_device *bdev, sector_t block, + sector_t len); +static inline void clean_bdev_bh_alias(struct buffer_head *bh) +{ + clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1); +} void mark_buffer_async_write(struct buffer_head *bh); void __wait_on_buffer(struct buffer_head *); diff --git a/include/linux/bug.h b/include/linux/bug.h index 292d6a10b0c2..baff2e8fc8a8 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h @@ -121,4 +121,21 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr, } #endif /* CONFIG_GENERIC_BUG */ + +/* + * Since detected data corruption should stop operation on the affected + * structures, this returns false if the corruption condition is found. + */ +#define CHECK_DATA_CORRUPTION(condition, fmt, ...) 
\ + do { \ + if (unlikely(condition)) { \ + if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \ + pr_err(fmt, ##__VA_ARGS__); \ + BUG(); \ + } else \ + WARN(1, fmt, ##__VA_ARGS__); \ + return false; \ + } \ + } while (0) + #endif /* _LINUX_BUG_H */ diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 2189935075b4..a951fd10aaaa 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -71,6 +71,7 @@ struct cpu_cacheinfo { struct cacheinfo *info_list; unsigned int num_levels; unsigned int num_leaves; + bool cpu_map_populated; }; /* diff --git a/include/linux/capability.h b/include/linux/capability.h index dbc21c719ce6..6ffb67e10c06 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -240,8 +240,10 @@ static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap) return true; } #endif /* CONFIG_MULTIUSER */ +extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode); extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); +extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns); /* audit system wants to get cap info from files as well */ extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); diff --git a/include/linux/ccp.h b/include/linux/ccp.h index a7653339fedb..c71dd8fa5764 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -11,8 +11,8 @@ * published by the Free Software Foundation. */ -#ifndef __CPP_H__ -#define __CPP_H__ +#ifndef __CCP_H__ +#define __CCP_H__ #include <linux/scatterlist.h> #include <linux/workqueue.h> @@ -553,7 +553,7 @@ enum ccp_engine { #define CCP_CMD_PASSTHRU_NO_DMA_MAP 0x00000002 /** - * struct ccp_cmd - CPP operation request + * struct ccp_cmd - CCP operation request * @entry: list element (ccp driver use only) * @work: work element used for callbacks (ccp driver use only) * @ccp: CCP device to be run on (ccp driver use only) diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 8dbd7879fdc6..67bcef2ecddb 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -1,7 +1,7 @@ #ifndef __FS_CEPH_MESSENGER_H #define __FS_CEPH_MESSENGER_H -#include <linux/blk_types.h> +#include <linux/bvec.h> #include <linux/kref.h> #include <linux/mutex.h> #include <linux/net.h> diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 5b17de62c962..861b4677fc5b 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -16,6 +16,7 @@ #include <linux/percpu-refcount.h> #include <linux/percpu-rwsem.h> #include <linux/workqueue.h> +#include <linux/bpf-cgroup.h> #ifdef CONFIG_CGROUPS @@ -300,6 +301,9 @@ struct cgroup { /* used to schedule release agent */ struct work_struct release_agent_work; + /* used to store eBPF programs */ + struct cgroup_bpf bpf; + /* ids of the ancestors at each level including self */ int ancestor_ids[]; }; diff --git a/include/linux/clk.h b/include/linux/clk.h index 123c02788807..e9d36b3e49de 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -17,8 +17,9 @@ #include <linux/notifier.h> struct device; - struct clk; +struct device_node; +struct of_phandle_args; /** * DOC: clk notifier callback types @@ -249,6 +250,23 @@ struct clk *clk_get(struct device *dev, const char *id); struct clk *devm_clk_get(struct device *dev, const char *id); /** + * devm_get_clk_from_child - lookup 
and obtain a managed reference to a + * clock producer from child node. + * @dev: device for clock "consumer" + * @np: pointer to clock consumer node + * @con_id: clock consumer ID + * + * This function parses the clocks, and uses them to look up the + * struct clk from the registered list of clock providers by using + * @np and @con_id + * + * The clock will automatically be freed when the device is unbound + * from the bus. + */ +struct clk *devm_get_clk_from_child(struct device *dev, + struct device_node *np, const char *con_id); + +/** * clk_enable - inform the system when the clock source should be running. * @clk: clock source * @@ -432,6 +450,12 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id) return NULL; } +static inline struct clk *devm_get_clk_from_child(struct device *dev, + struct device_node *np, const char *con_id) +{ + return NULL; +} + static inline void clk_put(struct clk *clk) {} static inline void devm_clk_put(struct device *dev, struct clk *clk) {} @@ -501,9 +525,6 @@ static inline void clk_disable_unprepare(struct clk *clk) clk_unprepare(clk); } -struct device_node; -struct of_phandle_args; - #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) struct clk *of_clk_get(struct device_node *np, int index); struct clk *of_clk_get_by_name(struct device_node *np, const char *name); diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h index ba6fa4148515..9ebf1f8243bb 100644 --- a/include/linux/clk/renesas.h +++ b/include/linux/clk/renesas.h @@ -20,10 +20,6 @@ struct device; struct device_node; struct generic_pm_domain; -void r8a7778_clocks_init(u32 mode); -void r8a7779_clocks_init(u32 mode); -void rcar_gen2_clocks_init(u32 mode); - void cpg_mstp_add_clk_domain(struct device_node *np); #ifdef CONFIG_CLK_RENESAS_CPG_MSTP int cpg_mstp_attach_dev(struct generic_pm_domain *unused, struct device *dev); diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 08398182f56e..65602d395a52 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -169,7 +169,10 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) * @mult: cycle to nanosecond multiplier * @shift: cycle to nanosecond divisor (power of two) * - * Converts cycles to nanoseconds, using the given mult and shift. + * Converts clocksource cycles to nanoseconds, using the given @mult and @shift. + * The code is optimized for performance and is not intended to work + * with absolute clocksource cycles (as those will easily overflow), + * but is only intended to be used with relative (delta) clocksource cycles. * * XXX - This could use some mult_lxl_ll() asm optimization */ diff --git a/include/linux/cma.h b/include/linux/cma.h index 29f9e774ab76..6f0a91b37f68 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -1,6 +1,9 @@ #ifndef __CMA_H__ #define __CMA_H__ +#include <linux/init.h> +#include <linux/types.h> + /* * There is always at least global CMA area and a few optional * areas configured in kernel .config. diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 432f5c97e18f..0444b1336268 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -21,7 +21,7 @@ * clobbered. 
The issue is as follows: while the inline asm might * access any memory it wants, the compiler could have fit all of * @ptr into memory registers instead, and since @ptr never escaped - * from that, it proofed that the inline asm wasn't touching any of + * from that, it proved that the inline asm wasn't touching any of * it. This version works well with both compilers, i.e. we're telling * the compiler that the inline asm absolutely may see the contents * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495 @@ -263,7 +263,9 @@ #endif #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */ -#if GCC_VERSION >= 50000 +#if GCC_VERSION >= 70000 +#define KASAN_ABI_VERSION 5 +#elif GCC_VERSION >= 50000 #define KASAN_ABI_VERSION 4 #elif GCC_VERSION >= 40902 #define KASAN_ABI_VERSION 3 diff --git a/include/linux/configfs.h b/include/linux/configfs.h index d9d6a9d77489..9a30b921f740 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -228,7 +228,7 @@ static struct configfs_bin_attribute _pfx##attr_##_name = { \ struct configfs_item_operations { void (*release)(struct config_item *); int (*allow_link)(struct config_item *src, struct config_item *target); - int (*drop_link)(struct config_item *src, struct config_item *target); + void (*drop_link)(struct config_item *src, struct config_item *target); }; struct configfs_group_operations { diff --git a/include/linux/console.h b/include/linux/console.h index d530c4627e54..9c26c6685587 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -28,9 +28,17 @@ struct tty_struct; #define VT100ID "\033[?1;2c" #define VT102ID "\033[?6c" +enum con_scroll { + SM_UP, + SM_DOWN, +}; + /** * struct consw - callbacks for consoles * + * @con_scroll: move lines from @top to @bottom in direction @dir by @lines. + * Return true if no generic handling should be done. + * Invoked by csi_M and printing to the console. * @con_set_palette: sets the palette of the console to @table (optional) * @con_scrolldelta: the contents of the console should be scrolled by @lines. * Invoked by user. 
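The con_scroll rework in the console.h hunk here replaces the old int callback and the SM_UP/SM_DOWN defines with an enum parameter and a bool return. A sketch of a driver-side implementation; my_hw_scroll_up() is hypothetical and stands in for whatever hardware scrolling the console can do:

/* hypothetical hardware helper */
static bool my_hw_scroll_up(struct vc_data *vc, unsigned int top,
			    unsigned int bottom, unsigned int lines);

static bool sketch_con_scroll(struct vc_data *vc, unsigned int top,
			      unsigned int bottom, enum con_scroll dir,
			      unsigned int lines)
{
	if (dir != SM_UP)
		return false;	/* false = fall back to generic scrolling */

	/* true only if the hardware fully handled the region move */
	return my_hw_scroll_up(vc, top, bottom, lines);
}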
(optional) @@ -44,7 +52,9 @@ struct consw { void (*con_putc)(struct vc_data *, int, int, int); void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int); void (*con_cursor)(struct vc_data *, int); - int (*con_scroll)(struct vc_data *, int, int, int, int); + bool (*con_scroll)(struct vc_data *, unsigned int top, + unsigned int bottom, enum con_scroll dir, + unsigned int lines); int (*con_switch)(struct vc_data *); int (*con_blank)(struct vc_data *, int, int); int (*con_font_set)(struct vc_data *, struct console_font *, unsigned); @@ -99,10 +109,6 @@ static inline int con_debug_leave(void) } #endif -/* scroll */ -#define SM_UP (1) -#define SM_DOWN (2) - /* cursor */ #define CM_DRAW (1) #define CM_ERASE (2) diff --git a/include/linux/cpu.h b/include/linux/cpu.h index b886dc17f2f3..09807c2ce328 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -93,22 +93,16 @@ extern bool cpuhp_tasks_frozen; { .notifier_call = fn, .priority = pri }; \ __register_cpu_notifier(&fn##_nb); \ } -#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ -#define cpu_notifier(fn, pri) do { (void)(fn); } while (0) -#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0) -#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ -#ifdef CONFIG_HOTPLUG_CPU extern int register_cpu_notifier(struct notifier_block *nb); extern int __register_cpu_notifier(struct notifier_block *nb); extern void unregister_cpu_notifier(struct notifier_block *nb); extern void __unregister_cpu_notifier(struct notifier_block *nb); -#else -#ifndef MODULE -extern int register_cpu_notifier(struct notifier_block *nb); -extern int __register_cpu_notifier(struct notifier_block *nb); -#else +#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ +#define cpu_notifier(fn, pri) do { (void)(fn); } while (0) +#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0) + static inline int register_cpu_notifier(struct notifier_block *nb) { return 0; @@ -118,7 +112,6 @@ static inline int __register_cpu_notifier(struct notifier_block *nb) { return 0; } -#endif static inline void unregister_cpu_notifier(struct notifier_block *nb) { @@ -245,6 +238,8 @@ void arch_cpu_idle_dead(void); int cpu_report_state(int cpu); int cpu_check_up_prepare(int cpu); void cpu_set_state_online(int cpu); +void play_idle(unsigned long duration_ms); + #ifdef CONFIG_HOTPLUG_CPU bool cpu_wait_death(unsigned int cpu, int seconds); bool cpu_report_death(void); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 32dc0cbd51ca..7e05c5e4e45c 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -175,7 +175,7 @@ void disable_cpufreq(void); u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); -int cpufreq_update_policy(unsigned int cpu); +void cpufreq_update_policy(unsigned int cpu); bool have_governor_per_policy(void); struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); void cpufreq_enable_fast_switch(struct cpufreq_policy *policy); @@ -234,6 +234,10 @@ __ATTR(_name, _perm, show_##_name, NULL) static struct freq_attr _name = \ __ATTR(_name, 0644, show_##_name, store_##_name) +#define cpufreq_freq_attr_wo(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0200, NULL, store_##_name) + struct global_attr { struct attribute attr; ssize_t (*show)(struct kobject *kobj, diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index afe641c02dca..22acee76cf4c 100644 --- 
a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -16,9 +16,11 @@ enum cpuhp_state { CPUHP_PERF_SUPERH, CPUHP_X86_HPET_DEAD, CPUHP_X86_APB_DEAD, + CPUHP_X86_MCE_DEAD, CPUHP_VIRT_NET_DEAD, CPUHP_SLUB_DEAD, CPUHP_MM_WRITEBACK_DEAD, + CPUHP_MM_VMSTAT_DEAD, CPUHP_SOFTIRQ_DEAD, CPUHP_NET_MVNETA_DEAD, CPUHP_CPUIDLE_DEAD, @@ -30,6 +32,15 @@ enum cpuhp_state { CPUHP_ACPI_CPUDRV_DEAD, CPUHP_S390_PFAULT_DEAD, CPUHP_BLK_MQ_DEAD, + CPUHP_FS_BUFF_DEAD, + CPUHP_PRINTK_DEAD, + CPUHP_MM_MEMCQ_DEAD, + CPUHP_PERCPU_CNT_DEAD, + CPUHP_RADIX_DEAD, + CPUHP_PAGE_ALLOC_DEAD, + CPUHP_NET_DEV_DEAD, + CPUHP_PCI_XGENE_DEAD, + CPUHP_IOMMU_INTEL_DEAD, CPUHP_WORKQUEUE_PREP, CPUHP_POWER_NUMA_PREPARE, CPUHP_HRTIMERS_PREPARE, @@ -49,6 +60,16 @@ enum cpuhp_state { CPUHP_ARM_SHMOBILE_SCU_PREPARE, CPUHP_SH_SH3X_PREPARE, CPUHP_BLK_MQ_PREPARE, + CPUHP_NET_FLOW_PREPARE, + CPUHP_TOPOLOGY_PREPARE, + CPUHP_NET_IUCV_PREPARE, + CPUHP_ARM_BL_PREPARE, + CPUHP_TRACE_RB_PREPARE, + CPUHP_MM_ZS_PREPARE, + CPUHP_MM_ZSWP_MEM_PREPARE, + CPUHP_MM_ZSWP_POOL_PREPARE, + CPUHP_KVM_PPC_BOOK3S_PREPARE, + CPUHP_ZCOMP_PREPARE, CPUHP_TIMERS_DEAD, CPUHP_NOTF_ERR_INJ_PREPARE, CPUHP_MIPS_SOC_PREPARE, diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index bb31373c3478..da346f2817a8 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -74,6 +74,7 @@ struct cpuidle_driver_kobj; struct cpuidle_device { unsigned int registered:1; unsigned int enabled:1; + unsigned int use_deepest_state:1; unsigned int cpu; int last_residency; @@ -192,11 +193,12 @@ static inline struct cpuidle_driver *cpuidle_get_cpu_driver( static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; } #endif -#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND) +#ifdef CONFIG_CPU_IDLE extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv, struct cpuidle_device *dev); extern int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev); +extern void cpuidle_use_deepest_state(bool enable); #else static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, struct cpuidle_device *dev) @@ -204,6 +206,9 @@ static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev) {return -ENODEV; } +static inline void cpuidle_use_deepest_state(bool enable) +{ +} #endif /* kernel/sched/idle.c */ @@ -235,8 +240,6 @@ struct cpuidle_governor { int (*select) (struct cpuidle_driver *drv, struct cpuidle_device *dev); void (*reflect) (struct cpuidle_device *dev, int index); - - struct module *owner; }; #ifdef CONFIG_CPU_IDLE diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 7cee5551625b..167aea29d41e 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -50,6 +50,8 @@ #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 #define CRYPTO_ALG_TYPE_KPP 0x00000008 +#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a +#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b #define CRYPTO_ALG_TYPE_RNG 0x0000000c #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d #define CRYPTO_ALG_TYPE_DIGEST 0x0000000e @@ -60,6 +62,7 @@ #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c +#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e #define CRYPTO_ALG_LARVAL 0x00000010 #define CRYPTO_ALG_DEAD 0x00000020 @@ -87,7 +90,7 @@ #define CRYPTO_ALG_TESTED 0x00000400 /* - * Set if the algorithm is an instance 
that is build from templates. + * Set if the algorithm is an instance that is built from templates. */ #define CRYPTO_ALG_INSTANCE 0x00000800 diff --git a/include/linux/dax.h b/include/linux/dax.h index add6c4bc568f..f97bcfe79472 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -8,25 +8,44 @@ struct iomap_ops; -/* We use lowest available exceptional entry bit for locking */ +/* + * We use lowest available bit in exceptional entry for locking, one bit for + * the entry size (PMD) and two more to tell us if the entry is a huge zero + * page (HZP) or an empty entry that is just used for locking. In total four + * special bits. + * + * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the HZP and + * EMPTY bits aren't set the entry is a normal DAX entry with a filesystem + * block allocation. + */ +#define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 4) #define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT) +#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1)) +#define RADIX_DAX_HZP (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2)) +#define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3)) -ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter, +static inline unsigned long dax_radix_sector(void *entry) +{ + return (unsigned long)entry >> RADIX_DAX_SHIFT; +} + +static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags) +{ + return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags | + ((unsigned long)sector << RADIX_DAX_SHIFT) | + RADIX_DAX_ENTRY_LOCK); +} + +ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, struct iomap_ops *ops); -ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, - get_block_t, dio_iodone_t, int flags); -int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); -int dax_truncate_page(struct inode *, loff_t from, get_block_t); -int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, +int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, struct iomap_ops *ops); -int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); void dax_wake_mapping_entry_waiter(struct address_space *mapping, - pgoff_t index, bool wake_all); + pgoff_t index, void *entry, bool wake_all); #ifdef CONFIG_FS_DAX struct page *read_dax_sector(struct block_device *bdev, sector_t n); -void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index); int __dax_zero_page_range(struct block_device *bdev, sector_t sector, unsigned int offset, unsigned int length); #else @@ -35,12 +54,6 @@ static inline struct page *read_dax_sector(struct block_device *bdev, { return ERR_PTR(-ENXIO); } -/* Shouldn't ever be called when dax is disabled. 
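As a worked sketch of the radix-tree encoding above (the sector value is made up, and dax_radix_order() is the CONFIG_FS_DAX_PMD helper defined a little further down):

#include <linux/dax.h>

static void dax_entry_roundtrip_sketch(void)
{
        sector_t sector = 2048;        /* hypothetical block number */
        void *entry;

        /* Exceptional marker | PMD flag | (sector << RADIX_DAX_SHIFT) | lock bit. */
        entry = dax_radix_locked_entry(sector, RADIX_DAX_PMD);

        /* Shifting the special bits back out recovers the sector. */
        WARN_ON(dax_radix_sector(entry) != 2048);

        /* With RADIX_DAX_PMD set, the entry order is PMD_SHIFT - PAGE_SHIFT. */
        WARN_ON(dax_radix_order(entry) != PMD_SHIFT - PAGE_SHIFT);
}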
*/ -static inline void dax_unlock_mapping_entry(struct address_space *mapping, - pgoff_t index) -{ - BUG(); -} static inline int __dax_zero_page_range(struct block_device *bdev, sector_t sector, unsigned int offset, unsigned int length) { @@ -48,18 +61,28 @@ static inline int __dax_zero_page_range(struct block_device *bdev, } #endif -#if defined(CONFIG_TRANSPARENT_HUGEPAGE) -int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, - unsigned int flags, get_block_t); +#ifdef CONFIG_FS_DAX_PMD +static inline unsigned int dax_radix_order(void *entry) +{ + if ((unsigned long)entry & RADIX_DAX_PMD) + return PMD_SHIFT - PAGE_SHIFT; + return 0; +} +int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmd, unsigned int flags, struct iomap_ops *ops); #else -static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd, unsigned int flags, get_block_t gb) +static inline unsigned int dax_radix_order(void *entry) +{ + return 0; +} +static inline int dax_iomap_pmd_fault(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd, unsigned int flags, + struct iomap_ops *ops) { return VM_FAULT_FALLBACK; } #endif int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); -#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) static inline bool vma_is_dax(struct vm_area_struct *vma) { diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 4d3f0d1aec73..014cc564d1c4 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -52,7 +52,8 @@ extern struct srcu_struct debugfs_srcu; * Must only be called under the protection established by * debugfs_use_file_start(). */ -static inline const struct file_operations *debugfs_real_fops(struct file *filp) +static inline const struct file_operations * +debugfs_real_fops(const struct file *filp) __must_hold(&debugfs_srcu) { /* @@ -62,6 +63,21 @@ static inline const struct file_operations *debugfs_real_fops(struct file *filp) return filp->f_path.dentry->d_fsdata; } +#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ +static int __fops ## _open(struct inode *inode, struct file *file) \ +{ \ + __simple_attr_check_format(__fmt, 0ull); \ + return simple_attr_open(inode, file, __get, __set, __fmt); \ +} \ +static const struct file_operations __fops = { \ + .owner = THIS_MODULE, \ + .open = __fops ## _open, \ + .release = simple_attr_release, \ + .read = debugfs_attr_read, \ + .write = debugfs_attr_write, \ + .llseek = generic_file_llseek, \ +} + #if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_create_file(const char *name, umode_t mode, @@ -99,21 +115,6 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf, ssize_t debugfs_attr_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos); -#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ -static int __fops ## _open(struct inode *inode, struct file *file) \ -{ \ - __simple_attr_check_format(__fmt, 0ull); \ - return simple_attr_open(inode, file, __get, __set, __fmt); \ -} \ -static const struct file_operations __fops = { \ - .owner = THIS_MODULE, \ - .open = __fops ## _open, \ - .release = simple_attr_release, \ - .read = debugfs_attr_read, \ - .write = debugfs_attr_write, \ - .llseek = generic_file_llseek, \ -} - struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, const char *new_name); @@ -233,8 +234,18 @@ static inline void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu) { } -#define 
DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ - static const struct file_operations __fops = { 0 } +static inline ssize_t debugfs_attr_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos) +{ + return -ENODEV; +} + +static inline ssize_t debugfs_attr_write(struct file *file, + const char __user *buf, + size_t len, loff_t *ppos) +{ + return -ENODEV; +} static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, char *new_name) diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h index 7adf6cc4b305..c35d0c0e0ada 100644 --- a/include/linux/devfreq_cooling.h +++ b/include/linux/devfreq_cooling.h @@ -20,7 +20,6 @@ #include <linux/devfreq.h> #include <linux/thermal.h> -#ifdef CONFIG_DEVFREQ_THERMAL /** * struct devfreq_cooling_power - Devfreq cooling power ops @@ -37,12 +36,16 @@ * @dyn_power_coeff * frequency * voltage^2 */ struct devfreq_cooling_power { - unsigned long (*get_static_power)(unsigned long voltage); - unsigned long (*get_dynamic_power)(unsigned long freq, + unsigned long (*get_static_power)(struct devfreq *devfreq, + unsigned long voltage); + unsigned long (*get_dynamic_power)(struct devfreq *devfreq, + unsigned long freq, unsigned long voltage); unsigned long dyn_power_coeff; }; +#ifdef CONFIG_DEVFREQ_THERMAL + struct thermal_cooling_device * of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, struct devfreq_cooling_power *dfc_power); diff --git a/include/linux/device.h b/include/linux/device.h index bc41e87a969b..491b4c0ca633 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -362,6 +362,7 @@ int subsys_virtual_register(struct bus_type *subsys, * @name: Name of the class. * @owner: The module owner. * @class_attrs: Default attributes of this class. + * @class_groups: Default attributes of this class. * @dev_groups: Default attributes of the devices that belong to the class. * @dev_kobj: The kobject that represents this class and links it into the hierarchy. * @dev_uevent: Called when a device is added, removed from this class, or a @@ -390,6 +391,7 @@ struct class { struct module *owner; struct class_attribute *class_attrs; + const struct attribute_group **class_groups; const struct attribute_group **dev_groups; struct kobject *dev_kobj; @@ -465,6 +467,8 @@ struct class_attribute { struct class_attribute class_attr_##_name = __ATTR_RW(_name) #define CLASS_ATTR_RO(_name) \ struct class_attribute class_attr_##_name = __ATTR_RO(_name) +#define CLASS_ATTR_WO(_name) \ + struct class_attribute class_attr_##_name = __ATTR_WO(_name) extern int __must_check class_create_file_ns(struct class *class, const struct class_attribute *attr, @@ -698,6 +702,25 @@ static inline int devm_add_action_or_reset(struct device *dev, return ret; } +/** + * devm_alloc_percpu - Resource-managed alloc_percpu + * @dev: Device to allocate per-cpu memory for + * @type: Type to allocate per-cpu memory for + * + * Managed alloc_percpu. Per-cpu memory allocated with this function is + * automatically freed on driver detach. + * + * RETURNS: + * Pointer to allocated memory on success, NULL on failure. 
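As a usage sketch of DEFINE_DEBUGFS_ATTRIBUTE() above (all names hypothetical): the generated fops go through debugfs_attr_read()/debugfs_attr_write(), which take the SRCU protection themselves, so the file is typically created with debugfs_create_file_unsafe() rather than the proxying debugfs_create_file():

#include <linux/debugfs.h>

static u32 example_threshold;        /* hypothetical tunable */

static int threshold_get(void *data, u64 *val)
{
        *val = *(u32 *)data;
        return 0;
}

static int threshold_set(void *data, u64 val)
{
        *(u32 *)data = val;
        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(threshold_fops, threshold_get, threshold_set, "%llu\n");

/* In init code, with example_dir created via debugfs_create_dir():
 *        debugfs_create_file_unsafe("threshold", 0644, example_dir,
 *                                   &example_threshold, &threshold_fops);
 */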
+ */ +#define devm_alloc_percpu(dev, type) \ + ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \ + __alignof__(type))) + +void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, + size_t align); +void devm_free_percpu(struct device *dev, void __percpu *pdata); + struct device_dma_parameters { /* * a low level driver may set these to teach IOMMU code about @@ -708,6 +731,87 @@ struct device_dma_parameters { }; /** + * enum device_link_state - Device link states. + * @DL_STATE_NONE: The presence of the drivers is not being tracked. + * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present. + * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not. + * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present). + * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present. + * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding. + */ +enum device_link_state { + DL_STATE_NONE = -1, + DL_STATE_DORMANT = 0, + DL_STATE_AVAILABLE, + DL_STATE_CONSUMER_PROBE, + DL_STATE_ACTIVE, + DL_STATE_SUPPLIER_UNBIND, +}; + +/* + * Device link flags. + * + * STATELESS: The core won't track the presence of supplier/consumer drivers. + * AUTOREMOVE: Remove this link automatically on consumer driver unbind. + * PM_RUNTIME: If set, the runtime PM framework will use this link. + * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation. + */ +#define DL_FLAG_STATELESS BIT(0) +#define DL_FLAG_AUTOREMOVE BIT(1) +#define DL_FLAG_PM_RUNTIME BIT(2) +#define DL_FLAG_RPM_ACTIVE BIT(3) + +/** + * struct device_link - Device link representation. + * @supplier: The device on the supplier end of the link. + * @s_node: Hook to the supplier device's list of links to consumers. + * @consumer: The device on the consumer end of the link. + * @c_node: Hook to the consumer device's list of links to suppliers. + * @status: The state of the link (with respect to the presence of drivers). + * @flags: Link flags. + * @rpm_active: Whether or not the consumer device is runtime-PM-active. + * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks. + */ +struct device_link { + struct device *supplier; + struct list_head s_node; + struct device *consumer; + struct list_head c_node; + enum device_link_state status; + u32 flags; + bool rpm_active; +#ifdef CONFIG_SRCU + struct rcu_head rcu_head; +#endif +}; + +/** + * enum dl_dev_state - Device driver presence tracking information. + * @DL_DEV_NO_DRIVER: There is no driver attached to the device. + * @DL_DEV_PROBING: A driver is probing. + * @DL_DEV_DRIVER_BOUND: The driver has been bound to the device. + * @DL_DEV_UNBINDING: The driver is unbinding from the device. + */ +enum dl_dev_state { + DL_DEV_NO_DRIVER = 0, + DL_DEV_PROBING, + DL_DEV_DRIVER_BOUND, + DL_DEV_UNBINDING, +}; + +/** + * struct dev_links_info - Device data related to device links. + * @suppliers: List of links to supplier devices. + * @consumers: List of links to consumer devices. + * @status: Driver status information. + */ +struct dev_links_info { + struct list_head suppliers; + struct list_head consumers; + enum dl_dev_state status; +}; + +/** * struct device - The basic device structure * @parent: The device's "parent" device, the device to which it is attached. * In most cases, a parent device is some sort of bus or host @@ -732,8 +836,9 @@ struct device_dma_parameters { * on. This shrinks the "Board Support Packages" (BSPs) and * minimizes board-specific #ifdefs in drivers. 
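A probe-path sketch of devm_alloc_percpu() above; the driver and its per-cpu stats structure are invented for illustration:

#include <linux/device.h>
#include <linux/platform_device.h>

struct foo_stats {        /* hypothetical per-cpu counters */
        u64 rx_packets;
        u64 tx_packets;
};

static int foo_probe(struct platform_device *pdev)
{
        struct foo_stats __percpu *stats;

        stats = devm_alloc_percpu(&pdev->dev, struct foo_stats);
        if (!stats)
                return -ENOMEM;

        /* No explicit devm_free_percpu() on the error/remove paths:
         * the allocation is released automatically on driver detach. */
        platform_set_drvdata(pdev, stats);
        return 0;
}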
* @driver_data: Private pointer for driver specific info. + * @links: Links to suppliers and consumers of this device. * @power: For device power management. - * See Documentation/power/devices.txt for details. + * See Documentation/power/admin-guide/devices.rst for details. * @pm_domain: Provide callbacks that are executed during system suspend, * hibernation, system resume and during runtime PM transitions * along with subsystem-level and driver-level callbacks. @@ -799,6 +904,7 @@ struct device { core doesn't touch it */ void *driver_data; /* Driver data, set and get with dev_set/get_drvdata */ + struct dev_links_info links; struct dev_pm_info power; struct dev_pm_domain *pm_domain; @@ -1116,6 +1222,10 @@ extern void device_shutdown(void); /* debugging and troubleshooting/diagnostic helpers. */ extern const char *dev_driver_string(const struct device *dev); +/* Device links interface. */ +struct device_link *device_link_add(struct device *consumer, + struct device *supplier, u32 flags); +void device_link_del(struct device_link *link); #ifdef CONFIG_PRINTK diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h index b91b023deffb..a52c6580cc9a 100644 --- a/include/linux/dm-io.h +++ b/include/linux/dm-io.h @@ -58,7 +58,7 @@ struct dm_io_notify { struct dm_io_client; struct dm_io_request { int bi_op; /* REQ_OP */ - int bi_op_flags; /* rq_flag_bits */ + int bi_op_flags; /* req_flag_bits */ struct dm_io_memory mem; /* Memory to use for io */ struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */ struct dm_io_client *client; /* Client memory handler */ diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index e0b0741ae671..8daeb3ce0016 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -30,7 +30,7 @@ #include <linux/list.h> #include <linux/dma-mapping.h> #include <linux/fs.h> -#include <linux/fence.h> +#include <linux/dma-fence.h> #include <linux/wait.h> struct device; @@ -143,7 +143,7 @@ struct dma_buf { wait_queue_head_t poll; struct dma_buf_poll_cb_t { - struct fence_cb cb; + struct dma_fence_cb cb; wait_queue_head_t *poll; unsigned long active; diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h new file mode 100644 index 000000000000..5900945f962d --- /dev/null +++ b/include/linux/dma-fence-array.h @@ -0,0 +1,86 @@ +/* + * fence-array: aggregates fence to be waited together + * + * Copyright (C) 2016 Collabora Ltd + * Copyright (C) 2016 Advanced Micro Devices, Inc. + * Authors: + * Gustavo Padovan <gustavo@padovan.org> + * Christian König <christian.koenig@amd.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
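A sketch of the device-links interface declared above, assuming a consumer driver that has located its supplier by some bus-specific means (both device pointers are the caller's):

#include <linux/device.h>

static int consumer_link_supplier(struct device *consumer,
                                  struct device *supplier)
{
        struct device_link *link;

        /* Propagate runtime PM from consumer to supplier, and drop the
         * link automatically when the consumer driver unbinds. */
        link = device_link_add(consumer, supplier,
                               DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE);
        if (!link)
                return -EINVAL;

        return 0;
}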
+ */ + +#ifndef __LINUX_DMA_FENCE_ARRAY_H +#define __LINUX_DMA_FENCE_ARRAY_H + +#include <linux/dma-fence.h> + +/** + * struct dma_fence_array_cb - callback helper for fence array + * @cb: fence callback structure for signaling + * @array: reference to the parent fence array object + */ +struct dma_fence_array_cb { + struct dma_fence_cb cb; + struct dma_fence_array *array; +}; + +/** + * struct dma_fence_array - fence to represent an array of fences + * @base: fence base class + * @lock: spinlock for fence handling + * @num_fences: number of fences in the array + * @num_pending: fences in the array still pending + * @fences: array of the fences + */ +struct dma_fence_array { + struct dma_fence base; + + spinlock_t lock; + unsigned num_fences; + atomic_t num_pending; + struct dma_fence **fences; +}; + +extern const struct dma_fence_ops dma_fence_array_ops; + +/** + * dma_fence_is_array - check if a fence is from the array subclass + * @fence: fence to test + * + * Return true if it is a dma_fence_array and false otherwise. + */ +static inline bool dma_fence_is_array(struct dma_fence *fence) +{ + return fence->ops == &dma_fence_array_ops; +} + +/** + * to_dma_fence_array - cast a fence to a dma_fence_array + * @fence: fence to cast to a dma_fence_array + * + * Returns NULL if the fence is not a dma_fence_array, + * or the dma_fence_array otherwise. + */ +static inline struct dma_fence_array * +to_dma_fence_array(struct dma_fence *fence) +{ + if (fence->ops != &dma_fence_array_ops) + return NULL; + + return container_of(fence, struct dma_fence_array, base); +} + +struct dma_fence_array *dma_fence_array_create(int num_fences, + struct dma_fence **fences, + u64 context, unsigned seqno, + bool signal_on_any); + +#endif /* __LINUX_DMA_FENCE_ARRAY_H */ diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h new file mode 100644 index 000000000000..d51a7d23c358 --- /dev/null +++ b/include/linux/dma-fence.h @@ -0,0 +1,438 @@ +/* + * Fence mechanism for dma-buf to allow for asynchronous dma access + * + * Copyright (C) 2012 Canonical Ltd + * Copyright (C) 2012 Texas Instruments + * + * Authors: + * Rob Clark <robdclark@gmail.com> + * Maarten Lankhorst <maarten.lankhorst@canonical.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __LINUX_DMA_FENCE_H +#define __LINUX_DMA_FENCE_H + +#include <linux/err.h> +#include <linux/wait.h> +#include <linux/list.h> +#include <linux/bitops.h> +#include <linux/kref.h> +#include <linux/sched.h> +#include <linux/printk.h> +#include <linux/rcupdate.h> + +struct dma_fence; +struct dma_fence_ops; +struct dma_fence_cb; + +/** + * struct dma_fence - software synchronization primitive + * @refcount: refcount for this fence + * @ops: dma_fence_ops associated with this fence + * @rcu: used for releasing fence with kfree_rcu + * @cb_list: list of all callbacks to call + * @lock: spin_lock_irqsave used for locking + * @context: execution context this fence belongs to, returned by + * dma_fence_context_alloc() + * @seqno: the sequence number of this fence inside the execution context, + * can be compared to decide which fence would be signaled later.
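A sketch of dma_fence_array_create() from the header above, merging several fences into one that signals once all of them have signaled (signal_on_any == false); the context and seqno are illustrative, and the caller is assumed to hand over its references along with the fences array:

#include <linux/dma-fence-array.h>

static struct dma_fence *merge_fences(struct dma_fence **fences, int n)
{
        struct dma_fence_array *array;

        array = dma_fence_array_create(n, fences,
                                       dma_fence_context_alloc(1), 1,
                                       false);
        if (!array)
                return NULL;        /* caller still owns fences[] */

        return &array->base;
}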
+ * @flags: A mask of DMA_FENCE_FLAG_* defined below + * @timestamp: Timestamp when the fence was signaled. + * @status: Optional, only valid if < 0, must be set before calling + * dma_fence_signal, indicates that the fence has completed with an error. + * + * the flags member must be manipulated and read using the appropriate + * atomic ops (bit_*), so taking the spinlock will not be needed most + * of the time. + * + * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled + * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called + * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the + * implementer of the fence for its own purposes. Can be used in different + * ways by different fence implementers, so do not rely on this. + * + * Since atomic bitops are used, this is not guaranteed to be the case. + * Particularly, if the bit was set, but dma_fence_signal was called right + * before this bit was set, it would have been able to set the + * DMA_FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. + * Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting + * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that + * after dma_fence_signal was called, any enable_signaling call will have either + * been completed, or never called at all. + */ +struct dma_fence { + struct kref refcount; + const struct dma_fence_ops *ops; + struct rcu_head rcu; + struct list_head cb_list; + spinlock_t *lock; + u64 context; + unsigned seqno; + unsigned long flags; + ktime_t timestamp; + int status; +}; + +enum dma_fence_flag_bits { + DMA_FENCE_FLAG_SIGNALED_BIT, + DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + DMA_FENCE_FLAG_USER_BITS, /* must always be last member */ +}; + +typedef void (*dma_fence_func_t)(struct dma_fence *fence, + struct dma_fence_cb *cb); + +/** + * struct dma_fence_cb - callback for dma_fence_add_callback + * @node: used by dma_fence_add_callback to append this struct to fence::cb_list + * @func: dma_fence_func_t to call + * + * This struct will be initialized by dma_fence_add_callback, additional + * data can be passed along by embedding dma_fence_cb in another struct. + */ +struct dma_fence_cb { + struct list_head node; + dma_fence_func_t func; +}; + +/** + * struct dma_fence_ops - operations implemented for fence + * @get_driver_name: returns the driver name. + * @get_timeline_name: return the name of the context this fence belongs to. + * @enable_signaling: enable software signaling of fence. + * @signaled: [optional] peek whether the fence is signaled, can be null. + * @wait: custom wait implementation, or dma_fence_default_wait. + * @release: [optional] called on destruction of fence, can be null + * @fill_driver_data: [optional] callback to fill in free-form debug info + * Returns amount of bytes filled, or -errno. + * @fence_value_str: [optional] fills in the value of the fence as a string + * @timeline_value_str: [optional] fills in the current value of the timeline + * as a string + * + * Notes on enable_signaling: + * For fence implementations that have the capability for hw->hw + * signaling, they can implement this op to enable the necessary + * irqs, or insert commands into cmdstream, etc. This is called + * in the first wait() or add_callback() path to let the fence + * implementation know that there is another driver waiting on + * the signal (ie. hw->sw case). + * + * This function can be called from atomic context, but not + * from irq context, so normal spinlocks can be used.
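A minimal, illustrative implementation against struct dma_fence_ops as documented above (dma_fence_default_wait and dma_fence_init are declared just below); no real hardware is implied:

static const char *example_get_driver_name(struct dma_fence *fence)
{
        return "example";
}

static const char *example_get_timeline_name(struct dma_fence *fence)
{
        return "example-timeline";
}

static bool example_enable_signaling(struct dma_fence *fence)
{
        /* Arm the irq or cmdstream path that will eventually call
         * dma_fence_signal(); returning false would mean the fence has
         * already passed or signaling could not be enabled. */
        return true;
}

static const struct dma_fence_ops example_fence_ops = {
        .get_driver_name        = example_get_driver_name,
        .get_timeline_name        = example_get_timeline_name,
        .enable_signaling        = example_enable_signaling,
        .wait                        = dma_fence_default_wait,        /* must not be NULL */
};

/* At fence creation time, with a lock shared by the fence's context:
 *        dma_fence_init(&f->base, &example_fence_ops, &ctx->lock,
 *                       ctx->context, ++ctx->seqno);
 */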
+ * + * A return value of false indicates the fence already passed, + * or some failure occurred that made it impossible to enable + * signaling. True indicates successful enabling. + * + * fence->status may be set in enable_signaling, but only when false is + * returned. + * + * Calling dma_fence_signal before enable_signaling is called allows + * for a tiny race window in which enable_signaling is called during, + * before, or after dma_fence_signal. To fight this, it is recommended + * that before enable_signaling returns true an extra reference is + * taken on the fence, to be released when the fence is signaled. + * This will mean dma_fence_signal will still be called twice, but + * the second time will be a noop since it was already signaled. + * + * Notes on signaled: + * May set fence->status if returning true. + * + * Notes on wait: + * Must not be NULL, set to dma_fence_default_wait for default implementation. + * the dma_fence_default_wait implementation should work for any fence, as long + * as enable_signaling works correctly. + * + * Must return -ERESTARTSYS if the wait is intr = true and the wait was + * interrupted, and remaining jiffies if fence has signaled, or 0 if wait + * timed out. Can also return other error values on custom implementations, + * which should be treated as if the fence is signaled. For example a hardware + * lockup could be reported like that. + * + * Notes on release: + * Can be NULL, this function allows additional commands to run on + * destruction of the fence. Can be called from irq context. + * If pointer is set to NULL, kfree will get called instead. + */ + +struct dma_fence_ops { + const char * (*get_driver_name)(struct dma_fence *fence); + const char * (*get_timeline_name)(struct dma_fence *fence); + bool (*enable_signaling)(struct dma_fence *fence); + bool (*signaled)(struct dma_fence *fence); + signed long (*wait)(struct dma_fence *fence, + bool intr, signed long timeout); + void (*release)(struct dma_fence *fence); + + int (*fill_driver_data)(struct dma_fence *fence, void *data, int size); + void (*fence_value_str)(struct dma_fence *fence, char *str, int size); + void (*timeline_value_str)(struct dma_fence *fence, + char *str, int size); +}; + +void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, + spinlock_t *lock, u64 context, unsigned seqno); + +void dma_fence_release(struct kref *kref); +void dma_fence_free(struct dma_fence *fence); + +/** + * dma_fence_put - decreases refcount of the fence + * @fence: [in] fence to reduce refcount of + */ +static inline void dma_fence_put(struct dma_fence *fence) +{ + if (fence) + kref_put(&fence->refcount, dma_fence_release); +} + +/** + * dma_fence_get - increases refcount of the fence + * @fence: [in] fence to increase refcount of + * + * Returns the same fence, with refcount increased by 1. + */ +static inline struct dma_fence *dma_fence_get(struct dma_fence *fence) +{ + if (fence) + kref_get(&fence->refcount); + return fence; +} + +/** + * dma_fence_get_rcu - get a fence from a reservation_object_list with + * rcu read lock + * @fence: [in] fence to increase refcount of + * + * Function returns NULL if no refcount could be obtained, or the fence. 
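On the consumer side, the get/put helpers above pair with the wait entry points declared further down; a bounded-wait sketch (the timeout value is arbitrary):

#include <linux/jiffies.h>

static int example_wait_bounded(struct dma_fence *fence)
{
        signed long ret;

        dma_fence_get(fence);        /* pin the fence across the sleep */
        ret = dma_fence_wait_timeout(fence, true,
                                     msecs_to_jiffies(100));
        dma_fence_put(fence);

        if (ret < 0)
                return ret;                /* -ERESTARTSYS or a custom error */
        if (ret == 0)
                return -ETIMEDOUT;        /* wait timed out */
        return 0;                        /* signaled; ret jiffies remained */
}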
+ */ +static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence) +{ + if (kref_get_unless_zero(&fence->refcount)) + return fence; + else + return NULL; +} + +/** + * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence + * @fencep: [in] pointer to fence to increase refcount of + * + * Function returns NULL if no refcount could be obtained, or the fence. + * This function handles acquiring a reference to a fence that may be + * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU), + * so long as the caller is using RCU on the pointer to the fence. + * + * An alternative mechanism is to employ a seqlock to protect a bunch of + * fences, such as used by struct reservation_object. When using a seqlock, + * the seqlock must be taken before and checked after a reference to the + * fence is acquired (as shown here). + * + * The caller is required to hold the RCU read lock. + */ +static inline struct dma_fence * +dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep) +{ + do { + struct dma_fence *fence; + + fence = rcu_dereference(*fencep); + if (!fence || !dma_fence_get_rcu(fence)) + return NULL; + + /* The atomic_inc_not_zero() inside dma_fence_get_rcu() + * provides a full memory barrier upon success (such as now). + * This is paired with the write barrier from assigning + * to the __rcu protected fence pointer so that if that + * pointer still matches the current fence, we know we + * have successfully acquired a reference to it. If it no + * longer matches, we are holding a reference to some other + * reallocated pointer. This is possible if the allocator + * is using a freelist like SLAB_DESTROY_BY_RCU where the + * fence remains valid for the RCU grace period, but it + * may be reallocated. When using such allocators, we are + * responsible for ensuring the reference we get is to + * the right fence, as below. + */ + if (fence == rcu_access_pointer(*fencep)) + return rcu_pointer_handoff(fence); + + dma_fence_put(fence); + } while (1); +} + +int dma_fence_signal(struct dma_fence *fence); +int dma_fence_signal_locked(struct dma_fence *fence); +signed long dma_fence_default_wait(struct dma_fence *fence, + bool intr, signed long timeout); +int dma_fence_add_callback(struct dma_fence *fence, + struct dma_fence_cb *cb, + dma_fence_func_t func); +bool dma_fence_remove_callback(struct dma_fence *fence, + struct dma_fence_cb *cb); +void dma_fence_enable_sw_signaling(struct dma_fence *fence); + +/** + * dma_fence_is_signaled_locked - Return an indication if the fence + * is signaled yet. + * @fence: [in] the fence to check + * + * Returns true if the fence was already signaled, false if not. Since this + * function doesn't enable signaling, it is not guaranteed to ever return + * true if dma_fence_add_callback, dma_fence_wait or + * dma_fence_enable_sw_signaling haven't been called before. + * + * This function requires fence->lock to be held. + */ +static inline bool +dma_fence_is_signaled_locked(struct dma_fence *fence) +{ + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return true; + + if (fence->ops->signaled && fence->ops->signaled(fence)) { + dma_fence_signal_locked(fence); + return true; + } + + return false; +} + +/** + * dma_fence_is_signaled - Return an indication if the fence is signaled yet. + * @fence: [in] the fence to check + * + * Returns true if the fence was already signaled, false if not.
Since this + * function doesn't enable signaling, it is not guaranteed to ever return + * true if dma_fence_add_callback, dma_fence_wait or + * dma_fence_enable_sw_signaling haven't been called before. + * + * It's recommended for seqno fences to call dma_fence_signal when the + * operation is complete, it makes it possible to prevent issues from + * wraparound between time of issue and time of use by checking the return + * value of this function before calling hardware-specific wait instructions. + */ +static inline bool +dma_fence_is_signaled(struct dma_fence *fence) +{ + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return true; + + if (fence->ops->signaled && fence->ops->signaled(fence)) { + dma_fence_signal(fence); + return true; + } + + return false; +} + +/** + * dma_fence_is_later - return if f1 is chronologically later than f2 + * @f1: [in] the first fence from the same context + * @f2: [in] the second fence from the same context + * + * Returns true if f1 is chronologically later than f2. Both fences must be + * from the same context, since a seqno is not re-used across contexts. + */ +static inline bool dma_fence_is_later(struct dma_fence *f1, + struct dma_fence *f2) +{ + if (WARN_ON(f1->context != f2->context)) + return false; + + return (int)(f1->seqno - f2->seqno) > 0; +} + +/** + * dma_fence_later - return the chronologically later fence + * @f1: [in] the first fence from the same context + * @f2: [in] the second fence from the same context + * + * Returns NULL if both fences are signaled, otherwise the fence that would be + * signaled last. Both fences must be from the same context, since a seqno is + * not re-used across contexts. + */ +static inline struct dma_fence *dma_fence_later(struct dma_fence *f1, + struct dma_fence *f2) +{ + if (WARN_ON(f1->context != f2->context)) + return NULL; + + /* + * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never + * have been set if enable_signaling wasn't called, and enabling that + * here is overkill. + */ + if (dma_fence_is_later(f1, f2)) + return dma_fence_is_signaled(f1) ? NULL : f1; + else + return dma_fence_is_signaled(f2) ? NULL : f2; +} + +signed long dma_fence_wait_timeout(struct dma_fence *, + bool intr, signed long timeout); +signed long dma_fence_wait_any_timeout(struct dma_fence **fences, + uint32_t count, + bool intr, signed long timeout, + uint32_t *idx); + +/** + * dma_fence_wait - sleep until the fence gets signaled + * @fence: [in] the fence to wait on + * @intr: [in] if true, do an interruptible wait + * + * This function will return -ERESTARTSYS if interrupted by a signal, + * or 0 if the fence was signaled. Other error values may be + * returned on custom implementations. + * + * Performs a synchronous wait on this fence. It is assumed the caller + * directly or indirectly holds a reference to the fence, otherwise the + * fence might be freed before return, resulting in undefined behavior. + */ +static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr) +{ + signed long ret; + + /* Since dma_fence_wait_timeout cannot timeout with + * MAX_SCHEDULE_TIMEOUT, only valid return values are + * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. + */ + ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); + + return ret < 0 ? ret : 0; +} + +u64 dma_fence_context_alloc(unsigned num); + +#define DMA_FENCE_TRACE(f, fmt, args...) 
\ + do { \ + struct dma_fence *__ff = (f); \ + if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \ + pr_info("f %llu#%u: " fmt, \ + __ff->context, __ff->seqno, ##args); \ + } while (0) + +#define DMA_FENCE_WARN(f, fmt, args...) \ + do { \ + struct dma_fence *__ff = (f); \ + pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ + ##args); \ + } while (0) + +#define DMA_FENCE_ERR(f, fmt, args...) \ + do { \ + struct dma_fence *__ff = (f); \ + pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ + ##args); \ + } while (0) + +#endif /* __LINUX_DMA_FENCE_H */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 08528afdf58b..10c5a17b1f51 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -243,29 +243,33 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg ops->unmap_sg(dev, sg, nents, dir, attrs); } -static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, - size_t offset, size_t size, - enum dma_data_direction dir) +static inline dma_addr_t dma_map_page_attrs(struct device *dev, + struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) { struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; kmemcheck_mark_initialized(page_address(page) + offset, size); BUG_ON(!valid_dma_direction(dir)); - addr = ops->map_page(dev, page, offset, size, dir, 0); + addr = ops->map_page(dev, page, offset, size, dir, attrs); debug_dma_map_page(dev, page, offset, size, dir, addr, false); return addr; } -static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir) +static inline void dma_unmap_page_attrs(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long attrs) { struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_page) - ops->unmap_page(dev, addr, size, dir, 0); + ops->unmap_page(dev, addr, size, dir, attrs); debug_dma_unmap_page(dev, addr, size, dir, false); } @@ -385,6 +389,8 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0) #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0) #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0) +#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0) +#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0) extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size); diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h index c934d3a96b5e..2896f93808ae 100644 --- a/include/linux/drbd_genl.h +++ b/include/linux/drbd_genl.h @@ -67,7 +67,7 @@ * genl_magic_func.h * generates an entry in the static genl_ops array, * and static register/unregister functions to - * genl_register_family_with_ops(). + * genl_register_family(). 
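A sketch of the new dma_map_page_attrs()/dma_unmap_page_attrs() pair above, passing one DMA attribute through to the arch ops; DMA_ATTR_SKIP_CPU_SYNC is used purely as an example attribute, and dev/page come from the caller:

#include <linux/dma-mapping.h>

static int example_map_cycle(struct device *dev, struct page *page)
{
        dma_addr_t addr;

        addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE,
                                  DMA_ATTR_SKIP_CPU_SYNC);
        if (dma_mapping_error(dev, addr))
                return -ENOMEM;

        /* ...DMA runs; sync explicitly where the attribute skipped it... */

        dma_unmap_page_attrs(dev, addr, PAGE_SIZE, DMA_TO_DEVICE,
                             DMA_ATTR_SKIP_CPU_SYNC);
        return 0;
}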
* * flags and handler: * GENL_op_init( .doit = x, .dumpit = y, .flags = something) diff --git a/include/linux/edac.h b/include/linux/edac.h index 9e0d78966552..cb56dcba68c6 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -134,6 +134,7 @@ enum dev_type { enum hw_event_mc_err_type { HW_EVENT_ERR_CORRECTED, HW_EVENT_ERR_UNCORRECTED, + HW_EVENT_ERR_DEFERRED, HW_EVENT_ERR_FATAL, HW_EVENT_ERR_INFO, }; @@ -145,6 +146,8 @@ static inline char *mc_event_error_type(const unsigned int err_type) return "Corrected"; case HW_EVENT_ERR_UNCORRECTED: return "Uncorrected"; + case HW_EVENT_ERR_DEFERRED: + return "Deferred"; case HW_EVENT_ERR_FATAL: return "Fatal"; default: @@ -192,10 +195,11 @@ static inline char *mc_event_error_type(const unsigned int err_type) * @MEM_DDR3: DDR3 RAM * @MEM_RDDR3: Registered DDR3 RAM * This is a variant of the DDR3 memories. - * @MEM_LRDDR3 Load-Reduced DDR3 memory. + * @MEM_LRDDR3: Load-Reduced DDR3 memory. * @MEM_DDR4: Unbuffered DDR4 RAM * @MEM_RDDR4: Registered DDR4 RAM * This is a variant of the DDR4 memories. + * @MEM_LRDDR4: Load-Reduced DDR4 memory. */ enum mem_type { MEM_EMPTY = 0, @@ -218,6 +222,7 @@ enum mem_type { MEM_LRDDR3, MEM_DDR4, MEM_RDDR4, + MEM_LRDDR4, }; #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) @@ -239,6 +244,7 @@ enum mem_type { #define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) #define MEM_FLAG_DDR4 BIT(MEM_DDR4) #define MEM_FLAG_RDDR4 BIT(MEM_RDDR4) +#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4) /** * enum edac-type - Error Detection and Correction capabilities and mode diff --git a/include/linux/efi.h b/include/linux/efi.h index 2d089487d2da..a07a476178cd 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -443,6 +443,22 @@ typedef struct { #define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000 #define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000 +typedef struct { + u32 version; + u32 get; + u32 set; + u32 del; + u32 get_all; +} apple_properties_protocol_32_t; + +typedef struct { + u64 version; + u64 get; + u64 set; + u64 del; + u64 get_all; +} apple_properties_protocol_64_t; + /* * Types and defines for EFI ResetSystem */ @@ -589,8 +605,10 @@ void efi_native_runtime_setup(void); #define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0) #define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5) #define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44) +#define EFI_RNG_ALGORITHM_RAW EFI_GUID(0xe43176d7, 0xb6e8, 0x4827, 0xb7, 0x84, 0x7f, 0xfd, 0xc4, 0xb6, 0x85, 0x61) #define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) #define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0) /* * This GUID is used to pass to the kernel proper the struct screen_info @@ -599,6 +617,7 @@ void efi_native_runtime_setup(void); */ #define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) #define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) +#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) typedef struct { efi_guid_t guid; @@ -872,6 +891,7 @@ 
extern struct efi { unsigned long esrt; /* ESRT table */ unsigned long properties_table; /* properties table */ unsigned long mem_attr_table; /* memory attributes table */ + unsigned long rng_seed; /* UEFI firmware random seed */ efi_get_time_t *get_time; efi_set_time_t *set_time; efi_get_wakeup_time_t *get_wakeup_time; @@ -1145,6 +1165,26 @@ struct efi_generic_dev_path { u16 length; } __attribute ((packed)); +struct efi_dev_path { + u8 type; /* can be replaced with unnamed */ + u8 sub_type; /* struct efi_generic_dev_path; */ + u16 length; /* once we've moved to -std=c11 */ + union { + struct { + u32 hid; + u32 uid; + } acpi; + struct { + u8 fn; + u8 dev; + } pci; + }; +} __attribute ((packed)); + +#if IS_ENABLED(CONFIG_EFI_DEV_PATH_PARSER) +struct device *efi_get_device_by_path(struct efi_dev_path **node, size_t *len); +#endif + static inline void memrange_efi_to_native(u64 *addr, u64 *npages) { *npages = PFN_UP(*addr + (*npages<<EFI_PAGE_SHIFT)) - PFN_DOWN(*addr); @@ -1493,4 +1533,10 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table, struct efi_boot_memmap *map, void *priv, efi_exit_boot_map_processing priv_func); + +struct linux_efi_random_seed { + u32 size; + u8 bits[]; +}; + #endif /* _LINUX_EFI_H */ diff --git a/include/linux/elevator.h b/include/linux/elevator.h index e7f358d2e5fc..b276e9ef0e0b 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -30,7 +30,7 @@ typedef int (elevator_dispatch_fn) (struct request_queue *, int); typedef void (elevator_add_req_fn) (struct request_queue *, struct request *); typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *); typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); -typedef int (elevator_may_queue_fn) (struct request_queue *, int, int); +typedef int (elevator_may_queue_fn) (struct request_queue *, unsigned int); typedef void (elevator_init_icq_fn) (struct io_cq *); typedef void (elevator_exit_icq_fn) (struct io_cq *); @@ -108,6 +108,11 @@ struct elevator_type #define ELV_HASH_BITS 6 +void elv_rqhash_del(struct request_queue *q, struct request *rq); +void elv_rqhash_add(struct request_queue *q, struct request *rq); +void elv_rqhash_reposition(struct request_queue *q, struct request *rq); +struct request *elv_rqhash_find(struct request_queue *q, sector_t offset); + /* * each queue has an elevator_queue associated with it */ @@ -139,7 +144,7 @@ extern struct request *elv_former_request(struct request_queue *, struct request extern struct request *elv_latter_request(struct request_queue *, struct request *); extern int elv_register_queue(struct request_queue *q); extern void elv_unregister_queue(struct request_queue *q); -extern int elv_may_queue(struct request_queue *, int, int); +extern int elv_may_queue(struct request_queue *, unsigned int); extern void elv_completed_request(struct request_queue *, struct request *); extern int elv_set_request(struct request_queue *q, struct request *rq, struct bio *bio, gfp_t gfp_mask); diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 422630b8e588..cea41a124a80 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -52,10 +52,17 @@ #define VERSION_LEN 256 #define MAX_VOLUME_NAME 512 +#define MAX_PATH_LEN 64 +#define MAX_DEVICES 8 /* * For superblock */ +struct f2fs_device { + __u8 path[MAX_PATH_LEN]; + __le32 total_segments; +} __packed; + struct f2fs_super_block { __le32 magic; /* Magic Number */ __le16 major_ver; /* Major Version */ @@ -94,7 +101,8 @@ struct 
f2fs_super_block { __le32 feature; /* defined features */ __u8 encryption_level; /* versioning level for encryption */ __u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */ - __u8 reserved[871]; /* valid reserved region */ + struct f2fs_device devs[MAX_DEVICES]; /* device list */ + __u8 reserved[327]; /* valid reserved region */ } __packed; /* diff --git a/include/linux/fddidevice.h b/include/linux/fddidevice.h index 9a79f0106da1..32c22cfb238b 100644 --- a/include/linux/fddidevice.h +++ b/include/linux/fddidevice.h @@ -26,7 +26,6 @@ #ifdef __KERNEL__ __be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev); -int fddi_change_mtu(struct net_device *dev, int new_mtu); struct net_device *alloc_fddidev(int sizeof_priv); #endif diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h deleted file mode 100644 index a44794e508df..000000000000 --- a/include/linux/fence-array.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * fence-array: aggregates fence to be waited together - * - * Copyright (C) 2016 Collabora Ltd - * Copyright (C) 2016 Advanced Micro Devices, Inc. - * Authors: - * Gustavo Padovan <gustavo@padovan.org> - * Christian König <christian.koenig@amd.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __LINUX_FENCE_ARRAY_H -#define __LINUX_FENCE_ARRAY_H - -#include <linux/fence.h> - -/** - * struct fence_array_cb - callback helper for fence array - * @cb: fence callback structure for signaling - * @array: reference to the parent fence array object - */ -struct fence_array_cb { - struct fence_cb cb; - struct fence_array *array; -}; - -/** - * struct fence_array - fence to represent an array of fences - * @base: fence base class - * @lock: spinlock for fence handling - * @num_fences: number of fences in the array - * @num_pending: fences in the array still pending - * @fences: array of the fences - */ -struct fence_array { - struct fence base; - - spinlock_t lock; - unsigned num_fences; - atomic_t num_pending; - struct fence **fences; -}; - -extern const struct fence_ops fence_array_ops; - -/** - * fence_is_array - check if a fence is from the array subsclass - * - * Return true if it is a fence_array and false otherwise. - */ -static inline bool fence_is_array(struct fence *fence) -{ - return fence->ops == &fence_array_ops; -} - -/** - * to_fence_array - cast a fence to a fence_array - * @fence: fence to cast to a fence_array - * - * Returns NULL if the fence is not a fence_array, - * or the fence_array otherwise. 
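Back in the f2fs_super_block change above, the arithmetic checks out: each packed struct f2fs_device is 64 + 4 = 68 bytes, so MAX_DEVICES (8) of them occupy 544 bytes, exactly the 871 - 327 bytes given up by the reserved field. A compile-time sketch of that invariant (BUILD_BUG_ON from linux/bug.h):

static inline void f2fs_sb_layout_check(void)
{
        BUILD_BUG_ON(sizeof(struct f2fs_device) != 68);        /* 64 + 4, __packed */
        BUILD_BUG_ON(MAX_DEVICES * sizeof(struct f2fs_device) != 871 - 327);
}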
- */ -static inline struct fence_array *to_fence_array(struct fence *fence) -{ - if (fence->ops != &fence_array_ops) - return NULL; - - return container_of(fence, struct fence_array, base); -} - -struct fence_array *fence_array_create(int num_fences, struct fence **fences, - u64 context, unsigned seqno, - bool signal_on_any); - -#endif /* __LINUX_FENCE_ARRAY_H */ diff --git a/include/linux/fence.h b/include/linux/fence.h deleted file mode 100644 index 0d763053f97a..000000000000 --- a/include/linux/fence.h +++ /dev/null @@ -1,378 +0,0 @@ -/* - * Fence mechanism for dma-buf to allow for asynchronous dma access - * - * Copyright (C) 2012 Canonical Ltd - * Copyright (C) 2012 Texas Instruments - * - * Authors: - * Rob Clark <robdclark@gmail.com> - * Maarten Lankhorst <maarten.lankhorst@canonical.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __LINUX_FENCE_H -#define __LINUX_FENCE_H - -#include <linux/err.h> -#include <linux/wait.h> -#include <linux/list.h> -#include <linux/bitops.h> -#include <linux/kref.h> -#include <linux/sched.h> -#include <linux/printk.h> -#include <linux/rcupdate.h> - -struct fence; -struct fence_ops; -struct fence_cb; - -/** - * struct fence - software synchronization primitive - * @refcount: refcount for this fence - * @ops: fence_ops associated with this fence - * @rcu: used for releasing fence with kfree_rcu - * @cb_list: list of all callbacks to call - * @lock: spin_lock_irqsave used for locking - * @context: execution context this fence belongs to, returned by - * fence_context_alloc() - * @seqno: the sequence number of this fence inside the execution context, - * can be compared to decide which fence would be signaled later. - * @flags: A mask of FENCE_FLAG_* defined below - * @timestamp: Timestamp when the fence was signaled. - * @status: Optional, only valid if < 0, must be set before calling - * fence_signal, indicates that the fence has completed with an error. - * - * the flags member must be manipulated and read using the appropriate - * atomic ops (bit_*), so taking the spinlock will not be needed most - * of the time. - * - * FENCE_FLAG_SIGNALED_BIT - fence is already signaled - * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called* - * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the - * implementer of the fence for its own purposes. Can be used in different - * ways by different fence implementers, so do not rely on this. - * - * Since atomic bitops are used, this is not guaranteed to be the case. - * Particularly, if the bit was set, but fence_signal was called right - * before this bit was set, it would have been able to set the - * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. - * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting - * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that - * after fence_signal was called, any enable_signaling call will have either - * been completed, or never called at all. 
- */ -struct fence { - struct kref refcount; - const struct fence_ops *ops; - struct rcu_head rcu; - struct list_head cb_list; - spinlock_t *lock; - u64 context; - unsigned seqno; - unsigned long flags; - ktime_t timestamp; - int status; -}; - -enum fence_flag_bits { - FENCE_FLAG_SIGNALED_BIT, - FENCE_FLAG_ENABLE_SIGNAL_BIT, - FENCE_FLAG_USER_BITS, /* must always be last member */ -}; - -typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb); - -/** - * struct fence_cb - callback for fence_add_callback - * @node: used by fence_add_callback to append this struct to fence::cb_list - * @func: fence_func_t to call - * - * This struct will be initialized by fence_add_callback, additional - * data can be passed along by embedding fence_cb in another struct. - */ -struct fence_cb { - struct list_head node; - fence_func_t func; -}; - -/** - * struct fence_ops - operations implemented for fence - * @get_driver_name: returns the driver name. - * @get_timeline_name: return the name of the context this fence belongs to. - * @enable_signaling: enable software signaling of fence. - * @signaled: [optional] peek whether the fence is signaled, can be null. - * @wait: custom wait implementation, or fence_default_wait. - * @release: [optional] called on destruction of fence, can be null - * @fill_driver_data: [optional] callback to fill in free-form debug info - * Returns amount of bytes filled, or -errno. - * @fence_value_str: [optional] fills in the value of the fence as a string - * @timeline_value_str: [optional] fills in the current value of the timeline - * as a string - * - * Notes on enable_signaling: - * For fence implementations that have the capability for hw->hw - * signaling, they can implement this op to enable the necessary - * irqs, or insert commands into cmdstream, etc. This is called - * in the first wait() or add_callback() path to let the fence - * implementation know that there is another driver waiting on - * the signal (ie. hw->sw case). - * - * This function can be called called from atomic context, but not - * from irq context, so normal spinlocks can be used. - * - * A return value of false indicates the fence already passed, - * or some failure occurred that made it impossible to enable - * signaling. True indicates successful enabling. - * - * fence->status may be set in enable_signaling, but only when false is - * returned. - * - * Calling fence_signal before enable_signaling is called allows - * for a tiny race window in which enable_signaling is called during, - * before, or after fence_signal. To fight this, it is recommended - * that before enable_signaling returns true an extra reference is - * taken on the fence, to be released when the fence is signaled. - * This will mean fence_signal will still be called twice, but - * the second time will be a noop since it was already signaled. - * - * Notes on signaled: - * May set fence->status if returning true. - * - * Notes on wait: - * Must not be NULL, set to fence_default_wait for default implementation. - * the fence_default_wait implementation should work for any fence, as long - * as enable_signaling works correctly. - * - * Must return -ERESTARTSYS if the wait is intr = true and the wait was - * interrupted, and remaining jiffies if fence has signaled, or 0 if wait - * timed out. Can also return other error values on custom implementations, - * which should be treated as if the fence is signaled. For example a hardware - * lockup could be reported like that. 
- * - * Notes on release: - * Can be NULL, this function allows additional commands to run on - * destruction of the fence. Can be called from irq context. - * If pointer is set to NULL, kfree will get called instead. - */ - -struct fence_ops { - const char * (*get_driver_name)(struct fence *fence); - const char * (*get_timeline_name)(struct fence *fence); - bool (*enable_signaling)(struct fence *fence); - bool (*signaled)(struct fence *fence); - signed long (*wait)(struct fence *fence, bool intr, signed long timeout); - void (*release)(struct fence *fence); - - int (*fill_driver_data)(struct fence *fence, void *data, int size); - void (*fence_value_str)(struct fence *fence, char *str, int size); - void (*timeline_value_str)(struct fence *fence, char *str, int size); -}; - -void fence_init(struct fence *fence, const struct fence_ops *ops, - spinlock_t *lock, u64 context, unsigned seqno); - -void fence_release(struct kref *kref); -void fence_free(struct fence *fence); - -/** - * fence_get - increases refcount of the fence - * @fence: [in] fence to increase refcount of - * - * Returns the same fence, with refcount increased by 1. - */ -static inline struct fence *fence_get(struct fence *fence) -{ - if (fence) - kref_get(&fence->refcount); - return fence; -} - -/** - * fence_get_rcu - get a fence from a reservation_object_list with rcu read lock - * @fence: [in] fence to increase refcount of - * - * Function returns NULL if no refcount could be obtained, or the fence. - */ -static inline struct fence *fence_get_rcu(struct fence *fence) -{ - if (kref_get_unless_zero(&fence->refcount)) - return fence; - else - return NULL; -} - -/** - * fence_put - decreases refcount of the fence - * @fence: [in] fence to reduce refcount of - */ -static inline void fence_put(struct fence *fence) -{ - if (fence) - kref_put(&fence->refcount, fence_release); -} - -int fence_signal(struct fence *fence); -int fence_signal_locked(struct fence *fence); -signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout); -int fence_add_callback(struct fence *fence, struct fence_cb *cb, - fence_func_t func); -bool fence_remove_callback(struct fence *fence, struct fence_cb *cb); -void fence_enable_sw_signaling(struct fence *fence); - -/** - * fence_is_signaled_locked - Return an indication if the fence is signaled yet. - * @fence: [in] the fence to check - * - * Returns true if the fence was already signaled, false if not. Since this - * function doesn't enable signaling, it is not guaranteed to ever return - * true if fence_add_callback, fence_wait or fence_enable_sw_signaling - * haven't been called before. - * - * This function requires fence->lock to be held. - */ -static inline bool -fence_is_signaled_locked(struct fence *fence) -{ - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - return true; - - if (fence->ops->signaled && fence->ops->signaled(fence)) { - fence_signal_locked(fence); - return true; - } - - return false; -} - -/** - * fence_is_signaled - Return an indication if the fence is signaled yet. - * @fence: [in] the fence to check - * - * Returns true if the fence was already signaled, false if not. Since this - * function doesn't enable signaling, it is not guaranteed to ever return - * true if fence_add_callback, fence_wait or fence_enable_sw_signaling - * haven't been called before. 
- * - * It's recommended for seqno fences to call fence_signal when the - * operation is complete, it makes it possible to prevent issues from - * wraparound between time of issue and time of use by checking the return - * value of this function before calling hardware-specific wait instructions. - */ -static inline bool -fence_is_signaled(struct fence *fence) -{ - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - return true; - - if (fence->ops->signaled && fence->ops->signaled(fence)) { - fence_signal(fence); - return true; - } - - return false; -} - -/** - * fence_is_later - return if f1 is chronologically later than f2 - * @f1: [in] the first fence from the same context - * @f2: [in] the second fence from the same context - * - * Returns true if f1 is chronologically later than f2. Both fences must be - * from the same context, since a seqno is not re-used across contexts. - */ -static inline bool fence_is_later(struct fence *f1, struct fence *f2) -{ - if (WARN_ON(f1->context != f2->context)) - return false; - - return (int)(f1->seqno - f2->seqno) > 0; -} - -/** - * fence_later - return the chronologically later fence - * @f1: [in] the first fence from the same context - * @f2: [in] the second fence from the same context - * - * Returns NULL if both fences are signaled, otherwise the fence that would be - * signaled last. Both fences must be from the same context, since a seqno is - * not re-used across contexts. - */ -static inline struct fence *fence_later(struct fence *f1, struct fence *f2) -{ - if (WARN_ON(f1->context != f2->context)) - return NULL; - - /* - * can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been - * set if enable_signaling wasn't called, and enabling that here is - * overkill. - */ - if (fence_is_later(f1, f2)) - return fence_is_signaled(f1) ? NULL : f1; - else - return fence_is_signaled(f2) ? NULL : f2; -} - -signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout); -signed long fence_wait_any_timeout(struct fence **fences, uint32_t count, - bool intr, signed long timeout); - -/** - * fence_wait - sleep until the fence gets signaled - * @fence: [in] the fence to wait on - * @intr: [in] if true, do an interruptible wait - * - * This function will return -ERESTARTSYS if interrupted by a signal, - * or 0 if the fence was signaled. Other error values may be - * returned on custom implementations. - * - * Performs a synchronous wait on this fence. It is assumed the caller - * directly or indirectly holds a reference to the fence, otherwise the - * fence might be freed before return, resulting in undefined behavior. - */ -static inline signed long fence_wait(struct fence *fence, bool intr) -{ - signed long ret; - - /* Since fence_wait_timeout cannot timeout with - * MAX_SCHEDULE_TIMEOUT, only valid return values are - * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. - */ - ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); - - return ret < 0 ? ret : 0; -} - -u64 fence_context_alloc(unsigned num); - -#define FENCE_TRACE(f, fmt, args...) \ - do { \ - struct fence *__ff = (f); \ - if (IS_ENABLED(CONFIG_FENCE_TRACE)) \ - pr_info("f %llu#%u: " fmt, \ - __ff->context, __ff->seqno, ##args); \ - } while (0) - -#define FENCE_WARN(f, fmt, args...) \ - do { \ - struct fence *__ff = (f); \ - pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ - ##args); \ - } while (0) - -#define FENCE_ERR(f, fmt, args...) 
\ - do { \ - struct fence *__ff = (f); \ - pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ - ##args); \ - } while (0) - -#endif /* __LINUX_FENCE_H */ diff --git a/include/linux/filter.h b/include/linux/filter.h index 1f09c521adfe..6a1658308612 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -14,6 +14,7 @@ #include <linux/workqueue.h> #include <linux/sched.h> #include <linux/capability.h> +#include <linux/cryptohash.h> #include <net/sch_generic.h> @@ -56,6 +57,9 @@ struct bpf_prog_aux; /* BPF program can access up to 512 bytes of stack space. */ #define MAX_BPF_STACK 512 +/* Maximum BPF program size in bytes. */ +#define MAX_BPF_SIZE (BPF_MAXINSNS * sizeof(struct bpf_insn)) + /* Helper macros for filter block array initializers. */ /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ @@ -402,14 +406,16 @@ struct bpf_prog { u16 jited:1, /* Is our filter JIT'ed? */ gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ - dst_needed:1; /* Do we need dst entry? */ + dst_needed:1, /* Do we need dst entry? */ + xdp_adjust_head:1; /* Adjusting pkt head? */ kmemcheck_bitfield_end(meta); - u32 len; /* Number of filter blocks */ enum bpf_prog_type type; /* Type of BPF program */ + u32 len; /* Number of filter blocks */ + u32 digest[SHA_DIGEST_WORDS]; /* Program digest */ struct bpf_prog_aux *aux; /* Auxiliary fields */ struct sock_fprog_kern *orig_prog; /* Original BPF program */ - unsigned int (*bpf_func)(const struct sk_buff *skb, - const struct bpf_insn *filter); + unsigned int (*bpf_func)(const void *ctx, + const struct bpf_insn *insn); /* Instructions for interpreter */ union { struct sock_filter insns[0]; @@ -435,10 +441,11 @@ struct bpf_skb_data_end { struct xdp_buff { void *data; void *data_end; + void *data_hard_start; }; /* compute the linear packet data range [data, data_end) which - * will be accessed by cls_bpf and act_bpf programs + * will be accessed by cls_bpf, act_bpf and lwt programs */ static inline void bpf_compute_data_end(struct sk_buff *skb) { @@ -498,16 +505,16 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, return BPF_PROG_RUN(prog, skb); } -static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, - struct xdp_buff *xdp) +static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, + struct xdp_buff *xdp) { - u32 ret; - - rcu_read_lock(); - ret = BPF_PROG_RUN(prog, (void *)xdp); - rcu_read_unlock(); - - return ret; + /* Caller needs to hold rcu_read_lock() (!), otherwise program + * can be released while still running, or map elements could be + * freed early while still having concurrent users. XDP fastpath + * already takes rcu_read_lock() when fetching the program, so + * it's not necessary here anymore. 
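
Given that shift of responsibility, a caller outside the XDP fastpath would now supply the RCU section itself; a hedged sketch (my_run_xdp() is an assumed wrapper):

static u32 my_run_xdp(struct bpf_prog *prog, struct xdp_buff *xdp)
{
	u32 act;

	rcu_read_lock();	/* keeps prog and its map elements alive */
	act = bpf_prog_run_xdp(prog, xdp);
	rcu_read_unlock();

	return act;
}
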
+ */ + return BPF_PROG_RUN(prog, xdp); } static inline unsigned int bpf_prog_size(unsigned int proglen) @@ -590,7 +597,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); -bool bpf_helper_changes_skb_data(void *func); +bool bpf_helper_changes_pkt_data(void *func); struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h new file mode 100644 index 000000000000..dba6e3c697c7 --- /dev/null +++ b/include/linux/fpga/fpga-bridge.h @@ -0,0 +1,60 @@ +#include <linux/device.h> +#include <linux/fpga/fpga-mgr.h> + +#ifndef _LINUX_FPGA_BRIDGE_H +#define _LINUX_FPGA_BRIDGE_H + +struct fpga_bridge; + +/** + * struct fpga_bridge_ops - ops for low level FPGA bridge drivers + * @enable_show: returns the FPGA bridge's status + * @enable_set: set a FPGA bridge as enabled or disabled + * @fpga_bridge_remove: set FPGA into a specific state during driver remove + */ +struct fpga_bridge_ops { + int (*enable_show)(struct fpga_bridge *bridge); + int (*enable_set)(struct fpga_bridge *bridge, bool enable); + void (*fpga_bridge_remove)(struct fpga_bridge *bridge); +}; + +/** + * struct fpga_bridge - FPGA bridge structure + * @name: name of low level FPGA bridge + * @dev: FPGA bridge device + * @mutex: enforces exclusive reference to bridge + * @br_ops: pointer to struct of FPGA bridge ops + * @info: fpga image specific information + * @node: FPGA bridge list node + * @priv: low level driver private date + */ +struct fpga_bridge { + const char *name; + struct device dev; + struct mutex mutex; /* for exclusive reference to bridge */ + const struct fpga_bridge_ops *br_ops; + struct fpga_image_info *info; + struct list_head node; + void *priv; +}; + +#define to_fpga_bridge(d) container_of(d, struct fpga_bridge, dev) + +struct fpga_bridge *of_fpga_bridge_get(struct device_node *node, + struct fpga_image_info *info); +void fpga_bridge_put(struct fpga_bridge *bridge); +int fpga_bridge_enable(struct fpga_bridge *bridge); +int fpga_bridge_disable(struct fpga_bridge *bridge); + +int fpga_bridges_enable(struct list_head *bridge_list); +int fpga_bridges_disable(struct list_head *bridge_list); +void fpga_bridges_put(struct list_head *bridge_list); +int fpga_bridge_get_to_list(struct device_node *np, + struct fpga_image_info *info, + struct list_head *bridge_list); + +int fpga_bridge_register(struct device *dev, const char *name, + const struct fpga_bridge_ops *br_ops, void *priv); +void fpga_bridge_unregister(struct device *dev); + +#endif /* _LINUX_FPGA_BRIDGE_H */ diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index 0940bf45e2f2..16551d5eac36 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h @@ -65,11 +65,26 @@ enum fpga_mgr_states { /* * FPGA Manager flags * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported + * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting */ #define FPGA_MGR_PARTIAL_RECONFIG BIT(0) +#define FPGA_MGR_EXTERNAL_CONFIG BIT(1) + +/** + * struct fpga_image_info - information specific to a FPGA image + * @flags: boolean flags as defined above + * @enable_timeout_us: maximum time to enable traffic through bridge (uSec) + * @disable_timeout_us: maximum time to disable traffic through bridge (uSec) + */ +struct fpga_image_info { + u32 flags; + u32 
enable_timeout_us; + u32 disable_timeout_us; +}; /** * struct fpga_manager_ops - ops for low level fpga manager drivers + * @initial_header_size: Maximum number of bytes that should be passed into write_init * @state: returns an enum value of the FPGA's state * @write_init: prepare the FPGA to receive confuration data * @write: write count bytes of configuration data to the FPGA @@ -81,11 +96,14 @@ enum fpga_mgr_states { * called, so leaving them out is fine. */ struct fpga_manager_ops { + size_t initial_header_size; enum fpga_mgr_states (*state)(struct fpga_manager *mgr); - int (*write_init)(struct fpga_manager *mgr, u32 flags, + int (*write_init)(struct fpga_manager *mgr, + struct fpga_image_info *info, const char *buf, size_t count); int (*write)(struct fpga_manager *mgr, const char *buf, size_t count); - int (*write_complete)(struct fpga_manager *mgr, u32 flags); + int (*write_complete)(struct fpga_manager *mgr, + struct fpga_image_info *info); void (*fpga_remove)(struct fpga_manager *mgr); }; @@ -109,14 +127,17 @@ struct fpga_manager { #define to_fpga_manager(d) container_of(d, struct fpga_manager, dev) -int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, +int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info, const char *buf, size_t count); -int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags, +int fpga_mgr_firmware_load(struct fpga_manager *mgr, + struct fpga_image_info *info, const char *image_name); struct fpga_manager *of_fpga_mgr_get(struct device_node *node); +struct fpga_manager *fpga_mgr_get(struct device *dev); + void fpga_mgr_put(struct fpga_manager *mgr); int fpga_mgr_register(struct device *dev, const char *name, diff --git a/include/linux/fs.h b/include/linux/fs.h index dc0478c07b2a..b84230e070be 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -28,7 +28,6 @@ #include <linux/uidgid.h> #include <linux/lockdep.h> #include <linux/percpu-rwsem.h> -#include <linux/blk_types.h> #include <linux/workqueue.h> #include <linux/percpu-rwsem.h> #include <linux/delayed_call.h> @@ -38,6 +37,7 @@ struct backing_dev_info; struct bdi_writeback; +struct bio; struct export_operations; struct hd_geometry; struct iovec; @@ -152,58 +152,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, #define CHECK_IOVEC_ONLY -1 /* - * The below are the various read and write flags that we support. Some of - * them include behavioral modifiers that send information down to the - * block layer and IO scheduler. They should be used along with a req_op. - * Terminology: - * - * The block layer uses device plugging to defer IO a little bit, in - * the hope that we will see more IO very shortly. This increases - * coalescing of adjacent IO and thus reduces the number of IOs we - * have to send to the device. It also allows for better queuing, - * if the IO isn't mergeable. If the caller is going to be waiting - * for the IO, then he must ensure that the device is unplugged so - * that the IO is dispatched to the driver. - * - * All IO is handled async in Linux. This is fine for background - * writes, but for reads or writes that someone waits for completion - * on, we want to notify the block layer and IO scheduler so that they - * know about it. That allows them to make better scheduling - * decisions. So when the below references 'sync' and 'async', it - * is referencing this priority hint. - * - * With that in mind, the available types are: - * - * READ A normal read operation. Device will be plugged. - * READ_SYNC A synchronous read. 
Device is not plugged, caller can - * immediately wait on this read without caring about - * unplugging. - * WRITE A normal async write. Device will be plugged. - * WRITE_SYNC Synchronous write. Identical to WRITE, but passes down - * the hint that someone will be waiting on this IO - * shortly. The write equivalent of READ_SYNC. - * WRITE_ODIRECT Special case write for O_DIRECT only. - * WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush. - * WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on - * non-volatile media on completion. - * WRITE_FLUSH_FUA Combination of WRITE_FLUSH and FUA. The IO is preceded - * by a cache flush and data is guaranteed to be on - * non-volatile media on completion. - * - */ -#define RW_MASK REQ_OP_WRITE - -#define READ REQ_OP_READ -#define WRITE REQ_OP_WRITE - -#define READ_SYNC REQ_SYNC -#define WRITE_SYNC (REQ_SYNC | REQ_NOIDLE) -#define WRITE_ODIRECT REQ_SYNC -#define WRITE_FLUSH (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH) -#define WRITE_FUA (REQ_SYNC | REQ_NOIDLE | REQ_FUA) -#define WRITE_FLUSH_FUA (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH | REQ_FUA) - -/* * Attribute flags. These should be or-ed together to figure out what * has been changed! */ @@ -2499,19 +2447,6 @@ extern void make_bad_inode(struct inode *); extern bool is_bad_inode(struct inode *); #ifdef CONFIG_BLOCK -static inline bool op_is_write(unsigned int op) -{ - return op == REQ_OP_READ ? false : true; -} - -/* - * return data direction, READ or WRITE - */ -static inline int bio_data_dir(struct bio *bio) -{ - return op_is_write(bio_op(bio)) ? WRITE : READ; -} - extern void check_disk_size_change(struct gendisk *disk, struct block_device *bdev); extern int revalidate_disk(struct gendisk *); @@ -2782,7 +2717,6 @@ static inline void remove_inode_hash(struct inode *inode) extern void inode_sb_list_add(struct inode *inode); #ifdef CONFIG_BLOCK -extern blk_qc_t submit_bio(struct bio *); extern int bdev_read_only(struct block_device *); #endif extern int set_blocksize(struct block_device *, int); diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h index ff8b11b26f31..c074b670aa99 100644 --- a/include/linux/fscrypto.h +++ b/include/linux/fscrypto.h @@ -18,73 +18,9 @@ #include <crypto/skcipher.h> #include <uapi/linux/fs.h> -#define FS_KEY_DERIVATION_NONCE_SIZE 16 -#define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1 - -#define FS_POLICY_FLAGS_PAD_4 0x00 -#define FS_POLICY_FLAGS_PAD_8 0x01 -#define FS_POLICY_FLAGS_PAD_16 0x02 -#define FS_POLICY_FLAGS_PAD_32 0x03 -#define FS_POLICY_FLAGS_PAD_MASK 0x03 -#define FS_POLICY_FLAGS_VALID 0x03 - -/* Encryption algorithms */ -#define FS_ENCRYPTION_MODE_INVALID 0 -#define FS_ENCRYPTION_MODE_AES_256_XTS 1 -#define FS_ENCRYPTION_MODE_AES_256_GCM 2 -#define FS_ENCRYPTION_MODE_AES_256_CBC 3 -#define FS_ENCRYPTION_MODE_AES_256_CTS 4 - -/** - * Encryption context for inode - * - * Protector format: - * 1 byte: Protector format (1 = this version) - * 1 byte: File contents encryption mode - * 1 byte: File names encryption mode - * 1 byte: Flags - * 8 bytes: Master Key descriptor - * 16 bytes: Encryption Key derivation nonce - */ -struct fscrypt_context { - u8 format; - u8 contents_encryption_mode; - u8 filenames_encryption_mode; - u8 flags; - u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE]; - u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; -} __packed; - -/* Encryption parameters */ -#define FS_XTS_TWEAK_SIZE 16 -#define FS_AES_128_ECB_KEY_SIZE 16 -#define FS_AES_256_GCM_KEY_SIZE 32 -#define FS_AES_256_CBC_KEY_SIZE 32 -#define FS_AES_256_CTS_KEY_SIZE 32 -#define 
FS_AES_256_XTS_KEY_SIZE 64 -#define FS_MAX_KEY_SIZE 64 - -#define FS_KEY_DESC_PREFIX "fscrypt:" -#define FS_KEY_DESC_PREFIX_SIZE 8 - -/* This is passed in from userspace into the kernel keyring */ -struct fscrypt_key { - u32 mode; - u8 raw[FS_MAX_KEY_SIZE]; - u32 size; -} __packed; - -struct fscrypt_info { - u8 ci_data_mode; - u8 ci_filename_mode; - u8 ci_flags; - struct crypto_skcipher *ci_ctfm; - struct key *ci_keyring_key; - u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE]; -}; +#define FS_CRYPTO_BLOCK_SIZE 16 -#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001 -#define FS_WRITE_PATH_FL 0x00000002 +struct fscrypt_info; struct fscrypt_ctx { union { @@ -102,19 +38,6 @@ struct fscrypt_ctx { u8 mode; /* Encryption mode for tfm */ }; -struct fscrypt_completion_result { - struct completion completion; - int res; -}; - -#define DECLARE_FS_COMPLETION_RESULT(ecr) \ - struct fscrypt_completion_result ecr = { \ - COMPLETION_INITIALIZER((ecr).completion), 0 } - -#define FS_FNAME_NUM_SCATTER_ENTRIES 4 -#define FS_CRYPTO_BLOCK_SIZE 16 -#define FS_FNAME_CRYPTO_DIGEST_SIZE 32 - /** * For encrypted symlinks, the ciphertext length is stored at the beginning * of the string in little-endian format. @@ -154,9 +77,15 @@ struct fscrypt_name { #define fname_len(p) ((p)->disk_name.len) /* + * fscrypt superblock flags + */ +#define FS_CFLG_OWN_PAGES (1U << 1) + +/* * crypto opertions for filesystems */ struct fscrypt_operations { + unsigned int flags; int (*get_context)(struct inode *, void *, size_t); int (*key_prefix)(struct inode *, u8 **); int (*prepare_context)(struct inode *); @@ -206,7 +135,7 @@ static inline struct page *fscrypt_control_page(struct page *page) #endif } -static inline int fscrypt_has_encryption_key(struct inode *inode) +static inline int fscrypt_has_encryption_key(const struct inode *inode) { #if IS_ENABLED(CONFIG_FS_ENCRYPTION) return (inode->i_crypt_info != NULL); @@ -238,25 +167,25 @@ static inline void fscrypt_set_d_op(struct dentry *dentry) #if IS_ENABLED(CONFIG_FS_ENCRYPTION) /* crypto.c */ extern struct kmem_cache *fscrypt_info_cachep; -int fscrypt_initialize(void); - -extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t); +extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t); extern void fscrypt_release_ctx(struct fscrypt_ctx *); -extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t); -extern int fscrypt_decrypt_page(struct page *); +extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, + unsigned int, unsigned int, + u64, gfp_t); +extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int, + unsigned int, u64); extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *); extern void fscrypt_pullback_bio_page(struct page **, bool); extern void fscrypt_restore_control_page(struct page *); -extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t, +extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t, unsigned int); /* policy.c */ -extern int fscrypt_process_policy(struct file *, const struct fscrypt_policy *); -extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *); +extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); +extern int fscrypt_ioctl_get_policy(struct file *, void __user *); extern int fscrypt_has_permitted_context(struct inode *, struct inode *); extern int fscrypt_inherit_context(struct inode *, struct inode *, void *, bool); /* keyinfo.c */ -extern int get_crypt_info(struct inode *); extern int 
fscrypt_get_encryption_info(struct inode *); extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *); @@ -264,8 +193,8 @@ extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *); extern int fscrypt_setup_filename(struct inode *, const struct qstr *, int lookup, struct fscrypt_name *); extern void fscrypt_free_filename(struct fscrypt_name *); -extern u32 fscrypt_fname_encrypted_size(struct inode *, u32); -extern int fscrypt_fname_alloc_buffer(struct inode *, u32, +extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32); +extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, struct fscrypt_str *); extern void fscrypt_fname_free_buffer(struct fscrypt_str *); extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, @@ -275,7 +204,7 @@ extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *, #endif /* crypto.c */ -static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i, +static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(const struct inode *i, gfp_t f) { return ERR_PTR(-EOPNOTSUPP); @@ -286,13 +215,18 @@ static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c) return; } -static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i, - struct page *p, gfp_t f) +static inline struct page *fscrypt_notsupp_encrypt_page(const struct inode *i, + struct page *p, + unsigned int len, + unsigned int offs, + u64 lblk_num, gfp_t f) { return ERR_PTR(-EOPNOTSUPP); } -static inline int fscrypt_notsupp_decrypt_page(struct page *p) +static inline int fscrypt_notsupp_decrypt_page(const struct inode *i, struct page *p, + unsigned int len, unsigned int offs, + u64 lblk_num) { return -EOPNOTSUPP; } @@ -313,21 +247,21 @@ static inline void fscrypt_notsupp_restore_control_page(struct page *p) return; } -static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p, +static inline int fscrypt_notsupp_zeroout_range(const struct inode *i, pgoff_t p, sector_t s, unsigned int f) { return -EOPNOTSUPP; } /* policy.c */ -static inline int fscrypt_notsupp_process_policy(struct file *f, - const struct fscrypt_policy *p) +static inline int fscrypt_notsupp_ioctl_set_policy(struct file *f, + const void __user *arg) { return -EOPNOTSUPP; } -static inline int fscrypt_notsupp_get_policy(struct inode *i, - struct fscrypt_policy *p) +static inline int fscrypt_notsupp_ioctl_get_policy(struct file *f, + void __user *arg) { return -EOPNOTSUPP; } diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h index 649e9171a9b3..3efa3b861d44 100644 --- a/include/linux/fsl/guts.h +++ b/include/linux/fsl/guts.h @@ -29,83 +29,112 @@ * #ifdefs. 
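
In the hunk that follows, the register fields become plain u32 and the byte swap moves into the accessor (the diff also exports fsl_guts_get_svr() for the common case); a hedged sketch of the resulting idiom, with the ioremap of the block assumed done elsewhere:

static u32 my_read_svr(struct ccsr_guts __iomem *guts)
{
	/* the field is plain u32 now; the big-endian swap comes from
	 * the accessor rather than from a __be32 struct member */
	return ioread32be(&guts->svr);
}
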
*/ struct ccsr_guts { - __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ - __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ - __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ - __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ - __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ - __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */ + u32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ + u32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ + u32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and + * Control Register + */ + u32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ + u32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ + u32 pordevsr2; /* 0x.0014 - POR device status register 2 */ u8 res018[0x20 - 0x18]; - __be32 porcir; /* 0x.0020 - POR Configuration Information Register */ + u32 porcir; /* 0x.0020 - POR Configuration Information + * Register + */ u8 res024[0x30 - 0x24]; - __be32 gpiocr; /* 0x.0030 - GPIO Control Register */ + u32 gpiocr; /* 0x.0030 - GPIO Control Register */ u8 res034[0x40 - 0x34]; - __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ + u32 gpoutdr; /* 0x.0040 - General-Purpose Output Data + * Register + */ u8 res044[0x50 - 0x44]; - __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ + u32 gpindr; /* 0x.0050 - General-Purpose Input Data + * Register + */ u8 res054[0x60 - 0x54]; - __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ - __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */ - __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ + u32 pmuxcr; /* 0x.0060 - Alternate Function Signal + * Multiplex Control + */ + u32 pmuxcr2; /* 0x.0064 - Alternate function signal + * multiplex control 2 + */ + u32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ u8 res06c[0x70 - 0x6c]; - __be32 devdisr; /* 0x.0070 - Device Disable Control */ + u32 devdisr; /* 0x.0070 - Device Disable Control */ #define CCSR_GUTS_DEVDISR_TB1 0x00001000 #define CCSR_GUTS_DEVDISR_TB0 0x00004000 - __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ + u32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ u8 res078[0x7c - 0x78]; - __be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */ - __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ - __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */ - __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */ - __be32 pmcdr; /* 0x.008c - 4Power management clock disable register */ - __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ - __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */ - __be32 ectrstcr; /* 0x.0098 - Exception reset control register */ - __be32 autorstsr; /* 0x.009c - Automatic reset status register */ - __be32 pvr; /* 0x.00a0 - Processor Version Register */ - __be32 svr; /* 0x.00a4 - System Version Register */ + u32 pmjcr; /* 0x.007c - 4 Power Management Jog Control + * Register + */ + u32 powmgtcsr; /* 0x.0080 - Power Management Status and + * Control Register + */ + u32 pmrccr; /* 0x.0084 - Power Management Reset Counter + * Configuration Register + */ + u32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter + * Configuration Register + */ + u32 pmcdr; /* 0x.008c - 4Power management clock disable + * register + */ + u32 mcpsumr; /* 0x.0090 - Machine Check Summary Register 
*/ + u32 rstrscr; /* 0x.0094 - Reset Request Status and + * Control Register + */ + u32 ectrstcr; /* 0x.0098 - Exception reset control register */ + u32 autorstsr; /* 0x.009c - Automatic reset status register */ + u32 pvr; /* 0x.00a0 - Processor Version Register */ + u32 svr; /* 0x.00a4 - System Version Register */ u8 res0a8[0xb0 - 0xa8]; - __be32 rstcr; /* 0x.00b0 - Reset Control Register */ + u32 rstcr; /* 0x.00b0 - Reset Control Register */ u8 res0b4[0xc0 - 0xb4]; - __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register + u32 iovselsr; /* 0x.00c0 - I/O voltage select status register Called 'elbcvselcr' on 86xx SOCs */ u8 res0c4[0x100 - 0xc4]; - __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers + u32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers There are 16 registers */ u8 res140[0x224 - 0x140]; - __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */ - __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */ + u32 iodelay1; /* 0x.0224 - IO delay control register 1 */ + u32 iodelay2; /* 0x.0228 - IO delay control register 2 */ u8 res22c[0x604 - 0x22c]; - __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ + u32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ u8 res608[0x800 - 0x608]; - __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */ + u32 clkdvdr; /* 0x.0800 - Clock Divide Register */ u8 res804[0x900 - 0x804]; - __be32 ircr; /* 0x.0900 - Infrared Control Register */ + u32 ircr; /* 0x.0900 - Infrared Control Register */ u8 res904[0x908 - 0x904]; - __be32 dmacr; /* 0x.0908 - DMA Control Register */ + u32 dmacr; /* 0x.0908 - DMA Control Register */ u8 res90c[0x914 - 0x90c]; - __be32 elbccr; /* 0x.0914 - eLBC Control Register */ + u32 elbccr; /* 0x.0914 - eLBC Control Register */ u8 res918[0xb20 - 0x918]; - __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ - __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ - __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ + u32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ + u32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ + u32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ u8 resb2c[0xe00 - 0xb2c]; - __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */ + u32 clkocr; /* 0x.0e00 - Clock Out Select Register */ u8 rese04[0xe10 - 0xe04]; - __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ + u32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ u8 rese14[0xe20 - 0xe14]; - __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ - __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */ + u32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ + u32 cpfor; /* 0x.0e24 - L2 charge pump fuse override + * register + */ u8 rese28[0xf04 - 0xe28]; - __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ - __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ + u32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ + u32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ u8 resf0c[0xf2c - 0xf0c]; - __be32 itcr; /* 0x.0f2c - Internal transaction control register */ + u32 itcr; /* 0x.0f2c - Internal transaction control + * register + */ u8 resf30[0xf40 - 0xf30]; - __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ - __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ + u32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ + u32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ } __attribute__ ((packed)); +u32 fsl_guts_get_svr(void); /* Alternate function signal multiplex 
control */ #define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x)) diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index f2912914141a..60cef8227534 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -100,6 +100,7 @@ struct fsl_usb2_platform_data { unsigned already_suspended:1; unsigned has_fsl_erratum_a007792:1; unsigned has_fsl_erratum_a005275:1; + unsigned has_fsl_erratum_a005697:1; unsigned check_phy_clk_valid:1; /* register save area for suspend/resume */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index b3d34d3e0e7e..d4a884db16a3 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -398,6 +398,7 @@ int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, void ftrace_set_global_filter(unsigned char *buf, int len, int reset); void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); void ftrace_free_filter(struct ftrace_ops *ops); +void ftrace_ops_set_global_filter(struct ftrace_ops *ops); int register_ftrace_command(struct ftrace_func_command *cmd); int unregister_ftrace_command(struct ftrace_func_command *cmd); @@ -645,6 +646,7 @@ static inline unsigned long ftrace_location(unsigned long ip) #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_free_filter(ops) do { } while (0) +#define ftrace_ops_set_global_filter(ops) do { } while (0) static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) { return -ENODEV; } diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h index 667c31101b8b..377257d8f7e3 100644 --- a/include/linux/genl_magic_func.h +++ b/include/linux/genl_magic_func.h @@ -259,16 +259,7 @@ static struct genl_ops ZZZ_genl_ops[] __read_mostly = { * {{{2 */ #define ZZZ_genl_family CONCAT_(GENL_MAGIC_FAMILY, _genl_family) -static struct genl_family ZZZ_genl_family __read_mostly = { - .id = GENL_ID_GENERATE, - .name = __stringify(GENL_MAGIC_FAMILY), - .version = GENL_MAGIC_VERSION, -#ifdef GENL_MAGIC_FAMILY_HDRSZ - .hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ), -#endif - .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1, -}; - +static struct genl_family ZZZ_genl_family; /* * Magic: define multicast groups * Magic: define multicast group registration helper @@ -302,11 +293,23 @@ static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \ #undef GENL_mc_group #define GENL_mc_group(group) +static struct genl_family ZZZ_genl_family __ro_after_init = { + .name = __stringify(GENL_MAGIC_FAMILY), + .version = GENL_MAGIC_VERSION, +#ifdef GENL_MAGIC_FAMILY_HDRSZ + .hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ), +#endif + .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1, + .ops = ZZZ_genl_ops, + .n_ops = ARRAY_SIZE(ZZZ_genl_ops), + .mcgrps = ZZZ_genl_mcgrps, + .n_mcgrps = ARRAY_SIZE(ZZZ_genl_mcgrps), + .module = THIS_MODULE, +}; + int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void) { - return genl_register_family_with_ops_groups(&ZZZ_genl_family, \ - ZZZ_genl_ops, \ - ZZZ_genl_mcgrps); + return genl_register_family(&ZZZ_genl_family); } void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index f8041f9de31e..4175dca4ac39 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -506,6 +506,8 @@ extern void free_hot_cold_page(struct page *page, bool cold); extern void free_hot_cold_page_list(struct list_head *list, bool cold); struct page_frag_cache; +extern void 
__page_frag_drain(struct page *page, unsigned int order, + unsigned int count); extern void *__alloc_page_frag(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask); extern void __free_page_frag(void *addr); diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 24e2cc56beb1..c2748accea71 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -82,8 +82,6 @@ enum single_ended_mode { * implies that if the chip supports IRQs, these IRQs need to be threaded * as the chip access may sleep when e.g. reading out the IRQ status * registers. - * @irq_not_threaded: flag must be set if @can_sleep is set but the - * IRQs don't need to be threaded * @read_reg: reader function for generic GPIO * @write_reg: writer function for generic GPIO * @pin2mask: some generic GPIO controllers work with the big-endian bits @@ -91,7 +89,7 @@ enum single_ended_mode { * bit. This callback assigns the right bit mask. * @reg_dat: data (in) register for generic GPIO * @reg_set: output set register (out=high) for generic GPIO - * @reg_clk: output clear register (out=low) for generic GPIO + * @reg_clr: output clear register (out=low) for generic GPIO * @reg_dir: direction setting register for generic GPIO * @bgpio_bits: number of register bits used for a generic GPIO i.e. * <register width> * 8 @@ -109,8 +107,10 @@ enum single_ended_mode { * for GPIO IRQs, provided by GPIO driver * @irq_default_type: default IRQ triggering type applied during GPIO driver * initialization, provided by GPIO driver - * @irq_parent: GPIO IRQ chip parent/bank linux irq number, - * provided by GPIO driver + * @irq_chained_parent: GPIO IRQ chip parent/bank linux irq number, + * provided by GPIO driver for chained interrupt (not for nested + * interrupts). + * @irq_nested: True if set the interrupt handling is nested. * @irq_need_valid_mask: If set core allocates @irq_valid_mask with all * bits set to one * @irq_valid_mask: If not %NULL holds bitmask of GPIOs which are valid to @@ -166,7 +166,6 @@ struct gpio_chip { u16 ngpio; const char *const *names; bool can_sleep; - bool irq_not_threaded; #if IS_ENABLED(CONFIG_GPIO_GENERIC) unsigned long (*read_reg)(void __iomem *reg); @@ -192,7 +191,8 @@ struct gpio_chip { unsigned int irq_base; irq_flow_handler_t irq_handler; unsigned int irq_default_type; - int irq_parent; + int irq_chained_parent; + bool irq_nested; bool irq_need_valid_mask; unsigned long *irq_valid_mask; struct lock_class_key *lock_key; @@ -270,24 +270,40 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, int parent_irq, irq_flow_handler_t parent_handler); +void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip, + struct irq_chip *irqchip, + int parent_irq); + int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, struct irq_chip *irqchip, unsigned int first_irq, irq_flow_handler_t handler, unsigned int type, + bool nested, struct lock_class_key *lock_key); +/* FIXME: I assume threaded IRQchips do not have the lockdep problem */ +static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, + struct irq_chip *irqchip, + unsigned int first_irq, + irq_flow_handler_t handler, + unsigned int type) +{ + return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq, + handler, type, true, NULL); +} + #ifdef CONFIG_LOCKDEP #define gpiochip_irqchip_add(...) \ ( \ ({ \ static struct lock_class_key _key; \ - _gpiochip_irqchip_add(__VA_ARGS__, &_key); \ + _gpiochip_irqchip_add(__VA_ARGS__, false, &_key); \ }) \ ) #else #define gpiochip_irqchip_add(...) 
\ - _gpiochip_irqchip_add(__VA_ARGS__, NULL) + _gpiochip_irqchip_add(__VA_ARGS__, false, NULL) #endif #endif /* CONFIG_GPIOLIB_IRQCHIP */ diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h index e31bcd4c7859..97585d9679f3 100644 --- a/include/linux/hdlc.h +++ b/include/linux/hdlc.h @@ -93,8 +93,6 @@ static __inline__ void debug_frame(const struct sk_buff *skb) int hdlc_open(struct net_device *dev); /* Must be called by hardware driver when HDLC device is being closed */ void hdlc_close(struct net_device *dev); -/* May be used by hardware driver */ -int hdlc_change_mtu(struct net_device *dev, int new_mtu); /* Must be pointed to by hw driver's dev->netdev_ops->ndo_start_xmit */ netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev); diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index e9744202fa29..edbb4fc674ed 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -78,6 +78,8 @@ enum hdmi_picture_aspect { HDMI_PICTURE_ASPECT_NONE, HDMI_PICTURE_ASPECT_4_3, HDMI_PICTURE_ASPECT_16_9, + HDMI_PICTURE_ASPECT_64_27, + HDMI_PICTURE_ASPECT_256_135, HDMI_PICTURE_ASPECT_RESERVED, }; diff --git a/include/linux/hid.h b/include/linux/hid.h index b2ec82712baa..28f38e2b8f30 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -231,7 +231,11 @@ struct hid_item { #define HID_DG_TAP 0x000d0035 #define HID_DG_TABLETFUNCTIONKEY 0x000d0039 #define HID_DG_PROGRAMCHANGEKEY 0x000d003a +#define HID_DG_BATTERYSTRENGTH 0x000d003b #define HID_DG_INVERT 0x000d003c +#define HID_DG_TILT_X 0x000d003d +#define HID_DG_TILT_Y 0x000d003e +#define HID_DG_TWIST 0x000d0041 #define HID_DG_TIPSWITCH 0x000d0042 #define HID_DG_TIPSWITCH2 0x000d0043 #define HID_DG_BARRELSWITCH 0x000d0044 @@ -479,6 +483,7 @@ struct hid_input { struct list_head list; struct hid_report *report; struct input_dev *input; + bool registered; }; enum hid_type { diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h index 8ec23fb0b412..402f99e328d4 100644 --- a/include/linux/hippidevice.h +++ b/include/linux/hippidevice.h @@ -32,7 +32,6 @@ struct hippi_cb { }; __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev); -int hippi_change_mtu(struct net_device *dev, int new_mtu); int hippi_mac_addr(struct net_device *dev, void *p); int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p); struct net_device *alloc_hippi_dev(int sizeof_priv); diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 9b9f65d99873..97e478d6b690 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -1,12 +1,12 @@ #ifndef _LINUX_HUGE_MM_H #define _LINUX_HUGE_MM_H -extern int do_huge_pmd_anonymous_page(struct fault_env *fe); +extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf); extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *vma); -extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd); -extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd); +extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd); +extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd); extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, @@ -22,7 +22,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned char *vec); extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, unsigned long old_end, - pmd_t 
*old_pmd, pmd_t *new_pmd); + pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush); extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, int prot_numa); @@ -142,7 +142,7 @@ static inline int hpage_nr_pages(struct page *page) return 1; } -extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd); +extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd); extern struct page *huge_zero_page; @@ -189,6 +189,8 @@ static inline void deferred_split_huge_page(struct page *page) {} #define split_huge_pmd(__vma, __pmd, __address) \ do { } while (0) +static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long address, bool freeze, struct page *page) {} static inline void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, bool freeze, struct page *page) {} @@ -210,7 +212,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, return NULL; } -static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd) +static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd) { return 0; } diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index 34a0dc18f327..bee0827766a3 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -30,8 +30,7 @@ * Must not be NULL. *OBSOLETE* * @read: New API. drivers can fill up to max bytes of data * into the buffer. The buffer is aligned for any type - * and max is guaranteed to be >= to that alignment - * (either 4 or 8 depending on architecture). + * and max is a multiple of 4 and >= 32 bytes. * @priv: Private data, for use by the RNG driver. * @quality: Estimation of true entropy in RNG's bitstream * (per mill). diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index 9d2f8bde7d12..78d59dba563e 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -298,8 +298,8 @@ enum hwmon_pwm_attributes { * Channel number * The function returns the file permissions. * If the return value is 0, no attribute will be created. - * @read: Read callback. Optional. If not provided, attributes - * will not be readable. + * @read: Read callback for data attributes. Mandatory if readable + * data attributes are present. * Parameters are: * @dev: Pointer to hardware monitoring device * @type: Sensor type @@ -308,8 +308,19 @@ enum hwmon_pwm_attributes { * Channel number * @val: Pointer to returned value * The function returns 0 on success or a negative error number. - * @write: Write callback. Optional. If not provided, attributes - * will not be writable. + * @read_string: + * Read callback for string attributes. Mandatory if string + * attributes are present. + * Parameters are: + * @dev: Pointer to hardware monitoring device + * @type: Sensor type + * @attr: Sensor attribute + * @channel: + * Channel number + * @str: Pointer to returned string + * The function returns 0 on success or a negative error number. + * @write: Write callback for data attributes. Mandatory if writeable + * data attributes are present. 
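
A hedged sketch of the read/read_string split described above (the @write parameter list continues below; all identifiers here are assumptions):

static umode_t my_is_visible(const void *drvdata, enum hwmon_sensor_types type,
			     u32 attr, int channel)
{
	return 0444;			/* everything read-only */
}

static int my_read(struct device *dev, enum hwmon_sensor_types type,
		   u32 attr, int channel, long *val)
{
	*val = 42000;			/* e.g. 42 degC in millidegrees */
	return 0;
}

static int my_read_string(struct device *dev, enum hwmon_sensor_types type,
			  u32 attr, int channel, char **str)
{
	static char label[] = "cpu";

	*str = label;			/* e.g. for hwmon_temp_label */
	return 0;
}

static const struct hwmon_ops my_hwmon_ops = {
	.is_visible	= my_is_visible,
	.read		= my_read,
	.read_string	= my_read_string,
};
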
* Parameters are: * @dev: Pointer to hardware monitoring device * @type: Sensor type @@ -324,6 +335,8 @@ struct hwmon_ops { u32 attr, int channel); int (*read)(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val); + int (*read_string)(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, char **str); int (*write)(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long val); }; @@ -349,7 +362,9 @@ struct hwmon_chip_info { const struct hwmon_channel_info **info; }; +/* hwmon_device_register() is deprecated */ struct device *hwmon_device_register(struct device *dev); + struct device * hwmon_device_register_with_groups(struct device *dev, const char *name, void *drvdata, @@ -362,12 +377,12 @@ struct device * hwmon_device_register_with_info(struct device *dev, const char *name, void *drvdata, const struct hwmon_chip_info *info, - const struct attribute_group **groups); + const struct attribute_group **extra_groups); struct device * devm_hwmon_device_register_with_info(struct device *dev, - const char *name, void *drvdata, - const struct hwmon_chip_info *info, - const struct attribute_group **groups); + const char *name, void *drvdata, + const struct hwmon_chip_info *info, + const struct attribute_group **extra_groups); void hwmon_device_unregister(struct device *dev); void devm_hwmon_device_unregister(struct device *dev); diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index cd184bdca58f..42fe43fb0c80 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -696,7 +696,7 @@ enum vmbus_device_type { HV_FCOPY, HV_BACKUP, HV_DM, - HV_UNKOWN, + HV_UNKNOWN, }; struct vmbus_device { @@ -1119,6 +1119,12 @@ struct hv_driver { struct device_driver driver; + /* dynamic device GUID's */ + struct { + spinlock_t lock; + struct list_head list; + } dynids; + int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *); int (*remove)(struct hv_device *); void (*shutdown)(struct hv_device *); @@ -1447,6 +1453,7 @@ void hv_event_tasklet_enable(struct vmbus_channel *channel); void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); +void vmbus_setevent(struct vmbus_channel *channel); /* * Negotiated version with the Host. */ @@ -1479,10 +1486,11 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info) * there is room for the producer to send the pending packet. */ -static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi) +static inline void hv_signal_on_read(struct vmbus_channel *channel) { u32 cur_write_sz; u32 pending_sz; + struct hv_ring_buffer_info *rbi = &channel->inbound; /* * Issue a full memory barrier before making the signaling decision. @@ -1500,14 +1508,14 @@ static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi) pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); /* If the other end is not blocked on write don't bother. 
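
The batched in-place read helpers further down follow the same commit-late idea; a hedged consumer sketch, with my_handle_pkt() as an assumed stub:

static void my_handle_pkt(struct vmpacket_descriptor *desc)
{
	/* hypothetical per-packet processing */
}

static void my_drain_channel(struct vmbus_channel *channel)
{
	struct vmpacket_descriptor *desc;

	while ((desc = get_next_pkt_raw(channel)) != NULL) {
		my_handle_pkt(desc);		/* consume in place */
		put_pkt_raw(channel, desc);	/* advance private index */
	}
	commit_rd_index(channel);	/* publish the read index and signal
					 * the host if the pending_sz test
					 * above says to */
}
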
*/ if (pending_sz == 0) - return false; + return; cur_write_sz = hv_get_bytes_to_write(rbi); if (cur_write_sz >= pending_sz) - return true; + vmbus_setevent(channel); - return false; + return; } /* @@ -1519,31 +1527,23 @@ static inline struct vmpacket_descriptor * get_next_pkt_raw(struct vmbus_channel *channel) { struct hv_ring_buffer_info *ring_info = &channel->inbound; - u32 read_loc = ring_info->priv_read_index; + u32 priv_read_loc = ring_info->priv_read_index; void *ring_buffer = hv_get_ring_buffer(ring_info); - struct vmpacket_descriptor *cur_desc; - u32 packetlen; u32 dsize = ring_info->ring_datasize; - u32 delta = read_loc - ring_info->ring_buffer->read_index; + /* + * delta is the difference between what is available to read and + * what was already consumed in place. We commit read index after + * the whole batch is processed. + */ + u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ? + priv_read_loc - ring_info->ring_buffer->read_index : + (dsize - ring_info->ring_buffer->read_index) + priv_read_loc; u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta); if (bytes_avail_toread < sizeof(struct vmpacket_descriptor)) return NULL; - if ((read_loc + sizeof(*cur_desc)) > dsize) - return NULL; - - cur_desc = ring_buffer + read_loc; - packetlen = cur_desc->len8 << 3; - - /* - * If the packet under consideration is wrapping around, - * return failure. - */ - if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1)) - return NULL; - - return cur_desc; + return ring_buffer + priv_read_loc; } /* @@ -1555,16 +1555,14 @@ static inline void put_pkt_raw(struct vmbus_channel *channel, struct vmpacket_descriptor *desc) { struct hv_ring_buffer_info *ring_info = &channel->inbound; - u32 read_loc = ring_info->priv_read_index; u32 packetlen = desc->len8 << 3; u32 dsize = ring_info->ring_datasize; - if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize) - BUG(); /* * Include the packet trailer. */ ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER; + ring_info->priv_read_index %= dsize; } /* @@ -1589,8 +1587,7 @@ static inline void commit_rd_index(struct vmbus_channel *channel) virt_rmb(); ring_info->ring_buffer->read_index = ring_info->priv_read_index; - if (hv_need_to_signal_on_read(ring_info)) - vmbus_set_event(channel); + hv_signal_on_read(channel); } diff --git a/include/linux/idr.h b/include/linux/idr.h index 083d61e92706..3c01b89aed67 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -18,12 +18,11 @@ #include <linux/rcupdate.h> /* - * We want shallower trees and thus more bits covered at each layer. 8 - * bits gives us large enough first layer for most use cases and maximum - * tree depth of 4. Each idr_layer is slightly larger than 2k on 64bit and - * 1k on 32bit. + * Using 6 bits at each layer allows us to allocate 7 layers out of each page. + * 8 bits only gave us 3 layers out of every pair of pages, which is less + * efficient except for trees with a largest element between 192-255 inclusive. */ -#define IDR_BITS 8 +#define IDR_BITS 6 #define IDR_SIZE (1 << IDR_BITS) #define IDR_MASK ((1 << IDR_BITS)-1) @@ -56,6 +55,32 @@ struct idr { #define DEFINE_IDR(name) struct idr name = IDR_INIT(name) /** + * idr_get_cursor - Return the current position of the cyclic allocator + * @idr: idr handle + * + * The value returned is the value that will be next returned from + * idr_alloc_cyclic() if it is free (otherwise the search will start from + * this position). 
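
A hedged sketch of the cursor pair in checkpoint/restore style code (function names assumed):

static unsigned int my_checkpoint_ids(struct idr *idr)
{
	return idr_get_cursor(idr);
}

static void my_restore_ids(struct idr *idr, unsigned int cursor)
{
	/* the next idr_alloc_cyclic() will try @cursor first */
	idr_set_cursor(idr, cursor);
}
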
+ */ +static inline unsigned int idr_get_cursor(struct idr *idr) +{ + return READ_ONCE(idr->cur); +} + +/** + * idr_set_cursor - Set the current position of the cyclic allocator + * @idr: idr handle + * @val: new position + * + * The next call to idr_alloc_cyclic() will return @val if it is free + * (otherwise the search will start from this position). + */ +static inline void idr_set_cursor(struct idr *idr, unsigned int val) +{ + WRITE_ONCE(idr->cur, val); +} + +/** * DOC: idr sync * idr synchronization (stolen from radix-tree.h) * @@ -195,6 +220,11 @@ static inline int ida_get_new(struct ida *ida, int *p_id) return ida_get_new_above(ida, 0, p_id); } +static inline bool ida_is_empty(struct ida *ida) +{ + return idr_is_empty(&ida->idr); +} + void __init idr_init_cache(void); #endif /* __IDR_H__ */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index a80516fd65c8..fe849329511a 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1576,6 +1576,9 @@ struct ieee80211_vht_operation { #define WLAN_AUTH_SHARED_KEY 1 #define WLAN_AUTH_FT 2 #define WLAN_AUTH_SAE 3 +#define WLAN_AUTH_FILS_SK 4 +#define WLAN_AUTH_FILS_SK_PFS 5 +#define WLAN_AUTH_FILS_PK 6 #define WLAN_AUTH_LEAP 128 #define WLAN_AUTH_CHALLENGE_LEN 128 @@ -1960,6 +1963,26 @@ enum ieee80211_eid { WLAN_EID_VENDOR_SPECIFIC = 221, WLAN_EID_QOS_PARAMETER = 222, + WLAN_EID_CAG_NUMBER = 237, + WLAN_EID_AP_CSN = 239, + WLAN_EID_FILS_INDICATION = 240, + WLAN_EID_DILS = 241, + WLAN_EID_FRAGMENT = 242, + WLAN_EID_EXTENSION = 255 +}; + +/* Element ID Extensions for Element ID 255 */ +enum ieee80211_eid_ext { + WLAN_EID_EXT_ASSOC_DELAY_INFO = 1, + WLAN_EID_EXT_FILS_REQ_PARAMS = 2, + WLAN_EID_EXT_FILS_KEY_CONFIRM = 3, + WLAN_EID_EXT_FILS_SESSION = 4, + WLAN_EID_EXT_FILS_HLP_CONTAINER = 5, + WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN = 6, + WLAN_EID_EXT_KEY_DELIVERY = 7, + WLAN_EID_EXT_FILS_WRAPPED_DATA = 8, + WLAN_EID_EXT_FILS_PUBLIC_KEY = 12, + WLAN_EID_EXT_FILS_NONCE = 13, }; /* Action category code */ @@ -2073,6 +2096,9 @@ enum ieee80211_key_len { #define IEEE80211_GCMP_MIC_LEN 16 #define IEEE80211_GCMP_PN_LEN 6 +#define FILS_NONCE_LEN 16 +#define FILS_MAX_KEK_LEN 64 + /* Public action codes */ enum ieee80211_pub_actioncode { WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4, diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index f563907ed776..3355efc89781 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h @@ -44,4 +44,20 @@ static inline int arp_hdr_len(struct net_device *dev) return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2; } } + +static inline bool dev_is_mac_header_xmit(const struct net_device *dev) +{ + switch (dev->type) { + case ARPHRD_TUNNEL: + case ARPHRD_TUNNEL6: + case ARPHRD_SIT: + case ARPHRD_IPGRE: + case ARPHRD_VOID: + case ARPHRD_NONE: + return false; + default: + return true; + } +} + #endif /* _LINUX_IF_ARP_H */ diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 3319d97d789d..8d5fcd6284ce 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -399,22 +399,6 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) skb->vlan_tci = 0; return skb; } -/* - * vlan_hwaccel_push_inside - pushes vlan tag to the payload - * @skb: skbuff to tag - * - * Checks is tag is present in @skb->vlan_tci and if it is, it pushes the - * VLAN tag from @skb->vlan_tci inside to the payload. - * - * Following the skb_unshare() example, in case of error, the calling function - * doesn't have to worry about freeing the original skb. 
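
With the unused wrapper removed, the surviving idiom is to test the tag and call __vlan_hwaccel_push_inside() directly, exactly as the deleted body did; a hedged sketch:

static struct sk_buff *my_push_vlan(struct sk_buff *skb)
{
	if (skb_vlan_tag_present(skb))
		skb = __vlan_hwaccel_push_inside(skb);	/* NULL on error,
							 * original skb freed */
	return skb;
}
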
- */ -static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb) -{ - if (skb_vlan_tag_present(skb)) - skb = __vlan_hwaccel_push_inside(skb); - return skb; -} /** * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h index e7fdec4db9da..5ba430cc9a87 100644 --- a/include/linux/iio/adc/ad_sigma_delta.h +++ b/include/linux/iio/adc/ad_sigma_delta.h @@ -136,6 +136,7 @@ int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig); .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_OFFSET), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \ .scan_index = (_si), \ .scan_type = { \ .sign = 'u', \ diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h index 9edccfba1ffb..47eeec3218b5 100644 --- a/include/linux/iio/consumer.h +++ b/include/linux/iio/consumer.h @@ -226,6 +226,34 @@ int iio_read_channel_processed(struct iio_channel *chan, int *val); int iio_write_channel_raw(struct iio_channel *chan, int val); /** + * iio_read_max_channel_raw() - read maximum available raw value from a given + * channel, i.e. the maximum possible value. + * @chan: The channel being queried. + * @val: Value read back. + * + * Note raw reads from iio channels are in adc counts and hence + * scale will need to be applied if standard units are required. + */ +int iio_read_max_channel_raw(struct iio_channel *chan, int *val); + +/** + * iio_read_avail_channel_raw() - read available raw values from a given channel + * @chan: The channel being queried. + * @vals: Available values read back. + * @length: Number of entries in vals. + * + * Returns an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST. + * + * For ranges, three vals are always returned; min, step and max. + * For lists, all the possible values are enumerated. + * + * Note raw available values from iio channels are in adc counts and + * hence scale will need to be applied if standard units are required. + */ +int iio_read_avail_channel_raw(struct iio_channel *chan, + const int **vals, int *length); + +/** * iio_get_channel_type() - get the type of a channel * @channel: The channel being queried. * @type: The type of the channel. @@ -236,6 +264,19 @@ int iio_get_channel_type(struct iio_channel *channel, enum iio_chan_type *type); /** + * iio_read_channel_offset() - read the offset value for a channel + * @chan: The channel being queried. + * @val: First part of value read back. + * @val2: Second part of value read back. + * + * Note returns a description of what is in val and val2, such + * as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val + * + val2/1e6 + */ +int iio_read_channel_offset(struct iio_channel *chan, int *val, + int *val2); + +/** * iio_read_channel_scale() - read the scale value for a channel * @chan: The channel being queried. * @val: First part of value read back. diff --git a/include/linux/iio/dac/mcp4725.h b/include/linux/iio/dac/mcp4725.h index 91530e6611e9..628b2cf54c50 100644 --- a/include/linux/iio/dac/mcp4725.h +++ b/include/linux/iio/dac/mcp4725.h @@ -9,8 +9,18 @@ #ifndef IIO_DAC_MCP4725_H_ #define IIO_DAC_MCP4725_H_ +/** + * struct mcp4725_platform_data - MCP4725/6 DAC specific data. + * @use_vref: Whether an external reference voltage on Vref pin should be used. + * Additional vref-supply must be specified when used. + * @vref_buffered: Controls buffering of the external reference voltage. 
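
A hedged board-file sketch populating the new fields (values illustrative):

static struct mcp4725_platform_data my_dac_pdata = {
	.use_vref	= true,		/* external reference on the Vref pin */
	.vref_buffered	= false,	/* feed Vref to the DAC unbuffered */
};
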
+ *
+ * Vref related settings are available only on MCP4726. See
+ * Documentation/devicetree/bindings/iio/dac/mcp4725.txt for more information.
+ */
 struct mcp4725_platform_data {
-	u16 vref_mv;
+	bool use_vref;
+	bool vref_buffered;
 };
 
 #endif /* IIO_DAC_MCP4725_H_ */
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index b4a0679e4a49..3f5ea2e9a39e 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -225,12 +225,22 @@ struct iio_event_spec {
 * endianness: little or big endian
 * @info_mask_separate: What information is to be exported that is specific to
 * this channel.
+ * @info_mask_separate_available: What availability information is to be
+ * exported that is specific to this channel.
 * @info_mask_shared_by_type: What information is to be exported that is shared
 * by all channels of the same type.
+ * @info_mask_shared_by_type_available: What availability information is to be
+ * exported that is shared by all channels of the same
+ * type.
 * @info_mask_shared_by_dir: What information is to be exported that is shared
 * by all channels of the same direction.
+ * @info_mask_shared_by_dir_available: What availability information is to be
+ * exported that is shared by all channels of the same
+ * direction.
 * @info_mask_shared_by_all: What information is to be exported that is shared
 * by all channels.
+ * @info_mask_shared_by_all_available: What availability information is to be
+ * exported that is shared by all channels.
 * @event_spec: Array of events which should be registered for this
 * channel.
 * @num_event_specs: Size of the event_spec array.
@@ -269,9 +279,13 @@ struct iio_chan_spec {
 enum iio_endian endianness;
 } scan_type;
 long info_mask_separate;
+ long info_mask_separate_available;
 long info_mask_shared_by_type;
+ long info_mask_shared_by_type_available;
 long info_mask_shared_by_dir;
+ long info_mask_shared_by_dir_available;
 long info_mask_shared_by_all;
+ long info_mask_shared_by_all_available;
 const struct iio_event_spec *event_spec;
 unsigned int num_event_specs;
 const struct iio_chan_spec_ext_info *ext_info;
@@ -301,6 +315,23 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
 (chan->info_mask_shared_by_all & BIT(type));
 }
 
+/**
+ * iio_channel_has_available() - Checks if a channel has an available attribute
+ * @chan: The channel to be queried
+ * @type: Type of the available attribute to be checked
+ *
+ * Returns true if the channel supports reporting available values for the
+ * given attribute type, false otherwise.
+ */
+static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
+ enum iio_chan_info_enum type)
+{
+ return (chan->info_mask_separate_available & BIT(type)) |
+ (chan->info_mask_shared_by_type_available & BIT(type)) |
+ (chan->info_mask_shared_by_dir_available & BIT(type)) |
+ (chan->info_mask_shared_by_all_available & BIT(type));
+}
+
 #define IIO_CHAN_SOFT_TIMESTAMP(_si) { \
 .type = IIO_TIMESTAMP, \
 .channel = -1, \
@@ -349,6 +380,14 @@ struct iio_dev;
 * max_len specifies maximum number of elements
 * vals pointer can contain. val_len is used to return
 * length of valid elements in vals.
+ * @read_avail: function to return the available values from the device.
+ * mask specifies which value. Note 0 means the available
+ * values for the channel in question. Return value
+ * specifies whether an IIO_AVAIL_LIST or an IIO_AVAIL_RANGE is
+ * returned in vals. The type of the vals is returned in
+ * type and the number of vals is returned in length.
For + * ranges, there are always three vals returned; min, step + * and max. For lists, all possible values are enumerated. * @write_raw: function to write a value to the device. * Parameters are the same as for read_raw. * @write_raw_get_fmt: callback function to query the expected @@ -381,7 +420,7 @@ struct iio_dev; **/ struct iio_info { struct module *driver_module; - struct attribute_group *event_attrs; + const struct attribute_group *event_attrs; const struct attribute_group *attrs; int (*read_raw)(struct iio_dev *indio_dev, @@ -397,6 +436,13 @@ struct iio_info { int *val_len, long mask); + int (*read_avail)(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, + int *type, + int *length, + long mask); + int (*write_raw)(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h index 9cd8f747212f..ce9426c507fd 100644 --- a/include/linux/iio/sysfs.h +++ b/include/linux/iio/sysfs.h @@ -55,10 +55,34 @@ struct iio_const_attr { { .dev_attr = __ATTR(_name, _mode, _show, _store), \ .address = _addr } +#define IIO_ATTR_RO(_name, _addr) \ + { .dev_attr = __ATTR_RO(_name), \ + .address = _addr } + +#define IIO_ATTR_WO(_name, _addr) \ + { .dev_attr = __ATTR_WO(_name), \ + .address = _addr } + +#define IIO_ATTR_RW(_name, _addr) \ + { .dev_attr = __ATTR_RW(_name), \ + .address = _addr } + #define IIO_DEVICE_ATTR(_name, _mode, _show, _store, _addr) \ struct iio_dev_attr iio_dev_attr_##_name \ = IIO_ATTR(_name, _mode, _show, _store, _addr) +#define IIO_DEVICE_ATTR_RO(_name, _addr) \ + struct iio_dev_attr iio_dev_attr_##_name \ + = IIO_ATTR_RO(_name, _addr) + +#define IIO_DEVICE_ATTR_WO(_name, _addr) \ + struct iio_dev_attr iio_dev_attr_##_name \ + = IIO_ATTR_WO(_name, _addr) + +#define IIO_DEVICE_ATTR_RW(_name, _addr) \ + struct iio_dev_attr iio_dev_attr_##_name \ + = IIO_ATTR_RW(_name, _addr) + #define IIO_DEVICE_ATTR_NAMED(_vname, _name, _mode, _show, _store, _addr) \ struct iio_dev_attr iio_dev_attr_##_vname \ = IIO_ATTR(_name, _mode, _show, _store, _addr) diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h index 4f1154f7a33c..ea08302f2d7b 100644 --- a/include/linux/iio/trigger.h +++ b/include/linux/iio/trigger.h @@ -170,6 +170,8 @@ void iio_trigger_free(struct iio_trigger *trig); */ bool iio_trigger_using_own(struct iio_dev *indio_dev); +int iio_trigger_validate_own_device(struct iio_trigger *trig, + struct iio_dev *indio_dev); #else struct iio_trigger; diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h index 32b579525004..2aa7b6384d64 100644 --- a/include/linux/iio/types.h +++ b/include/linux/iio/types.h @@ -29,4 +29,9 @@ enum iio_event_info { #define IIO_VAL_FRACTIONAL 10 #define IIO_VAL_FRACTIONAL_LOG2 11 +enum iio_available_type { + IIO_AVAIL_LIST, + IIO_AVAIL_RANGE, +}; + #endif /* _IIO_TYPES_H_ */ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 2d9b650047a5..d49e26c6cdc7 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -429,6 +429,7 @@ struct intel_iommu { struct page_req_dsc *prq; unsigned char prq_name[16]; /* Name for PRQ interrupt */ struct idr pasid_idr; + u32 pasid_max; #endif struct q_inval *qi; /* Queued invalidation info */ u32 *iommu_state; /* Store iommu states between suspend and resume.*/ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 72f0721f75e7..53144e78a369 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -232,6 +232,18 @@ 
struct irq_affinity_notify {
 void (*release)(struct kref *ref);
};

+/**
+ * struct irq_affinity - Description for automatic irq affinity assignments
+ * @pre_vectors: Don't apply affinity to @pre_vectors at the beginning of
+ * the MSI(-X) vector space
+ * @post_vectors: Don't apply affinity to @post_vectors at the end of
+ * the MSI(-X) vector space
+ */
+struct irq_affinity {
+ int pre_vectors;
+ int post_vectors;
+};
+
 #if defined(CONFIG_SMP)
 
extern cpumask_var_t irq_default_affinity;
 
@@ -278,8 +290,8 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
 extern int
 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
 
-struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, int nvec);
-int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec);
+struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
+int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd);
 
#else /* CONFIG_SMP */
@@ -313,13 +325,13 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 }
 
 static inline struct cpumask *
-irq_create_affinity_masks(const struct cpumask *affinity, int nvec)
+irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
 {
 return NULL;
 }
 
 static inline int
-irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec)
+irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
 {
 return maxvec;
 }
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 7892f55a1866..f185156de74d 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -49,6 +49,7 @@ struct iomap {
 #define IOMAP_WRITE (1 << 0) /* writing, must allocate blocks */
 #define IOMAP_ZERO (1 << 1) /* zeroing operation, may skip holes */
 #define IOMAP_REPORT (1 << 2) /* report extent status, e.g.
FIEMAP */
+#define IOMAP_FAULT (1 << 3) /* mapping for page fault */
 
 struct iomap_ops {
 /*
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index ca1ad9ebbc92..671d014e6429 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -64,6 +64,11 @@ struct ipv6_devconf {
 } stable_secret;
 __s32 use_oif_addrs_only;
 __s32 keep_addr_on_down;
+ __s32 seg6_enabled;
+#ifdef CONFIG_IPV6_SEG6_HMAC
+ __s32 seg6_require_hmac;
+#endif
+ __u32 enhanced_dad;
 
 struct ctl_table_header *sysctl_header;
 };
@@ -149,7 +154,7 @@ static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
 {
 #if defined(CONFIG_NET_L3_MASTER_DEV)
 if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
- ipv6_l3mdev_skb(IP6CB(skb)->flags))
+ skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
 return true;
 #endif
 return false;
@@ -229,8 +234,9 @@ struct ipv6_pinfo {
 rxflow:1,
 rxtclass:1,
 rxpmtu:1,
- rxorigdstaddr:1;
- /* 2 bits hole */
+ rxorigdstaddr:1,
+ recvfragsize:1;
+ /* 1 bit hole */
 } bits;
 __u16 all;
 } rxopt;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index b7e34313cdfe..e808f8ae6f14 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -239,7 +239,7 @@
 #define GITS_TYPER_PTA (1UL << 19)
 #define GITS_TYPER_HWCOLLCNT_SHIFT 24
 
-#define GITS_CBASER_VALID (1UL << 63)
+#define GITS_CBASER_VALID (1ULL << 63)
 #define GITS_CBASER_SHAREABILITY_SHIFT (10)
 #define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59)
 #define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53)
@@ -265,7 +265,7 @@
 
 #define GITS_BASER_NR_REGS 8
 
-#define GITS_BASER_VALID (1UL << 63)
+#define GITS_BASER_VALID (1ULL << 63)
 #define GITS_BASER_INDIRECT (1ULL << 62)
 
 #define GITS_BASER_INNER_CACHEABILITY_SHIFT (59)
@@ -295,10 +295,10 @@
 #define GITS_BASER_InnerShareable \
 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
 #define GITS_BASER_PAGE_SIZE_SHIFT (8)
-#define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_4K (0ULL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_16K (1ULL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_64K (2ULL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_MASK (3ULL << GITS_BASER_PAGE_SIZE_SHIFT)
 #define GITS_BASER_PAGES_MAX 256
 #define GITS_BASER_PAGES_SHIFT (0)
 #define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1)
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 410decacff8f..68bd88223417 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -77,7 +77,6 @@ extern int kdb_poll_idx;
 * number whenever the kernel debugger is entered.
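On the arm-gic-v3.h changes above: GITS_CBASER and GITS_BASER are 64-bit registers, but unsigned long is only 32 bits on 32-bit kernels, so (1UL << 63) over-shifts and is undefined; the ULL suffix keeps the constants 64-bit everywhere. A minimal stand-alone illustration (plain userspace C, not kernel code):

#include <stdio.h>

int main(void)
{
	/* Well-defined on every platform: unsigned long long is at least 64 bits. */
	unsigned long long valid = 1ULL << 63;

	/* (1UL << 63) would be undefined behaviour wherever long is 32 bits. */
	printf("0x%llx\n", valid);
	return 0;
}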
*/ extern int kdb_initial_cpu; -extern atomic_t kdb_event; /* Types and messages used for dynamically added kdb shell commands */ @@ -162,6 +161,7 @@ enum kdb_msgsrc { }; extern int kdb_trap_printk; +extern int kdb_printf_cpu; extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list args); extern __printf(1, 2) int kdb_printf(const char *, ...); diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 441def77246d..56aec84237ad 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -45,11 +45,16 @@ #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) +/* @a is a power of 2 value */ #define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) #define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) #define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) #define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) +/* generic data direction definitions */ +#define READ 0 +#define WRITE 1 + #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) #define u64_to_user_ptr(x) ( \ diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 44fda64ad434..00f776816aa3 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -78,8 +78,8 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu) return kstat_cpu(cpu).irqs_sum; } -extern void account_user_time(struct task_struct *, cputime_t, cputime_t); -extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); +extern void account_user_time(struct task_struct *, cputime_t); +extern void account_system_time(struct task_struct *, int, cputime_t); extern void account_steal_time(cputime_t); extern void account_idle_time(cputime_t); diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 406c33dcae13..d7437777baaa 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -259,12 +259,6 @@ phys_addr_t paddr_vmcoreinfo_note(void); vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name) #define VMCOREINFO_CONFIG(name) \ vmcoreinfo_append_str("CONFIG_%s=y\n", #name) -#define VMCOREINFO_PAGE_OFFSET(value) \ - vmcoreinfo_append_str("PAGE_OFFSET=%lx\n", (unsigned long)value) -#define VMCOREINFO_VMALLOC_START(value) \ - vmcoreinfo_append_str("VMALLOC_START=%lx\n", (unsigned long)value) -#define VMCOREINFO_VMEMMAP_START(value) \ - vmcoreinfo_append_str("VMEMMAP_START=%lx\n", (unsigned long)value) extern struct kimage *kexec_image; extern struct kimage *kexec_crash_image; diff --git a/include/linux/kthread.h b/include/linux/kthread.h index a6e82a69c363..4fec8b775895 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -48,6 +48,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), __k; \ }) +void free_kthread_struct(struct task_struct *k); void kthread_bind(struct task_struct *k, unsigned int cpu); void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask); int kthread_stop(struct task_struct *k); @@ -174,7 +175,7 @@ __printf(2, 3) struct kthread_worker * kthread_create_worker(unsigned int flags, const char namefmt[], ...); -struct kthread_worker * +__printf(3, 4) struct kthread_worker * kthread_create_worker_on_cpu(int cpu, unsigned int flags, const char namefmt[], ...); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 01c0b9cc3915..1c5190dab2c1 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -224,7 +224,6 @@ struct kvm_vcpu { int fpu_active; int guest_fpu_loaded, guest_xcr0_loaded; - unsigned char 
fpu_counter; struct swait_queue_head wq; struct pid *pid; int sigset_active; @@ -439,6 +438,9 @@ struct kvm { pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) #define kvm_debug(fmt, ...) \ pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) +#define kvm_debug_ratelimited(fmt, ...) \ + pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \ + ## __VA_ARGS__) #define kvm_pr_unimpl(fmt, ...) \ pr_err_ratelimited("kvm [%i]: " fmt, \ task_tgid_nr(current), ## __VA_ARGS__) @@ -450,6 +452,9 @@ struct kvm { #define vcpu_debug(vcpu, fmt, ...) \ kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) +#define vcpu_debug_ratelimited(vcpu, fmt, ...) \ + kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \ + ## __VA_ARGS__) #define vcpu_err(vcpu, fmt, ...) \ kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) @@ -645,6 +650,8 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len); int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); +int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + void *data, int offset, unsigned long len); int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, gpa_t gpa, unsigned long len); int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); @@ -1107,6 +1114,10 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) extern bool kvm_rebooting; +extern unsigned int halt_poll_ns; +extern unsigned int halt_poll_ns_grow; +extern unsigned int halt_poll_ns_shrink; + struct kvm_device { struct kvm_device_ops *ops; struct kvm *kvm; diff --git a/include/linux/leds.h b/include/linux/leds.h index ddfcb2df3656..569cb531094c 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -42,16 +42,20 @@ struct led_classdev { #define LED_UNREGISTERING (1 << 1) /* Upper 16 bits reflect control information */ #define LED_CORE_SUSPENDRESUME (1 << 16) -#define LED_BLINK_SW (1 << 17) -#define LED_BLINK_ONESHOT (1 << 18) -#define LED_BLINK_ONESHOT_STOP (1 << 19) -#define LED_BLINK_INVERT (1 << 20) -#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 21) -#define LED_BLINK_DISABLE (1 << 22) -#define LED_SYSFS_DISABLE (1 << 23) -#define LED_DEV_CAP_FLASH (1 << 24) -#define LED_HW_PLUGGABLE (1 << 25) -#define LED_PANIC_INDICATOR (1 << 26) +#define LED_SYSFS_DISABLE (1 << 17) +#define LED_DEV_CAP_FLASH (1 << 18) +#define LED_HW_PLUGGABLE (1 << 19) +#define LED_PANIC_INDICATOR (1 << 20) + + /* set_brightness_work / blink_timer flags, atomic, private. */ + unsigned long work_flags; + +#define LED_BLINK_SW 0 +#define LED_BLINK_ONESHOT 1 +#define LED_BLINK_ONESHOT_STOP 2 +#define LED_BLINK_INVERT 3 +#define LED_BLINK_BRIGHTNESS_CHANGE 4 +#define LED_BLINK_DISABLE 5 /* Set LED brightness level * Must not sleep. 
Use brightness_set_blocking for drivers @@ -89,6 +93,7 @@ struct led_classdev { unsigned long blink_delay_on, blink_delay_off; struct timer_list blink_timer; int blink_brightness; + int new_blink_brightness; void (*flash_resume)(struct led_classdev *led_cdev); struct work_struct set_brightness_work; diff --git a/include/linux/libata.h b/include/linux/libata.h index 616eef4d81ea..c170be548b7f 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -166,6 +166,8 @@ enum { ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */ ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */ ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */ + ATA_DFLAG_NCQ_PRIO = (1 << 20), /* device supports NCQ priority */ + ATA_DFLAG_NCQ_PRIO_ENABLE = (1 << 21), /* Priority cmds sent to dev */ ATA_DFLAG_INIT_MASK = (1 << 24) - 1, ATA_DFLAG_DETACH = (1 << 24), @@ -342,7 +344,9 @@ enum { ATA_SHIFT_PIO = 0, ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_NR_PIO_MODES, ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_NR_MWDMA_MODES, + ATA_SHIFT_PRIO = 6, + ATA_PRIO_HIGH = 2, /* size of buffer to pad xfers ending on unaligned boundaries */ ATA_DMA_PAD_SZ = 4, @@ -542,6 +546,7 @@ typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes) extern struct device_attribute dev_attr_link_power_management_policy; extern struct device_attribute dev_attr_unload_heads; +extern struct device_attribute dev_attr_ncq_prio_enable; extern struct device_attribute dev_attr_em_message_type; extern struct device_attribute dev_attr_em_message; extern struct device_attribute dev_attr_sw_activity; diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index f4947fda11e7..8458c5351e56 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -143,7 +143,7 @@ u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd, const struct nd_cmd_desc *desc, int idx, void *buf); u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd, const struct nd_cmd_desc *desc, int idx, const u32 *in_field, - const u32 *out_field); + const u32 *out_field, unsigned long remainder); int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count); struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus, struct nd_region_desc *ndr_desc); diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index d190786e4ad8..7c273bbc5351 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -47,6 +47,7 @@ struct ppa_addr { struct nvm_rq; struct nvm_id; struct nvm_dev; +struct nvm_tgt_dev; typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); @@ -107,6 +108,8 @@ enum { NVM_RSP_NOT_CHANGEABLE = 0x1, NVM_RSP_ERR_FAILWRITE = 0x40ff, NVM_RSP_ERR_EMPTYPAGE = 0x42ff, + NVM_RSP_ERR_FAILECC = 0x4281, + NVM_RSP_WARN_HIGHECC = 0x4700, /* Device opcodes */ NVM_OP_HBREAD = 0x02, @@ -208,7 +211,7 @@ struct nvm_id { struct nvm_target { struct list_head list; - struct nvm_dev *dev; + struct nvm_tgt_dev *dev; struct nvm_tgt_type *type; struct gendisk *disk; }; @@ -228,7 +231,7 @@ typedef void (nvm_end_io_fn)(struct nvm_rq *); struct nvm_rq { struct nvm_tgt_instance *ins; - struct nvm_dev *dev; + struct nvm_tgt_dev *dev; struct bio *bio; @@ -263,35 +266,12 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) return rqdata + 1; } -struct nvm_block; - -struct nvm_lun { - int id; - - int lun_id; - int chnl_id; - - spinlock_t lock; - - unsigned int nr_free_blocks; /* Number of unused blocks */ - struct 
nvm_block *blocks; -}; - enum { NVM_BLK_ST_FREE = 0x1, /* Free block */ NVM_BLK_ST_TGT = 0x2, /* Block in use by target */ NVM_BLK_ST_BAD = 0x8, /* Bad block */ }; -struct nvm_block { - struct list_head list; - struct nvm_lun *lun; - unsigned long id; - - void *priv; - int state; -}; - /* system block cpu representation */ struct nvm_sb_info { unsigned long seqnr; @@ -301,22 +281,12 @@ struct nvm_sb_info { struct ppa_addr fs_ppa; }; -struct nvm_dev { - struct nvm_dev_ops *ops; - - struct list_head devices; - - /* Media manager */ - struct nvmm_type *mt; - void *mp; - - /* System blocks */ - struct nvm_sb_info sb; - - /* Device information */ +/* Device generic information */ +struct nvm_geo { int nr_chnls; + int nr_luns; + int luns_per_chnl; /* -1 if channels are not symmetric */ int nr_planes; - int luns_per_chnl; int sec_per_pg; /* only sectors for a single page */ int pgs_per_blk; int blks_per_lun; @@ -336,14 +306,44 @@ struct nvm_dev { int sec_per_pl; /* all sectors across planes */ int sec_per_blk; int sec_per_lun; +}; + +struct nvm_tgt_dev { + /* Device information */ + struct nvm_geo geo; + + /* Base ppas for target LUNs */ + struct ppa_addr *luns; + + sector_t total_secs; + + struct nvm_id identity; + struct request_queue *q; + + struct nvm_dev *parent; + void *map; +}; + +struct nvm_dev { + struct nvm_dev_ops *ops; + + struct list_head devices; + + /* Media manager */ + struct nvmm_type *mt; + void *mp; + + /* System blocks */ + struct nvm_sb_info sb; + + /* Device information */ + struct nvm_geo geo; /* lower page table */ int lps_per_blk; int *lptbl; - unsigned long total_blocks; unsigned long total_secs; - int nr_luns; unsigned long *lun_map; void *dma_pool; @@ -352,26 +352,57 @@ struct nvm_dev { /* Backend device */ struct request_queue *q; - struct device dev; - struct device *parent_dev; char name[DISK_NAME_LEN]; void *private_data; + void *rmap; + struct mutex mlock; spinlock_t lock; }; +static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo, + u64 pba) +{ + struct ppa_addr l; + int secs, pgs, blks, luns; + sector_t ppa = pba; + + l.ppa = 0; + + div_u64_rem(ppa, geo->sec_per_pg, &secs); + l.g.sec = secs; + + sector_div(ppa, geo->sec_per_pg); + div_u64_rem(ppa, geo->pgs_per_blk, &pgs); + l.g.pg = pgs; + + sector_div(ppa, geo->pgs_per_blk); + div_u64_rem(ppa, geo->blks_per_lun, &blks); + l.g.blk = blks; + + sector_div(ppa, geo->blks_per_lun); + div_u64_rem(ppa, geo->luns_per_chnl, &luns); + l.g.lun = luns; + + sector_div(ppa, geo->luns_per_chnl); + l.g.ch = ppa; + + return l; +} + static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, struct ppa_addr r) { + struct nvm_geo *geo = &dev->geo; struct ppa_addr l; - l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset; - l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset; - l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset; - l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset; - l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset; - l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset; + l.ppa = ((u64)r.g.blk) << geo->ppaf.blk_offset; + l.ppa |= ((u64)r.g.pg) << geo->ppaf.pg_offset; + l.ppa |= ((u64)r.g.sec) << geo->ppaf.sect_offset; + l.ppa |= ((u64)r.g.pl) << geo->ppaf.pln_offset; + l.ppa |= ((u64)r.g.lun) << geo->ppaf.lun_offset; + l.ppa |= ((u64)r.g.ch) << geo->ppaf.ch_offset; return l; } @@ -379,24 +410,25 @@ static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, struct ppa_addr r) { + struct nvm_geo *geo = &dev->geo; struct ppa_addr 
l; l.ppa = 0; /* * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc. */ - l.g.blk = (r.ppa >> dev->ppaf.blk_offset) & - (((1 << dev->ppaf.blk_len) - 1)); - l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) & - (((1 << dev->ppaf.pg_len) - 1)); - l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) & - (((1 << dev->ppaf.sect_len) - 1)); - l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) & - (((1 << dev->ppaf.pln_len) - 1)); - l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) & - (((1 << dev->ppaf.lun_len) - 1)); - l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) & - (((1 << dev->ppaf.ch_len) - 1)); + l.g.blk = (r.ppa >> geo->ppaf.blk_offset) & + (((1 << geo->ppaf.blk_len) - 1)); + l.g.pg |= (r.ppa >> geo->ppaf.pg_offset) & + (((1 << geo->ppaf.pg_len) - 1)); + l.g.sec |= (r.ppa >> geo->ppaf.sect_offset) & + (((1 << geo->ppaf.sect_len) - 1)); + l.g.pl |= (r.ppa >> geo->ppaf.pln_offset) & + (((1 << geo->ppaf.pln_len) - 1)); + l.g.lun |= (r.ppa >> geo->ppaf.lun_offset) & + (((1 << geo->ppaf.lun_len) - 1)); + l.g.ch |= (r.ppa >> geo->ppaf.ch_offset) & + (((1 << geo->ppaf.ch_len) - 1)); return l; } @@ -411,18 +443,13 @@ static inline void ppa_set_empty(struct ppa_addr *ppa_addr) ppa_addr->ppa = ADDR_EMPTY; } -static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev, - struct nvm_block *blk) +static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2) { - struct ppa_addr ppa; - struct nvm_lun *lun = blk->lun; - - ppa.ppa = 0; - ppa.g.blk = blk->id % dev->blks_per_lun; - ppa.g.lun = lun->lun_id; - ppa.g.ch = lun->chnl_id; + if (ppa_empty(ppa1) || ppa_empty(ppa2)) + return 0; - return ppa; + return ((ppa1.g.ch == ppa2.g.ch) && (ppa1.g.lun == ppa2.g.lun) && + (ppa1.g.blk == ppa2.g.blk)); } static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg) @@ -432,7 +459,7 @@ static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg) typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); typedef sector_t (nvm_tgt_capacity_fn)(void *); -typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int); +typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *); typedef void (nvm_tgt_exit_fn)(void *); struct nvm_tgt_type { @@ -465,23 +492,18 @@ typedef void (nvmm_unregister_fn)(struct nvm_dev *); typedef int (nvmm_create_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_create *); typedef int (nvmm_remove_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_remove *); -typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *, - struct nvm_lun *, unsigned long); -typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *); -typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *); -typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *); -typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *); -typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); -typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, - unsigned long); -typedef void (nvmm_mark_blk_fn)(struct nvm_dev *, struct ppa_addr, int); -typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); -typedef int (nvmm_reserve_lun)(struct nvm_dev *, int); -typedef void (nvmm_release_lun)(struct nvm_dev *, int); -typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *); - +typedef int (nvmm_submit_io_fn)(struct nvm_tgt_dev *, struct nvm_rq *); +typedef int (nvmm_erase_blk_fn)(struct nvm_tgt_dev *, struct ppa_addr *, int); typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t); typedef void (nvmm_put_area_fn)(struct nvm_dev *, 
sector_t); +typedef struct ppa_addr (nvmm_trans_ppa_fn)(struct nvm_tgt_dev *, + struct ppa_addr, int); +typedef void (nvmm_part_to_tgt_fn)(struct nvm_dev *, sector_t*, int); + +enum { + TRANS_TGT_TO_DEV = 0x0, + TRANS_DEV_TO_TGT = 0x1, +}; struct nvmm_type { const char *name; @@ -493,54 +515,41 @@ struct nvmm_type { nvmm_create_tgt_fn *create_tgt; nvmm_remove_tgt_fn *remove_tgt; - /* Block administration callbacks */ - nvmm_get_blk_fn *get_blk; - nvmm_put_blk_fn *put_blk; - nvmm_open_blk_fn *open_blk; - nvmm_close_blk_fn *close_blk; - nvmm_flush_blk_fn *flush_blk; - nvmm_submit_io_fn *submit_io; nvmm_erase_blk_fn *erase_blk; - /* Bad block mgmt */ - nvmm_mark_blk_fn *mark_blk; - - /* Configuration management */ - nvmm_get_lun_fn *get_lun; - nvmm_reserve_lun *reserve_lun; - nvmm_release_lun *release_lun; - - /* Statistics */ - nvmm_lun_info_print_fn *lun_info_print; - nvmm_get_area_fn *get_area; nvmm_put_area_fn *put_area; + nvmm_trans_ppa_fn *trans_ppa; + nvmm_part_to_tgt_fn *part_to_tgt; + struct list_head list; }; extern int nvm_register_mgr(struct nvmm_type *); extern void nvm_unregister_mgr(struct nvmm_type *); -extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *, - unsigned long); -extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *); - extern struct nvm_dev *nvm_alloc_dev(int); extern int nvm_register(struct nvm_dev *); extern void nvm_unregister(struct nvm_dev *); -void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type); - -extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *); +extern int nvm_set_bb_tbl(struct nvm_dev *, struct ppa_addr *, int, int); +extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *, + int, int); +extern int nvm_max_phys_sects(struct nvm_tgt_dev *); +extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *); extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *); extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *); extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *, const struct ppa_addr *, int, int); extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); -extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int); -extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); +extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int, int); +extern int nvm_erase_blk(struct nvm_tgt_dev *, struct ppa_addr *, int); +extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *, + void *); +extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t); +extern void nvm_put_area(struct nvm_tgt_dev *, sector_t); extern void nvm_end_io(struct nvm_rq *, int); extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int, void *, int); @@ -548,6 +557,7 @@ extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int, int, void *, int); extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int); extern int nvm_get_bb_tbl(struct nvm_dev *, struct ppa_addr, u8 *); +extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *); /* sysblk.c */ #define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */ @@ -569,10 +579,10 @@ extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *); extern int nvm_dev_factory(struct nvm_dev *, int flags); -#define nvm_for_each_lun_ppa(dev, ppa, chid, lunid) \ - for ((chid) = 0, (ppa).ppa = 0; (chid) < (dev)->nr_chnls; \ +#define nvm_for_each_lun_ppa(geo, ppa, chid, lunid) \ + for ((chid) = 0, (ppa).ppa = 0; (chid) < 
(geo)->nr_chnls; \ (chid)++, (ppa).g.ch = (chid)) \ - for ((lunid) = 0; (lunid) < (dev)->luns_per_chnl; \ + for ((lunid) = 0; (lunid) < (geo)->luns_per_chnl; \ (lunid)++, (ppa).g.lun = (lunid)) #else /* CONFIG_NVM */ diff --git a/include/linux/list.h b/include/linux/list.h index 5809e9a2de5b..d1039ecaf94f 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -28,27 +28,42 @@ static inline void INIT_LIST_HEAD(struct list_head *list) list->prev = list; } +#ifdef CONFIG_DEBUG_LIST +extern bool __list_add_valid(struct list_head *new, + struct list_head *prev, + struct list_head *next); +extern bool __list_del_entry_valid(struct list_head *entry); +#else +static inline bool __list_add_valid(struct list_head *new, + struct list_head *prev, + struct list_head *next) +{ + return true; +} +static inline bool __list_del_entry_valid(struct list_head *entry) +{ + return true; +} +#endif + /* * Insert a new entry between two known consecutive entries. * * This is only for internal list manipulation where we know * the prev/next entries already! */ -#ifndef CONFIG_DEBUG_LIST static inline void __list_add(struct list_head *new, struct list_head *prev, struct list_head *next) { + if (!__list_add_valid(new, prev, next)) + return; + next->prev = new; new->next = next; new->prev = prev; WRITE_ONCE(prev->next, new); } -#else -extern void __list_add(struct list_head *new, - struct list_head *prev, - struct list_head *next); -#endif /** * list_add - add a new entry @@ -96,22 +111,20 @@ static inline void __list_del(struct list_head * prev, struct list_head * next) * Note: list_empty() on entry does not return true after this, the entry is * in an undefined state. */ -#ifndef CONFIG_DEBUG_LIST static inline void __list_del_entry(struct list_head *entry) { + if (!__list_del_entry_valid(entry)) + return; + __list_del(entry->prev, entry->next); } static inline void list_del(struct list_head *entry) { - __list_del(entry->prev, entry->next); + __list_del_entry(entry); entry->next = LIST_POISON1; entry->prev = LIST_POISON2; } -#else -extern void __list_del_entry(struct list_head *entry); -extern void list_del(struct list_head *entry); -#endif /** * list_replace - replace old entry by new one diff --git a/include/linux/mbus.h b/include/linux/mbus.h index 2931aa43dab1..0d3f14fd2621 100644 --- a/include/linux/mbus.h +++ b/include/linux/mbus.h @@ -82,6 +82,7 @@ static inline int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, } #endif +#ifdef CONFIG_MVEBU_MBUS int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr); void mvebu_mbus_get_pcie_mem_aperture(struct resource *res); void mvebu_mbus_get_pcie_io_aperture(struct resource *res); @@ -97,5 +98,12 @@ int mvebu_mbus_init(const char *soc, phys_addr_t mbus_phys_base, size_t mbus_size, phys_addr_t sdram_phys_base, size_t sdram_size); int mvebu_mbus_dt_init(bool is_coherent); +#else +static inline int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, + u8 *attr) +{ + return -EINVAL; +} +#endif /* CONFIG_MVEBU_MBUS */ #endif /* __LINUX_MBUS_H */ diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h index a585b4b5fa0e..0661af17a758 100644 --- a/include/linux/mc146818rtc.h +++ b/include/linux/mc146818rtc.h @@ -16,6 +16,7 @@ #include <asm/mc146818rtc.h> /* register access macros */ #include <linux/bcd.h> #include <linux/delay.h> +#include <linux/pm-trace.h> #ifdef __KERNEL__ #include <linux/spinlock.h> /* spinlock_t */ diff --git a/include/linux/mdev.h b/include/linux/mdev.h new file mode 100644 index 000000000000..ec819e9a115a 
--- /dev/null
+++ b/include/linux/mdev.h
@@ -0,0 +1,168 @@
+/*
+ * Mediated device definition
+ *
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Author: Neo Jia <cjia@nvidia.com>
+ * Kirti Wankhede <kwankhede@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MDEV_H
+#define MDEV_H
+
+/* Parent device */
+struct parent_device {
+ struct device *dev;
+ const struct parent_ops *ops;
+
+ /* internal */
+ struct kref ref;
+ struct mutex lock;
+ struct list_head next;
+ struct kset *mdev_types_kset;
+ struct list_head type_list;
+};
+
+/* Mediated device */
+struct mdev_device {
+ struct device dev;
+ struct parent_device *parent;
+ uuid_le uuid;
+ void *driver_data;
+
+ /* internal */
+ struct kref ref;
+ struct list_head next;
+ struct kobject *type_kobj;
+};
+
+/**
+ * struct parent_ops - Structure to be registered for each parent device to
+ * register the device with the mdev module.
+ *
+ * @owner: The module owner.
+ * @dev_attr_groups: Attributes of the parent device.
+ * @mdev_attr_groups: Attributes of the mediated device.
+ * @supported_type_groups: Attributes to define supported types. It is mandatory
+ * to provide supported types.
+ * @create: Called to allocate basic resources in parent device's
+ * driver for a particular mediated device. It is
+ * mandatory to provide create ops.
+ * @kobj: kobject of type for which 'create' is called.
+ * @mdev: mdev_device structure of the mediated device
+ * that is being created
+ * Returns integer: success (0) or error (< 0)
+ * @remove: Called to free resources in parent device's driver for
+ * a mediated device. It is mandatory to provide 'remove'
+ * ops.
+ * @mdev: mdev_device structure which is being
+ * destroyed
+ * Returns integer: success (0) or error (< 0)
+ * @open: Open mediated device.
+ * @mdev: mediated device.
+ * Returns integer: success (0) or error (< 0)
+ * @release: release mediated device
+ * @mdev: mediated device.
+ * @read: Read emulation callback
+ * @mdev: mediated device structure
+ * @buf: read buffer
+ * @count: number of bytes to read
+ * @ppos: address.
+ * Returns number of bytes read on success or error.
+ * @write: Write emulation callback
+ * @mdev: mediated device structure
+ * @buf: write buffer
+ * @count: number of bytes to be written
+ * @ppos: address.
+ * Returns number of bytes written on success or error.
+ * @ioctl: IOCTL callback
+ * @mdev: mediated device structure
+ * @cmd: ioctl command
+ * @arg: arguments to ioctl
+ * @mmap: mmap callback
+ * @mdev: mediated device structure
+ * @vma: vma structure
+ * A parent device that supports mediated devices should be registered with the
+ * mdev module with a parent_ops structure.
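To make the contract above concrete, a hedged sketch of a parent driver registering with mdev; names are illustrative, and the mandatory supported_type_groups plus all error handling are omitted for brevity:

static int example_create(struct kobject *kobj, struct mdev_device *mdev)
{
	/* Allocate per-mdev state here and stash it for later callbacks. */
	mdev_set_drvdata(mdev, NULL);
	return 0;
}

static int example_remove(struct mdev_device *mdev)
{
	/* Free whatever example_create() allocated. */
	return 0;
}

static const struct parent_ops example_ops = {
	.owner	= THIS_MODULE,
	.create	= example_create,
	.remove	= example_remove,
};

/* From the physical device's probe(): mdev_register_device(dev, &example_ops); */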
+ **/ + +struct parent_ops { + struct module *owner; + const struct attribute_group **dev_attr_groups; + const struct attribute_group **mdev_attr_groups; + struct attribute_group **supported_type_groups; + + int (*create)(struct kobject *kobj, struct mdev_device *mdev); + int (*remove)(struct mdev_device *mdev); + int (*open)(struct mdev_device *mdev); + void (*release)(struct mdev_device *mdev); + ssize_t (*read)(struct mdev_device *mdev, char __user *buf, + size_t count, loff_t *ppos); + ssize_t (*write)(struct mdev_device *mdev, const char __user *buf, + size_t count, loff_t *ppos); + ssize_t (*ioctl)(struct mdev_device *mdev, unsigned int cmd, + unsigned long arg); + int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma); +}; + +/* interface for exporting mdev supported type attributes */ +struct mdev_type_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, struct device *dev, char *buf); + ssize_t (*store)(struct kobject *kobj, struct device *dev, + const char *buf, size_t count); +}; + +#define MDEV_TYPE_ATTR(_name, _mode, _show, _store) \ +struct mdev_type_attribute mdev_type_attr_##_name = \ + __ATTR(_name, _mode, _show, _store) +#define MDEV_TYPE_ATTR_RW(_name) \ + struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RW(_name) +#define MDEV_TYPE_ATTR_RO(_name) \ + struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RO(_name) +#define MDEV_TYPE_ATTR_WO(_name) \ + struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_WO(_name) + +/** + * struct mdev_driver - Mediated device driver + * @name: driver name + * @probe: called when new device created + * @remove: called when device removed + * @driver: device driver structure + * + **/ +struct mdev_driver { + const char *name; + int (*probe)(struct device *dev); + void (*remove)(struct device *dev); + struct device_driver driver; +}; + +#define to_mdev_driver(drv) container_of(drv, struct mdev_driver, driver) +#define to_mdev_device(dev) container_of(dev, struct mdev_device, dev) + +static inline void *mdev_get_drvdata(struct mdev_device *mdev) +{ + return mdev->driver_data; +} + +static inline void mdev_set_drvdata(struct mdev_device *mdev, void *data) +{ + mdev->driver_data = data; +} + +extern struct bus_type mdev_bus_type; + +#define dev_is_mdev(d) ((d)->bus == &mdev_bus_type) + +extern int mdev_register_device(struct device *dev, + const struct parent_ops *ops); +extern void mdev_unregister_device(struct device *dev); + +extern int mdev_register_driver(struct mdev_driver *drv, struct module *owner); +extern void mdev_unregister_driver(struct mdev_driver *drv); + +#endif /* MDEV_H */ diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index e746919530f5..a0d274fe08f1 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -8,8 +8,7 @@ struct mei_cl_device; struct mei_device; -typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev, - u32 events, void *context); +typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev); /** * struct mei_cl_device - MEI device handle @@ -24,12 +23,12 @@ typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev, * @me_cl: me client * @cl: mei client * @name: device name - * @event_work: async work to execute event callback - * @event_cb: Drivers register this callback to get asynchronous ME - * events (e.g. Rx buffer pending) notifications. - * @event_context: event callback run context - * @events_mask: Events bit mask requested by driver. - * @events: Events bitmask sent to the driver. 
+ * @rx_work: async work to execute Rx event callback
+ * @rx_cb: Drivers register this callback to get asynchronous ME
+ * Rx buffer pending notifications.
+ * @notif_work: async work to execute FW notif event callback
+ * @notif_cb: Drivers register this callback to get asynchronous ME
+ * FW notification pending notifications.
 *
 * @do_match: whether device can be matched with a driver
 * @is_added: device is already scanned
@@ -44,11 +43,10 @@ struct mei_cl_device {
 struct mei_cl *cl;
 char name[MEI_CL_NAME_SIZE];
 
- struct work_struct event_work;
- mei_cldev_event_cb_t event_cb;
- void *event_context;
- unsigned long events_mask;
- unsigned long events;
+ struct work_struct rx_work;
+ mei_cldev_cb_t rx_cb;
+ struct work_struct notif_work;
+ mei_cldev_cb_t notif_cb;
 
 unsigned int do_match:1;
 unsigned int is_added:1;
@@ -74,16 +72,27 @@ int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
 
 void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv);
 
-ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length);
-ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
+/**
+ * module_mei_cl_driver - Helper macro for registering mei cl driver
+ *
+ * @__mei_cldrv: mei_cl_driver structure
+ *
+ * Helper macro for mei cl drivers which do not do anything special in module
+ * init/exit, for eliminating boilerplate code.
+ */
+#define module_mei_cl_driver(__mei_cldrv) \
+ module_driver(__mei_cldrv, \
+ mei_cldev_driver_register,\
+ mei_cldev_driver_unregister)
 
-int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
- unsigned long event_mask,
- mei_cldev_event_cb_t read_cb, void *context);
+ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length);
+ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
+ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
+ size_t length);
 
-#define MEI_CL_EVENT_RX 0
-#define MEI_CL_EVENT_TX 1
-#define MEI_CL_EVENT_NOTIF 2
+int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb);
+int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
+ mei_cldev_cb_t notif_cb);
 
 const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev);
 u8 mei_cldev_ver(const struct mei_cl_device *cldev);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5e5b2969d931..5f4d8281832b 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -7,6 +7,7 @@
 
 #include <linux/mmzone.h>
+#include <linux/dax.h>
 #include <linux/slab.h>
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
@@ -177,6 +178,13 @@ static inline bool vma_migratable(struct vm_area_struct *vma)
 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
 return false;
 
+ /*
+ * DAX device mappings require predictable access latency, so avoid
+ * incurring periodic faults.
+ */
+ if (vma_is_dax(vma))
+ return false;
+
 #ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 if (vma->vm_flags & VM_HUGETLB)
 return false;
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 76f7ef4d3a0d..f62043a75f43 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -148,6 +148,15 @@ struct cros_ec_device {
 int event_size;
 };
 
+/**
+ * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information
+ *
+ * @sensor_num: Id of the sensor, as reported by the EC.
+ */
+struct cros_ec_sensor_platform {
+ u8 sensor_num;
+};
+
 /* struct cros_ec_platform - ChromeOS EC platform information
 *
 * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
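Circling back to the mei_cl_bus.h rework earlier in this hunk: the single masked event callback is split into dedicated Rx and FW-notification callbacks. A hedged sketch of the new registration style, with illustrative driver names and error handling trimmed:

static void example_rx(struct mei_cl_device *cldev)
{
	u8 buf[64];
	ssize_t len;

	/* Drain the pending Rx buffer announced by this callback. */
	len = mei_cldev_recv(cldev, buf, sizeof(buf));
	if (len > 0) {
		/* consume len bytes */
	}
}

/* In the driver's probe(): mei_cldev_register_rx_cb(cldev, example_rx); */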
@@ -175,6 +184,7 @@ struct cros_ec_dev {
 struct cros_ec_device *ec_dev;
 struct device *dev;
 u16 cmd_offset;
+ u32 features[2];
 };
 
 /**
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 76728ff37d01..1683003603f3 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -713,6 +713,90 @@ struct ec_response_get_set_value {
 /* More than one command can use these structs to get/set parameters. */
 #define EC_CMD_GSV_PAUSE_IN_S5 0x0c
 
+/*****************************************************************************/
+/* List the features supported by the firmware */
+#define EC_CMD_GET_FEATURES 0x0d
+
+/* Supported features */
+enum ec_feature_code {
+ /*
+ * This image contains a limited set of features. Another image
+ * in RW partition may support more features.
+ */
+ EC_FEATURE_LIMITED = 0,
+ /*
+ * Commands for probing/reading/writing/erasing the flash in the
+ * EC are present.
+ */
+ EC_FEATURE_FLASH = 1,
+ /*
+ * Can control the fan speed directly.
+ */
+ EC_FEATURE_PWM_FAN = 2,
+ /*
+ * Can control the intensity of the keyboard backlight.
+ */
+ EC_FEATURE_PWM_KEYB = 3,
+ /*
+ * Support Google lightbar, introduced on Pixel.
+ */
+ EC_FEATURE_LIGHTBAR = 4,
+ /* Control of LEDs */
+ EC_FEATURE_LED = 5,
+ /* Exposes an interface to control gyro and sensors.
+ * The host goes through the EC to access these sensors.
+ * In addition, the EC may provide composite sensors, like lid angle.
+ */
+ EC_FEATURE_MOTION_SENSE = 6,
+ /* The keyboard is controlled by the EC */
+ EC_FEATURE_KEYB = 7,
+ /* The AP can use part of the EC flash as persistent storage. */
+ EC_FEATURE_PSTORE = 8,
+ /* The EC monitors BIOS port 80h, and can return POST codes. */
+ EC_FEATURE_PORT80 = 9,
+ /*
+ * Thermal management: include TMP specific commands.
+ * Higher level than direct fan control.
+ */
+ EC_FEATURE_THERMAL = 10,
+ /* Can switch the screen backlight on/off */
+ EC_FEATURE_BKLIGHT_SWITCH = 11,
+ /* Can switch the wifi module on/off */
+ EC_FEATURE_WIFI_SWITCH = 12,
+ /* Monitor host events, for example through SMI or SCI */
+ EC_FEATURE_HOST_EVENTS = 13,
+ /* The EC exposes GPIO commands to control/monitor connected devices. */
+ EC_FEATURE_GPIO = 14,
+ /* The EC can send i2c messages to downstream devices. */
+ EC_FEATURE_I2C = 15,
+ /* Commands to control the charger are included */
+ EC_FEATURE_CHARGER = 16,
+ /* Simple battery support. */
+ EC_FEATURE_BATTERY = 17,
+ /*
+ * Support Smart battery protocol
+ * (Common Smart Battery System Interface Specification)
+ */
+ EC_FEATURE_SMART_BATTERY = 18,
+ /* EC can detect when the host hangs. */
+ EC_FEATURE_HANG_DETECT = 19,
+ /* Report power information, for pit only */
+ EC_FEATURE_PMU = 20,
+ /* Another Cros EC device is present downstream of this one */
+ EC_FEATURE_SUB_MCU = 21,
+ /* Support USB Power delivery (PD) commands */
+ EC_FEATURE_USB_PD = 22,
+ /* Control USB multiplexer, for audio through USB port for instance. */
+ EC_FEATURE_USB_MUX = 23,
+ /* Motion Sensor code has an internal software FIFO */
+ EC_FEATURE_MOTION_SENSE_FIFO = 24,
+};
+
+#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32))
+#define EC_FEATURE_MASK_1(event_code) (1UL << (event_code - 32))
+struct ec_response_get_features {
+ uint32_t flags[2];
+} __packed;
 
 /*****************************************************************************/
 /* Flash commands */
@@ -1315,6 +1399,24 @@ enum motionsense_command {
 */
 MOTIONSENSE_CMD_KB_WAKE_ANGLE = 5,
 
+ /*
+ * Returns data for a single sensor.
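On the EC_CMD_GET_FEATURES additions above: feature codes can exceed 31, so a check has to pick the right response word via EC_FEATURE_MASK_0/1. A hedged host-side sketch, assuming flags[] came back from EC_CMD_GET_FEATURES (illustrative name):

static bool example_ec_has_feature(const struct ec_response_get_features *r,
				   enum ec_feature_code code)
{
	/* Word 0 covers codes 0-31, word 1 covers codes 32-63. */
	if (code < 32)
		return r->flags[0] & EC_FEATURE_MASK_0(code);
	return r->flags[1] & EC_FEATURE_MASK_1(code);
}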
+ */
+ MOTIONSENSE_CMD_DATA = 6,
+
+ /*
+ * Perform low-level calibration. On sensors that support it, ask to
+ * do offset calibration.
+ */
+ MOTIONSENSE_CMD_PERFORM_CALIB = 10,
+
+ /*
+ * Sensor Offset command is a setter/getter command for the offset used
+ * for calibration. The offsets can be calculated by the host, or via
+ * PERFORM_CALIB command.
+ */
+ MOTIONSENSE_CMD_SENSOR_OFFSET = 11,
+
 /* Number of motionsense sub-commands. */
 MOTIONSENSE_NUM_CMDS
 };
@@ -1335,12 +1437,18 @@ enum motionsensor_id {
 enum motionsensor_type {
 MOTIONSENSE_TYPE_ACCEL = 0,
 MOTIONSENSE_TYPE_GYRO = 1,
+ MOTIONSENSE_TYPE_MAG = 2,
+ MOTIONSENSE_TYPE_PROX = 3,
+ MOTIONSENSE_TYPE_LIGHT = 4,
+ MOTIONSENSE_TYPE_ACTIVITY = 5,
+ MOTIONSENSE_TYPE_MAX
 };
 
 /* List of motion sensor locations. */
 enum motionsensor_location {
 MOTIONSENSE_LOC_BASE = 0,
 MOTIONSENSE_LOC_LID = 1,
+ MOTIONSENSE_LOC_MAX,
 };
 
 /* List of motion sensor chips. */
@@ -1361,6 +1469,31 @@ enum motionsensor_chip {
 */
 #define EC_MOTION_SENSE_NO_VALUE -1
 
+#define EC_MOTION_SENSE_INVALID_CALIB_TEMP 0x8000
+
+/* Set Calibration information */
+#define MOTION_SENSE_SET_OFFSET 1
+
+struct ec_response_motion_sensor_data {
+ /* Flags for each sensor. */
+ uint8_t flags;
+ /* Sensor number the data comes from */
+ uint8_t sensor_num;
+ /* Each sensor is up to 3-axis. */
+ union {
+ int16_t data[3];
+ struct {
+ uint16_t rsvd;
+ uint32_t timestamp;
+ } __packed;
+ struct {
+ uint8_t activity; /* motionsensor_activity */
+ uint8_t state;
+ int16_t add_info[2];
+ };
+ };
+} __packed;
+
 struct ec_params_motion_sense {
 uint8_t cmd;
 union {
@@ -1378,9 +1511,37 @@ struct ec_params_motion_sense {
 int16_t data;
 } ec_rate, kb_wake_angle;
 
+ /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */
+ struct {
+ uint8_t sensor_num;
+
+ /*
+ * bit 0: If set (MOTION_SENSE_SET_OFFSET), set
+ * the calibration information in the EC.
+ * If unset, just retrieve calibration information.
+ */
+ uint16_t flags;
+
+ /*
+ * Temperature at calibration, in units of 0.01 C
+ * 0x8000: invalid / unknown.
+ * 0x0: 0C
+ * 0x7fff: +327.67C
+ */
+ int16_t temp;
+
+ /*
+ * Offset for calibration.
+ * Unit:
+ * Accelerometer: 1/1024 g
+ * Gyro: 1/1024 deg/s
+ * Compass: 1/16 uT
+ */
+ int16_t offset[3];
+ } __packed sensor_offset;
+
 /* Used for MOTIONSENSE_CMD_INFO. */
 struct {
- /* Should be element of enum motionsensor_id. */
 uint8_t sensor_num;
 } info;
 
@@ -1410,11 +1571,14 @@ struct ec_response_motion_sense {
 /* Flags representing the motion sensor module. */
 uint8_t module_flags;
 
- /* Flags for each sensor in enum motionsensor_id. */
- uint8_t sensor_flags[EC_MOTION_SENSOR_COUNT];
+ /* Number of sensors managed directly by the EC. */
+ uint8_t sensor_count;
 
- /* Array of all sensor data. Each sensor is 3-axis. */
- int16_t data[3*EC_MOTION_SENSOR_COUNT];
+ /*
+ * Sensor data is truncated if response_max is too small
+ * for holding all the data.
+ */
+ struct ec_response_motion_sensor_data sensor[0];
 } dump;
 
 /* Used for MOTIONSENSE_CMD_INFO. */
@@ -1429,6 +1593,9 @@ struct ec_response_motion_sense {
 uint8_t chip;
 } info;
 
+ /* Used for MOTIONSENSE_CMD_DATA */
+ struct ec_response_motion_sensor_data data;
+
 /*
 * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR,
 * MOTIONSENSE_CMD_SENSOR_RANGE, and
@@ -1438,6 +1605,12 @@ struct ec_response_motion_sense {
 /* Current value of the parameter queried.
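One decoding subtlety in the calibration fields above: temp is a signed int16_t while the invalid marker EC_MOTION_SENSE_INVALID_CALIB_TEMP is 0x8000, so a comparison without a cast never matches. A hedged sketch against the sensor_offset reply defined just below (illustrative name):

static void example_offset_temp(const struct ec_response_motion_sense *r)
{
	/* 0x8000 flags an unknown calibration temperature. */
	if ((u16)r->sensor_offset.temp == EC_MOTION_SENSE_INVALID_CALIB_TEMP)
		return;
	pr_info("calibrated at %d (units of 0.01 C)\n", r->sensor_offset.temp);
}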
*/
 int32_t ret;
 } ec_rate, sensor_odr, sensor_range, kb_wake_angle;
+
+ /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */
+ struct {
+ int16_t temp;
+ int16_t offset[3];
+ } sensor_offset, perform_calib;
 };
} __packed;
diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h
index 3ca0af07fc78..ad2a9a852aea 100644
--- a/include/linux/mfd/max77620.h
+++ b/include/linux/mfd/max77620.h
@@ -180,6 +180,7 @@
 #define MAX77620_SD_CFG1_FPWM_SD_MASK BIT(2)
 #define MAX77620_SD_CFG1_FPWM_SD_SKIP 0
 #define MAX77620_SD_CFG1_FPWM_SD_FPWM BIT(2)
+#define MAX20024_SD_CFG1_MPOK_MASK BIT(1)
 #define MAX77620_SD_CFG1_FSRADE_SD_MASK BIT(0)
 #define MAX77620_SD_CFG1_FSRADE_SD_DISABLE 0
 #define MAX77620_SD_CFG1_FSRADE_SD_ENABLE BIT(0)
@@ -187,6 +188,7 @@
 /* LDO_CNFG2 */
 #define MAX77620_LDO_POWER_MODE_MASK 0xC0
 #define MAX77620_LDO_POWER_MODE_SHIFT 6
+#define MAX20024_LDO_CFG2_MPOK_MASK BIT(2)
 #define MAX77620_LDO_CFG2_ADE_MASK BIT(1)
 #define MAX77620_LDO_CFG2_ADE_DISABLE 0
 #define MAX77620_LDO_CFG2_ADE_ENABLE BIT(1)
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 7f55b8b41032..b9a53e013bff 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -23,6 +23,8 @@
 #define REG_IRQENABLE 0x02C
 #define REG_IRQCLR 0x030
 #define REG_IRQWAKEUP 0x034
+#define REG_DMAENABLE_SET 0x038
+#define REG_DMAENABLE_CLEAR 0x03c
 #define REG_CTRL 0x040
 #define REG_ADCFSM 0x044
 #define REG_CLKDIV 0x04C
@@ -36,6 +38,7 @@
 #define REG_FIFO0THR 0xE8
 #define REG_FIFO1CNT 0xF0
 #define REG_FIFO1THR 0xF4
+#define REG_DMA1REQ 0xF8
 #define REG_FIFO0 0x100
 #define REG_FIFO1 0x200
 
@@ -126,6 +129,10 @@
 #define FIFOREAD_DATA_MASK (0xfff << 0)
 #define FIFOREAD_CHNLID_MASK (0xf << 16)
 
+/* DMA ENABLE/CLEAR Register */
+#define DMA_FIFO0 BIT(0)
+#define DMA_FIFO1 BIT(1)
+
 /* Sequencer Status */
 #define SEQ_STATUS BIT(5)
 #define CHARGE_STEP 0x11
@@ -155,6 +162,7 @@ struct ti_tscadc_dev {
 struct device *dev;
 struct regmap *regmap;
 void __iomem *tscadc_base;
+ phys_addr_t tscadc_phys_base;
 int irq;
 int used_cells; /* 1-2 */
 int tsc_wires;
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 7a26286db895..fba44abd05ba 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -100,6 +100,11 @@
 #define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8)
 
 /*
+ * Some controllers have a 32-bit wide data port register
+ */
+#define TMIO_MMC_32BIT_DATA_PORT (1 << 9)
+
+/*
 * Some controllers allow setting the SDx actual clock
 */
 #define TMIO_MMC_CLK_ACTUAL (1 << 10)
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 47492c9631b3..1629a0c32679 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -31,7 +31,11 @@ struct mii_if_info {
 extern int mii_link_ok (struct mii_if_info *mii);
 extern int mii_nway_restart (struct mii_if_info *mii);
 extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
+extern int mii_ethtool_get_link_ksettings(
+ struct mii_if_info *mii, struct ethtool_link_ksettings *cmd);
 extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
+extern int mii_ethtool_set_link_ksettings(
+ struct mii_if_info *mii, const struct ethtool_link_ksettings *cmd);
 extern int mii_check_gmii_support(struct mii_if_info *mii);
 extern void mii_check_link (struct mii_if_info *mii);
 extern unsigned int mii_check_media (struct mii_if_info *mii,
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 722698a43d79..a426cb55dc43 100644
--- a/include/linux/miscdevice.h
+++
b/include/linux/miscdevice.h
@@ -72,6 +72,13 @@ extern int misc_register(struct miscdevice *misc);
 extern void misc_deregister(struct miscdevice *misc);
 
 /*
+ * Helper macro for drivers that don't do anything special in the initcall.
+ * This helps in eliminating boilerplate code.
+ */
+#define builtin_misc_device(__misc_device) \
+ builtin_driver(__misc_device, misc_register)
+
+/*
 * Helper macro for drivers that don't do anything special in module init / exit
 * call. This helps in eliminating boilerplate code.
 */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 3be7abd6e722..c9f379689dd0 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -476,7 +476,6 @@ enum {
 enum {
 MLX4_INTERFACE_STATE_UP = 1 << 0,
 MLX4_INTERFACE_STATE_DELETION = 1 << 1,
- MLX4_INTERFACE_STATE_SHUTDOWN = 1 << 2,
 };
 
 #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 58276144ba81..9f489365b3d3 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -277,6 +277,7 @@ enum mlx5_event {
 MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
 MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
 MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
+ MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
 MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
 
 MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
@@ -552,6 +553,15 @@ struct mlx5_eqe_vport_change {
 __be32 rsvd1[6];
 } __packed;
 
+struct mlx5_eqe_port_module {
+ u8 reserved_at_0[1];
+ u8 module;
+ u8 reserved_at_2[1];
+ u8 module_status;
+ u8 reserved_at_4[2];
+ u8 error_type;
+} __packed;
+
 union ev_data {
 __be32 raw[7];
 struct mlx5_eqe_cmd cmd;
@@ -565,6 +575,7 @@ union ev_data {
 struct mlx5_eqe_page_req req_pages;
 struct mlx5_eqe_page_fault page_fault;
 struct mlx5_eqe_vport_change vport_change;
+ struct mlx5_eqe_port_module port_module;
 } __packed;
 
 struct mlx5_eqe {
@@ -1060,6 +1071,11 @@ enum {
 MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
 };
 
+enum {
+ MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
+ MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
+};
+
 static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
 {
 if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index ecc451d89ccd..0ae55361e674 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -104,6 +104,8 @@ enum {
 enum {
 MLX5_REG_QETCR = 0x4005,
 MLX5_REG_QTCT = 0x400a,
+ MLX5_REG_DCBX_PARAM = 0x4020,
+ MLX5_REG_DCBX_APP = 0x4021,
 MLX5_REG_PCAP = 0x5001,
 MLX5_REG_PMTU = 0x5003,
 MLX5_REG_PTYS = 0x5004,
@@ -121,6 +123,12 @@ enum {
 MLX5_REG_HOST_ENDIANNESS = 0x7004,
 MLX5_REG_MCIA = 0x9014,
 MLX5_REG_MLCR = 0x902b,
+ MLX5_REG_MPCNT = 0x9051,
+};
+
+enum mlx5_dcbx_oper_mode {
+ MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0,
+ MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
 };
 
 enum {
@@ -208,7 +216,7 @@ struct mlx5_cmd_first {
 
 struct mlx5_cmd_msg {
 struct list_head list;
- struct cache_ent *cache;
+ struct cmd_msg_cache *parent;
 u32 len;
 struct mlx5_cmd_first first;
 struct mlx5_cmd_mailbox *next;
@@ -228,17 +236,17 @@ struct mlx5_cmd_debug {
 u16 outlen;
 };
 
-struct cache_ent {
+struct cmd_msg_cache {
 /* protect block chain allocations */
 spinlock_t lock;
 struct list_head head;
+ unsigned int max_inbox_size;
+ unsigned int num_ent;
 };
 
-struct cmd_msg_cache {
- struct cache_ent large;
- struct cache_ent med;
-
+enum {
+ MLX5_NUM_COMMAND_CACHES = 5,
 };
 
 struct mlx5_cmd_stats {
@@ -281,7 +289,7 @@ struct mlx5_cmd {
 struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
 struct
pci_pool *pool; struct mlx5_cmd_debug dbg; - struct cmd_msg_cache cache; + struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES]; int checksum_disabled; struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; }; @@ -310,6 +318,13 @@ struct mlx5_buf { u8 page_shift; }; +struct mlx5_frag_buf { + struct mlx5_buf_list *frags; + int npages; + int size; + u8 page_shift; +}; + struct mlx5_eq_tasklet { struct list_head list; struct list_head process_list; @@ -498,6 +513,31 @@ struct mlx5_rl_table { struct mlx5_rl_entry *rl_entry; }; +enum port_module_event_status_type { + MLX5_MODULE_STATUS_PLUGGED = 0x1, + MLX5_MODULE_STATUS_UNPLUGGED = 0x2, + MLX5_MODULE_STATUS_ERROR = 0x3, + MLX5_MODULE_STATUS_NUM = 0x3, +}; + +enum port_module_event_error_type { + MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED, + MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE, + MLX5_MODULE_EVENT_ERROR_BUS_STUCK, + MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT, + MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST, + MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER, + MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE, + MLX5_MODULE_EVENT_ERROR_BAD_CABLE, + MLX5_MODULE_EVENT_ERROR_UNKNOWN, + MLX5_MODULE_EVENT_ERROR_NUM, +}; + +struct mlx5_port_module_event_stats { + u64 status_counters[MLX5_MODULE_STATUS_NUM]; + u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM]; +}; + struct mlx5_priv { char name[MLX5_MAX_NAME_LEN]; struct mlx5_eq_table eq_table; @@ -559,6 +599,8 @@ struct mlx5_priv { unsigned long pci_dev_data; struct mlx5_fc_stats fc_stats; struct mlx5_rl_table rl_table; + + struct mlx5_port_module_event_stats pme_stats; }; enum mlx5_device_state { @@ -787,6 +829,9 @@ int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf, int node); int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); +int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, + struct mlx5_frag_buf *buf, int node); +void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, gfp_t flags, int npages); void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, @@ -831,6 +876,7 @@ void mlx5_unregister_debugfs(void); int mlx5_eq_init(struct mlx5_core_dev *dev); void mlx5_eq_cleanup(struct mlx5_core_dev *dev); void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); +void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 93ebc5e21334..949b24b6c479 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -42,6 +42,10 @@ enum { MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16, }; +enum { + MLX5_FLOW_TABLE_TUNNEL_EN = BIT(0), +}; + #define LEFTOVERS_RULE_NUM 2 static inline void build_leftovers_ft_param(int *priority, int *n_ent, @@ -69,8 +73,8 @@ enum mlx5_flow_namespace_type { struct mlx5_flow_table; struct mlx5_flow_group; -struct mlx5_flow_rule; struct mlx5_flow_namespace; +struct mlx5_flow_handle; struct mlx5_flow_spec { u8 match_criteria_enable; @@ -97,13 +101,15 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, int prio, int num_flow_table_entries, int max_num_groups, - u32 level); + u32 level, + u32 flags); struct mlx5_flow_table * 
mlx5_create_flow_table(struct mlx5_flow_namespace *ns, int prio, int num_flow_table_entries, - u32 level); + u32 level, + u32 flags); struct mlx5_flow_table * mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, int prio, @@ -124,21 +130,28 @@ struct mlx5_flow_group * mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in); void mlx5_destroy_flow_group(struct mlx5_flow_group *fg); +struct mlx5_flow_act { + u32 action; + u32 flow_tag; + u32 encap_id; +}; + /* Single destination per rule. * Group ID is implied by the match criteria. */ -struct mlx5_flow_rule * -mlx5_add_flow_rule(struct mlx5_flow_table *ft, - struct mlx5_flow_spec *spec, - u32 action, - u32 flow_tag, - struct mlx5_flow_destination *dest); -void mlx5_del_flow_rule(struct mlx5_flow_rule *fr); - -int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, - struct mlx5_flow_destination *dest); - -struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule); +struct mlx5_flow_handle * +mlx5_add_flow_rules(struct mlx5_flow_table *ft, + struct mlx5_flow_spec *spec, + struct mlx5_flow_act *flow_act, + struct mlx5_flow_destination *dest, + int dest_num); +void mlx5_del_flow_rules(struct mlx5_flow_handle *fr); + +int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler, + struct mlx5_flow_destination *new_dest, + struct mlx5_flow_destination *old_dest); + +struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler); struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); void mlx5_fc_query_cached(struct mlx5_fc *counter, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 6045d4d58065..a5f0fbedf1e7 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -83,6 +83,7 @@ enum { MLX5_CMD_OP_SET_HCA_CAP = 0x109, MLX5_CMD_OP_QUERY_ISSI = 0x10a, MLX5_CMD_OP_SET_ISSI = 0x10b, + MLX5_CMD_OP_SET_DRIVER_VERSION = 0x10d, MLX5_CMD_OP_CREATE_MKEY = 0x200, MLX5_CMD_OP_QUERY_MKEY = 0x201, MLX5_CMD_OP_DESTROY_MKEY = 0x202, @@ -145,6 +146,12 @@ enum { MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, MLX5_CMD_OP_SET_RATE_LIMIT = 0x780, MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781, + MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782, + MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783, + MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT = 0x784, + MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT = 0x785, + MLX5_CMD_OP_CREATE_QOS_PARA_VPORT = 0x786, + MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT = 0x787, MLX5_CMD_OP_ALLOC_PD = 0x800, MLX5_CMD_OP_DEALLOC_PD = 0x801, MLX5_CMD_OP_ALLOC_UAR = 0x802, @@ -537,13 +544,27 @@ struct mlx5_ifc_e_switch_cap_bits { struct mlx5_ifc_qos_cap_bits { u8 packet_pacing[0x1]; - u8 reserved_0[0x1f]; - u8 reserved_1[0x20]; + u8 esw_scheduling[0x1]; + u8 reserved_at_2[0x1e]; + + u8 reserved_at_20[0x20]; + u8 packet_pacing_max_rate[0x20]; + u8 packet_pacing_min_rate[0x20]; - u8 reserved_2[0x10]; + + u8 reserved_at_80[0x10]; u8 packet_pacing_rate_table_size[0x10]; - u8 reserved_3[0x760]; + + u8 esw_element_type[0x10]; + u8 esw_tsar_type[0x10]; + + u8 reserved_at_c0[0x10]; + u8 max_qos_para_vport[0x10]; + + u8 max_tsar_bw_share[0x20]; + + u8 reserved_at_100[0x700]; }; struct mlx5_ifc_per_protocol_networking_offload_caps_bits { @@ -804,7 +825,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 early_vf_enable[0x1]; u8 reserved_at_1a9[0x2]; u8 local_ca_ack_delay[0x5]; - u8 reserved_at_1af[0x2]; + u8 port_module_event[0x1]; + u8 reserved_at_1b0[0x1]; u8 ports_check[0x1]; u8 reserved_at_1b2[0x1]; u8 
disable_link_up[0x1]; @@ -888,7 +910,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_pg_sz[0x8]; u8 bf[0x1]; - u8 reserved_at_261[0x1]; + u8 driver_version[0x1]; u8 pad_tx_eth_packet[0x1]; u8 reserved_at_263[0x8]; u8 log_bf_reg_size[0x5]; @@ -1735,6 +1757,80 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits { u8 reserved_at_4c0[0x300]; }; +struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits { + u8 life_time_counter_high[0x20]; + + u8 life_time_counter_low[0x20]; + + u8 rx_errors[0x20]; + + u8 tx_errors[0x20]; + + u8 l0_to_recovery_eieos[0x20]; + + u8 l0_to_recovery_ts[0x20]; + + u8 l0_to_recovery_framing[0x20]; + + u8 l0_to_recovery_retrain[0x20]; + + u8 crc_error_dllp[0x20]; + + u8 crc_error_tlp[0x20]; + + u8 reserved_at_140[0x680]; +}; + +struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits { + u8 life_time_counter_high[0x20]; + + u8 life_time_counter_low[0x20]; + + u8 time_to_boot_image_start[0x20]; + + u8 time_to_link_image[0x20]; + + u8 calibration_time[0x20]; + + u8 time_to_first_perst[0x20]; + + u8 time_to_detect_state[0x20]; + + u8 time_to_l0[0x20]; + + u8 time_to_crs_en[0x20]; + + u8 time_to_plastic_image_start[0x20]; + + u8 time_to_iron_image_start[0x20]; + + u8 perst_handler[0x20]; + + u8 times_in_l1[0x20]; + + u8 times_in_l23[0x20]; + + u8 dl_down[0x20]; + + u8 config_cycle1usec[0x20]; + + u8 config_cycle2to7usec[0x20]; + + u8 config_cycle_8to15usec[0x20]; + + u8 config_cycle_16_to_63usec[0x20]; + + u8 config_cycle_64usec[0x20]; + + u8 correctable_err_msg_sent[0x20]; + + u8 non_fatal_err_msg_sent[0x20]; + + u8 fatal_err_msg_sent[0x20]; + + u8 reserved_at_2e0[0x4e0]; +}; + struct mlx5_ifc_cmd_inter_comp_event_bits { u8 command_completion_vector[0x20]; @@ -2333,6 +2429,30 @@ struct mlx5_ifc_sqc_bits { struct mlx5_ifc_wq_bits wq; }; +enum { + SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR = 0x0, + SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1, + SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2, + SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3, +}; + +struct mlx5_ifc_scheduling_context_bits { + u8 element_type[0x8]; + u8 reserved_at_8[0x18]; + + u8 element_attributes[0x20]; + + u8 parent_element_id[0x20]; + + u8 reserved_at_60[0x40]; + + u8 bw_share[0x20]; + + u8 max_average_bw[0x20]; + + u8 reserved_at_e0[0x120]; +}; + struct mlx5_ifc_rqtc_bits { u8 reserved_at_0[0xa0]; @@ -2844,7 +2964,7 @@ struct mlx5_ifc_xrqc_bits { struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context; - u8 reserved_at_180[0x200]; + u8 reserved_at_180[0x880]; struct mlx5_ifc_wq_bits wq; }; @@ -2875,6 +2995,12 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { u8 reserved_at_0[0x7c0]; }; +union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits { + struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout; + struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits pcie_tas_cntrs_grp_data_layout; + u8 reserved_at_0[0x7c0]; +}; + union mlx5_ifc_event_auto_bits { struct mlx5_ifc_comp_event_bits comp_event; struct mlx5_ifc_dct_events_bits dct_events; @@ -2920,6 +3046,29 @@ struct mlx5_ifc_register_loopback_control_bits { u8 reserved_at_20[0x60]; }; +struct mlx5_ifc_vport_tc_element_bits { + u8 traffic_class[0x4]; + u8 reserved_at_4[0xc]; + u8 vport_number[0x10]; +}; + +struct mlx5_ifc_vport_element_bits { + u8 reserved_at_0[0x10]; + u8 vport_number[0x10]; +}; + +enum { + TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0, + TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1, + TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2, +}; + +struct mlx5_ifc_tsar_element_bits { + u8 reserved_at_0[0x8]; + u8 tsar_type[0x8]; 
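+ /* tsar_type holds one of the TSAR_ELEMENT_TSAR_TYPE_* values above */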
+ u8 reserved_at_10[0x10]; +}; + struct mlx5_ifc_teardown_hca_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -3540,6 +3689,39 @@ struct mlx5_ifc_query_special_contexts_in_bits { u8 reserved_at_40[0x40]; }; +struct mlx5_ifc_query_scheduling_element_out_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0xc0]; + + struct mlx5_ifc_scheduling_context_bits scheduling_context; + + u8 reserved_at_300[0x100]; +}; + +enum { + SCHEDULING_HIERARCHY_E_SWITCH = 0x2, +}; + +struct mlx5_ifc_query_scheduling_element_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 scheduling_hierarchy[0x8]; + u8 reserved_at_48[0x18]; + + u8 scheduling_element_id[0x20]; + + u8 reserved_at_80[0x180]; +}; + struct mlx5_ifc_query_rqt_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -3904,6 +4086,25 @@ struct mlx5_ifc_query_issi_in_bits { u8 reserved_at_40[0x40]; }; +struct mlx5_ifc_set_driver_version_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_set_driver_version_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + u8 driver_version[64][0x8]; +}; + struct mlx5_ifc_query_hca_vport_pkey_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -4725,6 +4926,43 @@ struct mlx5_ifc_modify_sq_in_bits { struct mlx5_ifc_sqc_bits ctx; }; +struct mlx5_ifc_modify_scheduling_element_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x1c0]; +}; + +enum { + MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE = 0x1, + MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW = 0x2, +}; + +struct mlx5_ifc_modify_scheduling_element_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 scheduling_hierarchy[0x8]; + u8 reserved_at_48[0x18]; + + u8 scheduling_element_id[0x20]; + + u8 reserved_at_80[0x20]; + + u8 modify_bitmask[0x20]; + + u8 reserved_at_c0[0x40]; + + struct mlx5_ifc_scheduling_context_bits scheduling_context; + + u8 reserved_at_300[0x100]; +}; + struct mlx5_ifc_modify_rqt_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -5390,6 +5628,30 @@ struct mlx5_ifc_destroy_sq_in_bits { u8 reserved_at_60[0x20]; }; +struct mlx5_ifc_destroy_scheduling_element_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x1c0]; +}; + +struct mlx5_ifc_destroy_scheduling_element_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 scheduling_hierarchy[0x8]; + u8 reserved_at_48[0x18]; + + u8 scheduling_element_id[0x20]; + + u8 reserved_at_80[0x180]; +}; + struct mlx5_ifc_destroy_rqt_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -6017,6 +6279,36 @@ struct mlx5_ifc_create_sq_in_bits { struct mlx5_ifc_sqc_bits ctx; }; +struct mlx5_ifc_create_scheduling_element_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + u8 scheduling_element_id[0x20]; + + u8 reserved_at_a0[0x160]; +}; + +struct mlx5_ifc_create_scheduling_element_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 scheduling_hierarchy[0x8]; + u8 reserved_at_48[0x18]; + + u8 reserved_at_60[0xa0]; + + struct mlx5_ifc_scheduling_context_bits scheduling_context; + + u8 
reserved_at_300[0x100]; +}; + struct mlx5_ifc_create_rqt_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -7028,6 +7320,18 @@ struct mlx5_ifc_ppcnt_reg_bits { union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; }; +struct mlx5_ifc_mpcnt_reg_bits { + u8 reserved_at_0[0x8]; + u8 pcie_index[0x8]; + u8 reserved_at_10[0xa]; + u8 grp[0x6]; + + u8 clr[0x1]; + u8 reserved_at_21[0x1f]; + + union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set; +}; + struct mlx5_ifc_ppad_reg_bits { u8 reserved_at_0[0x3]; u8 single_mac[0x1]; @@ -7633,6 +7937,7 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_pmtu_reg_bits pmtu_reg; struct mlx5_ifc_ppad_reg_bits ppad_reg; struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg; + struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg; struct mlx5_ifc_pplm_reg_bits pplm_reg; struct mlx5_ifc_pplr_reg_bits pplr_reg; struct mlx5_ifc_ppsc_reg_bits ppsc_reg; diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index b3065acd20b4..e527732fb31b 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -94,6 +94,9 @@ enum mlx5e_link_mode { #define MLX5E_PROT_MASK(link_mode) (1 << link_mode) +#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF +#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF + int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, int ptys_size, int proto_mask, u8 local_port); @@ -138,8 +141,12 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, int mlx5_max_tc(struct mlx5_core_dev *mdev); int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc); +int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev, + u8 prio, u8 *tc); int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group); int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw); +int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev, + u8 tc, u8 *bw_pct); int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, u8 *max_bw_value, u8 *max_bw_unit); @@ -155,4 +162,6 @@ void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, u16 offset, u16 size, u8 *data); +int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out); +int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in); #endif /* __MLX5_PORT_H__ */ diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h index 33c97dc900f8..1cde0fd53f90 100644 --- a/include/linux/mlx5/srq.h +++ b/include/linux/mlx5/srq.h @@ -55,7 +55,7 @@ struct mlx5_srq_attr { u32 lwm; u32 user_index; u64 db_record; - u64 *pas; + __be64 *pas; }; struct mlx5_core_dev; diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 451b0bde9083..ec35157ea725 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -36,6 +36,12 @@ #include <linux/mlx5/driver.h> #include <linux/mlx5/device.h> +enum { + MLX5_CAP_INLINE_MODE_L2, + MLX5_CAP_INLINE_MODE_VPORT_CONTEXT, + MLX5_CAP_INLINE_MODE_NOT_REQUIRED, +}; + u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport); u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport); @@ -43,8 +49,8 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport, u8 state); int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u16 vport, u8 *addr); -void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, - u8 *min_inline); +int 
mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, + u16 vport, u8 *min_inline); int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 min_inline); int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, diff --git a/include/linux/mm.h b/include/linux/mm.h index a92c8d73aeaf..4424784ac374 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -292,36 +292,23 @@ extern pgprot_t protection_map[16]; * pgoff should be used in favour of virtual_address, if possible. */ struct vm_fault { + struct vm_area_struct *vma; /* Target VMA */ unsigned int flags; /* FAULT_FLAG_xxx flags */ gfp_t gfp_mask; /* gfp mask to be used for allocations */ pgoff_t pgoff; /* Logical page offset based on vma */ - void __user *virtual_address; /* Faulting virtual address */ + unsigned long address; /* Faulting virtual address */ + pmd_t *pmd; /* Pointer to pmd entry matching + * the 'address' */ + pte_t orig_pte; /* Value of PTE at the time of fault */ - struct page *cow_page; /* Handler may choose to COW */ + struct page *cow_page; /* Page handler may use for COW fault */ + struct mem_cgroup *memcg; /* Cgroup cow_page belongs to */ struct page *page; /* ->fault handlers should return a * page here, unless VM_FAULT_NOPAGE * is set (which is also implied by * VM_FAULT_ERROR). */ - void *entry; /* ->fault handler can alternatively - * return locked DAX entry. In that - * case handler should return - * VM_FAULT_DAX_LOCKED and fill in - * entry here. - */ -}; - -/* - * Page fault context: passes though page fault handler instead of endless list - * of function arguments. - */ -struct fault_env { - struct vm_area_struct *vma; /* Target VMA */ - unsigned long address; /* Faulting virtual address */ - unsigned int flags; /* FAULT_FLAG_xxx flags */ - pmd_t *pmd; /* Pointer to pmd entry matching - * the 'address' - */ + /* These three entries are valid only while holding ptl lock */ pte_t *pte; /* Pointer to pte entry matching * the 'address'. NULL if the page * table hasn't been allocated. 
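Aside: with fault_env folded into struct vm_fault above, a handler takes the faulting address from vmf->address (now an unsigned long) rather than the removed virtual_address field, and the target VMA also travels in vmf->vma. A minimal sketch of a ->fault handler against the new layout, keeping the (vma, vmf) signature shown in the hunk below; demo_dev, its pages[] array and its size field are hypothetical, not part of this patch:

static int demo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct demo_dev *dd = vma->vm_private_data;	/* hypothetical driver state */
	unsigned long off = vmf->address - vma->vm_start;

	if (off >= dd->size)
		return VM_FAULT_SIGBUS;

	/* ->fault handlers hand the resolved page back in vmf->page */
	vmf->page = dd->pages[off >> PAGE_SHIFT];
	get_page(vmf->page);
	return 0;
}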
@@ -351,7 +338,7 @@ struct vm_operations_struct { int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); int (*pmd_fault)(struct vm_area_struct *, unsigned long address, pmd_t *, unsigned int flags); - void (*map_pages)(struct fault_env *fe, + void (*map_pages)(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); /* notification that a previously read-only page is about to become @@ -625,8 +612,10 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) return pte; } -int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg, +int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, struct page *page); +int finish_fault(struct vm_fault *vmf); +int finish_mkwrite_fault(struct vm_fault *vmf); #endif /* @@ -1110,7 +1099,7 @@ static inline void clear_page_pfmemalloc(struct page *page) #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ #define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */ #define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */ -#define VM_FAULT_DAX_LOCKED 0x1000 /* ->fault has locked DAX entry */ +#define VM_FAULT_DONE_COW 0x1000 /* ->fault has fully handled COW */ #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ @@ -1221,6 +1210,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma); void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows); +int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, + spinlock_t **ptlp); int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn); int follow_phys(struct vm_area_struct *vma, unsigned long address, @@ -1270,19 +1261,18 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void * unsigned int gup_flags); extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags); +extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, + unsigned long addr, void *buf, int len, unsigned int gup_flags); long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas); + struct vm_area_struct **vmas, int *locked); long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas); long get_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked); -long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, - unsigned long start, unsigned long nr_pages, - struct page **pages, unsigned int gup_flags); long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags); int get_user_pages_fast(unsigned long start, int nr_pages, int write, @@ -2097,7 +2087,7 @@ extern void truncate_inode_pages_final(struct address_space *); /* generic vm_area_ops exported for stackable file systems */ extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); -extern void filemap_map_pages(struct fault_env *fe, +extern void filemap_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); diff --git a/include/linux/mm_types.h 
b/include/linux/mm_types.h index 4a8acedf4b7d..08d947fc4c59 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -473,6 +473,7 @@ struct mm_struct { */ struct task_struct __rcu *owner; #endif + struct user_namespace *user_ns; /* store ref to file /proc/<pid>/exe symlink points to */ struct file __rcu *exe_file; diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 73fad83acbcb..95d69d498296 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -89,6 +89,8 @@ struct mmc_ext_csd { unsigned int boot_ro_lock; /* ro lock support */ bool boot_ro_lockable; bool ffu_capable; /* Firmware upgrade support */ + bool cmdq_support; /* Command Queue supported */ + unsigned int cmdq_depth; /* Command Queue depth */ #define MMC_FIRMWARE_LEN 8 u8 fwrev[MMC_FIRMWARE_LEN]; /* FW version */ u8 raw_exception_status; /* 54 */ @@ -207,18 +209,6 @@ struct sdio_func_tuple; #define SDIO_MAX_FUNCS 7 -enum mmc_blk_status { - MMC_BLK_SUCCESS = 0, - MMC_BLK_PARTIAL, - MMC_BLK_CMD_ERR, - MMC_BLK_RETRY, - MMC_BLK_ABORT, - MMC_BLK_DATA_ERR, - MMC_BLK_ECC_ERR, - MMC_BLK_NOMEDIUM, - MMC_BLK_NEW_REQUEST, -}; - /* The number of MMC physical partitions. These consist of: * boot partitions (2), general purpose partitions (4) and * RPMB partition (1) in MMC v4.4. diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 2b953eb8ceae..e33cc748dcfe 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -15,6 +15,18 @@ struct request; struct mmc_data; struct mmc_request; +enum mmc_blk_status { + MMC_BLK_SUCCESS = 0, + MMC_BLK_PARTIAL, + MMC_BLK_CMD_ERR, + MMC_BLK_RETRY, + MMC_BLK_ABORT, + MMC_BLK_DATA_ERR, + MMC_BLK_ECC_ERR, + MMC_BLK_NOMEDIUM, + MMC_BLK_NEW_REQUEST, +}; + struct mmc_command { u32 opcode; u32 arg; @@ -150,7 +162,8 @@ struct mmc_async_req; extern int mmc_stop_bkops(struct mmc_card *); extern int mmc_read_bkops_status(struct mmc_card *); extern struct mmc_async_req *mmc_start_req(struct mmc_host *, - struct mmc_async_req *, int *); + struct mmc_async_req *, + enum mmc_blk_status *); extern int mmc_interrupt_hpi(struct mmc_card *); extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *); extern void mmc_wait_for_req_done(struct mmc_host *host, @@ -163,6 +176,7 @@ extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, extern void mmc_start_bkops(struct mmc_card *card, bool from_exception); extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error); +extern int mmc_abort_tuning(struct mmc_host *host, u32 opcode); extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); #define MMC_ERASE_ARG 0x00000000 diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index f5af2bd35e7f..15db6f83f53f 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h @@ -39,6 +39,12 @@ enum { EVENT_DATA_ERROR, }; +enum dw_mci_cookie { + COOKIE_UNMAPPED, + COOKIE_PRE_MAPPED, /* mapped by pre_req() of dwmmc */ + COOKIE_MAPPED, /* mapped by prepare_data() of dwmmc */ +}; + struct mmc_data; enum { diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 0b2439441cc8..8bc884121465 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -93,8 +93,7 @@ struct mmc_host_ops { */ void (*post_req)(struct mmc_host *host, struct mmc_request *req, int err); - void (*pre_req)(struct mmc_host *host, struct mmc_request *req, - bool is_first_req); + void (*pre_req)(struct mmc_host *host, struct 
mmc_request *req); void (*request)(struct mmc_host *host, struct mmc_request *req); /* @@ -173,7 +172,7 @@ struct mmc_async_req { * Check error status of completed mmc request. * Returns 0 if success otherwise non zero. */ - int (*err_check) (struct mmc_card *, struct mmc_async_req *); + enum mmc_blk_status (*err_check)(struct mmc_card *, struct mmc_async_req *); }; /** @@ -198,14 +197,12 @@ struct mmc_slot { * @is_new_req wake up reason was new request * @is_waiting_last_req mmc context waiting for single running request * @wait wait queue - * @lock lock to protect data fields */ struct mmc_context_info { bool is_done_rcv; bool is_new_req; bool is_waiting_last_req; wait_queue_head_t wait; - spinlock_t lock; }; struct regulator; @@ -495,11 +492,6 @@ static inline int mmc_host_uhs(struct mmc_host *host) MMC_CAP_UHS_DDR50); } -static inline int mmc_host_packed_wr(struct mmc_host *host) -{ - return host->caps2 & MMC_CAP2_PACKED_WR; -} - static inline int mmc_card_hs(struct mmc_card *card) { return card->host->ios.timing == MMC_TIMING_SD_HS || @@ -546,6 +538,11 @@ static inline void mmc_retune_recheck(struct mmc_host *host) host->retune_now = 1; } +static inline bool mmc_can_retune(struct mmc_host *host) +{ + return host->can_retune == 1; +} + void mmc_retune_pause(struct mmc_host *host); void mmc_retune_unpause(struct mmc_host *host); diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index c376209c70ef..672730acc705 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -84,6 +84,13 @@ #define MMC_APP_CMD 55 /* ac [31:16] RCA R1 */ #define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */ + /* class 11 */ +#define MMC_QUE_TASK_PARAMS 44 /* ac [20:16] task id R1 */ +#define MMC_QUE_TASK_ADDR 45 /* ac [31:0] data addr R1 */ +#define MMC_EXECUTE_READ_TASK 46 /* adtc [20:16] task id R1 */ +#define MMC_EXECUTE_WRITE_TASK 47 /* adtc [20:16] task id R1 */ +#define MMC_CMDQ_TASK_MGMT 48 /* ac [20:16] task id R1b */ + static inline bool mmc_op_multi(u32 opcode) { return opcode == MMC_WRITE_MULTIPLE_BLOCK || @@ -272,6 +279,7 @@ struct _mmc_csd { * EXT_CSD fields */ +#define EXT_CSD_CMDQ_MODE_EN 15 /* R/W */ #define EXT_CSD_FLUSH_CACHE 32 /* W */ #define EXT_CSD_CACHE_CTRL 33 /* R/W */ #define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */ @@ -331,6 +339,8 @@ struct _mmc_csd { #define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ #define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */ #define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */ +#define EXT_CSD_CMDQ_DEPTH 307 /* RO */ +#define EXT_CSD_CMDQ_SUPPORT 308 /* RO */ #define EXT_CSD_SUPPORTED_MODE 493 /* RO */ #define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ #define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ @@ -438,6 +448,13 @@ struct _mmc_csd { #define EXT_CSD_MANUAL_BKOPS_MASK 0x01 /* + * Command Queue + */ +#define EXT_CSD_CMDQ_MODE_ENABLED BIT(0) +#define EXT_CSD_CMDQ_DEPTH_MASK GENMASK(4, 0) +#define EXT_CSD_CMDQ_SUPPORTED BIT(0) + +/* * MMC_SWITCH access modes */ diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h index 3945a8c9d3cb..a7972cd3bc14 100644 --- a/include/linux/mmc/slot-gpio.h +++ b/include/linux/mmc/slot-gpio.h @@ -29,5 +29,6 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, void mmc_gpio_set_cd_isr(struct mmc_host *host, irqreturn_t (*isr)(int irq, void *dev_id)); void mmc_gpiod_request_cd_irq(struct mmc_host *host); +bool mmc_can_gpio_cd(struct mmc_host *host); #endif diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h deleted file mode 100644 index 4ac8b1977b73..000000000000 
--- a/include/linux/mutex-debug.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef __LINUX_MUTEX_DEBUG_H -#define __LINUX_MUTEX_DEBUG_H - -#include <linux/linkage.h> -#include <linux/lockdep.h> -#include <linux/debug_locks.h> - -/* - * Mutexes - debugging helpers: - */ - -#define __DEBUG_MUTEX_INITIALIZER(lockname) \ - , .magic = &lockname - -#define mutex_init(mutex) \ -do { \ - static struct lock_class_key __key; \ - \ - __mutex_init((mutex), #mutex, &__key); \ -} while (0) - -extern void mutex_destroy(struct mutex *lock); - -#endif diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 2cb7531e7d7a..b97870f2debd 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -18,6 +18,7 @@ #include <linux/atomic.h> #include <asm/processor.h> #include <linux/osq_lock.h> +#include <linux/debug_locks.h> /* * Simple, straightforward mutexes with strict semantics: @@ -48,16 +49,12 @@ * locks and tasks (and only those tasks) */ struct mutex { - /* 1: unlocked, 0: locked, negative: locked, possible waiters */ - atomic_t count; + atomic_long_t owner; spinlock_t wait_lock; - struct list_head wait_list; -#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER) - struct task_struct *owner; -#endif #ifdef CONFIG_MUTEX_SPIN_ON_OWNER struct optimistic_spin_queue osq; /* Spinner MCS lock */ #endif + struct list_head wait_list; #ifdef CONFIG_DEBUG_MUTEXES void *magic; #endif @@ -66,6 +63,11 @@ struct mutex { #endif }; +static inline struct task_struct *__mutex_owner(struct mutex *lock) +{ + return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x03); +} + /* * This is the control structure for tasks blocked on mutex, * which resides on the blocked task's kernel stack: @@ -79,9 +81,20 @@ struct mutex_waiter { }; #ifdef CONFIG_DEBUG_MUTEXES -# include <linux/mutex-debug.h> + +#define __DEBUG_MUTEX_INITIALIZER(lockname) \ + , .magic = &lockname + +extern void mutex_destroy(struct mutex *lock); + #else + # define __DEBUG_MUTEX_INITIALIZER(lockname) + +static inline void mutex_destroy(struct mutex *lock) {} + +#endif + /** * mutex_init - initialize the mutex * @mutex: the mutex to be initialized @@ -90,14 +103,12 @@ struct mutex_waiter { * * It is not allowed to initialize an already locked mutex. */ -# define mutex_init(mutex) \ -do { \ - static struct lock_class_key __key; \ - \ - __mutex_init((mutex), #mutex, &__key); \ +#define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + __mutex_init((mutex), #mutex, &__key); \ } while (0) -static inline void mutex_destroy(struct mutex *lock) {} -#endif #ifdef CONFIG_DEBUG_LOCK_ALLOC # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ @@ -107,7 +118,7 @@ static inline void mutex_destroy(struct mutex *lock) {} #endif #define __MUTEX_INITIALIZER(lockname) \ - { .count = ATOMIC_INIT(1) \ + { .owner = ATOMIC_LONG_INIT(0) \ , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ __DEBUG_MUTEX_INITIALIZER(lockname) \ @@ -127,7 +138,10 @@ extern void __mutex_init(struct mutex *lock, const char *name, */ static inline int mutex_is_locked(struct mutex *lock) { - return atomic_read(&lock->count) != 1; + /* + * XXX think about spin_is_locked + */ + return __mutex_owner(lock) != NULL; } /* @@ -175,4 +189,35 @@ extern void mutex_unlock(struct mutex *lock); extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); +/* + * These values are chosen such that FAIL and SUCCESS match the + * values of the regular mutex_trylock(). 
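+ * Callers that never take the lock recursively can treat the result as a bool.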
+ */ +enum mutex_trylock_recursive_enum { + MUTEX_TRYLOCK_FAILED = 0, + MUTEX_TRYLOCK_SUCCESS = 1, + MUTEX_TRYLOCK_RECURSIVE, +}; + +/** + * mutex_trylock_recursive - trylock variant that allows recursive locking + * @lock: mutex to be locked + * + * This function should not be used, _ever_. It is purely for hysterical GEM + * raisins, and once those are gone this will be removed. + * + * Returns: + * MUTEX_TRYLOCK_FAILED - trylock failed, + * MUTEX_TRYLOCK_SUCCESS - lock acquired, + * MUTEX_TRYLOCK_RECURSIVE - we already owned the lock. + */ +static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum +mutex_trylock_recursive(struct mutex *lock) +{ + if (unlikely(__mutex_owner(lock) == current)) + return MUTEX_TRYLOCK_RECURSIVE; + + return mutex_trylock(lock); +} + #endif /* __LINUX_MUTEX_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 91ee3643ccc8..994f7423a74b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -192,6 +192,7 @@ struct net_device_stats { #ifdef CONFIG_RPS #include <linux/static_key.h> extern struct static_key rps_needed; +extern struct static_key rfs_needed; #endif struct neighbour; @@ -316,7 +317,6 @@ struct napi_struct { unsigned int gro_count; int (*poll)(struct napi_struct *, int); #ifdef CONFIG_NETPOLL - spinlock_t poll_lock; int poll_owner; #endif struct net_device *dev; @@ -334,6 +334,16 @@ enum { NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ + NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */ +}; + +enum { + NAPIF_STATE_SCHED = (1UL << NAPI_STATE_SCHED), + NAPIF_STATE_DISABLE = (1UL << NAPI_STATE_DISABLE), + NAPIF_STATE_NPSVC = (1UL << NAPI_STATE_NPSVC), + NAPIF_STATE_HASHED = (1UL << NAPI_STATE_HASHED), + NAPIF_STATE_NO_BUSY_POLL = (1UL << NAPI_STATE_NO_BUSY_POLL), + NAPIF_STATE_IN_BUSY_POLL = (1UL << NAPI_STATE_IN_BUSY_POLL), }; enum gro_result { @@ -453,32 +463,22 @@ static inline bool napi_reschedule(struct napi_struct *napi) return false; } -void __napi_complete(struct napi_struct *n); -void napi_complete_done(struct napi_struct *n, int work_done); +bool __napi_complete(struct napi_struct *n); +bool napi_complete_done(struct napi_struct *n, int work_done); /** * napi_complete - NAPI processing complete * @n: NAPI context * * Mark NAPI processing as complete. * Consider using napi_complete_done() instead. + * Return false if device should avoid rearming interrupts. */ -static inline void napi_complete(struct napi_struct *n) +static inline bool napi_complete(struct napi_struct *n) { return napi_complete_done(n, 0); } /** - * napi_hash_add - add a NAPI to global hashtable - * @napi: NAPI context - * - * Generate a new napi_id and store a @napi under it in napi_hash. - * Used for busy polling (CONFIG_NET_RX_BUSY_POLL). - * Note: This is normally automatically done from netif_napi_add(), - * so might disappear in a future Linux version. 
- */ -void napi_hash_add(struct napi_struct *napi); - -/** * napi_hash_del - remove a NAPI from global table * @napi: NAPI context * @@ -732,8 +732,8 @@ struct xps_dev_maps { struct rcu_head rcu; struct xps_map __rcu *cpu_map[0]; }; -#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \ - (nr_cpu_ids * sizeof(struct xps_map *))) +#define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ + (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *))) #endif /* CONFIG_XPS */ #define TC_MAX_QUEUE 16 @@ -803,6 +803,7 @@ struct tc_to_netdev { struct tc_cls_matchall_offload *cls_mall; struct tc_cls_bpf_offload *cls_bpf; }; + bool egress_dev; }; /* These structures hold the attributes of xdp state that are being passed @@ -926,7 +927,7 @@ struct netdev_xdp { * 3. Update dev->stats asynchronously and atomically, and define * neither operation. * - * bool (*ndo_has_offload_stats)(int attr_id) + * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id) * Return true if this device supports offload stats of this attr_id. * * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev, @@ -1166,7 +1167,7 @@ struct net_device_ops { struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, struct rtnl_link_stats64 *storage); - bool (*ndo_has_offload_stats)(int attr_id); + bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id); int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev, void *attr_data); @@ -1456,7 +1457,6 @@ enum netdev_priv_flags { * @ptype_specific: Device-specific, protocol-specific packet handlers * * @adj_list: Directly linked devices, like slaves for bonding - * @all_adj_list: All linked devices, *including* neighbours * @features: Currently active device features * @hw_features: User-changeable features * @@ -1506,6 +1506,8 @@ enum netdev_priv_flags { * @if_port: Selectable AUI, TP, ... * @dma: DMA channel * @mtu: Interface MTU value + * @min_mtu: Interface Minimum MTU value + * @max_mtu: Interface Maximum MTU value * @type: Interface hardware type * @hard_header_len: Maximum hardware header length. 
* @@ -1619,7 +1621,7 @@ enum netdev_priv_flags { * @dcbnl_ops: Data Center Bridging netlink ops * @num_tc: Number of traffic classes in the net device * @tc_to_txq: XXX: need comments on this one - * @prio_tc_map XXX: need comments on this one + * @prio_tc_map: XXX: need comments on this one * * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp * @@ -1673,11 +1675,6 @@ struct net_device { struct list_head lower; } adj_list; - struct { - struct list_head upper; - struct list_head lower; - } all_adj_list; - netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; @@ -1726,6 +1723,8 @@ struct net_device { unsigned char dma; unsigned int mtu; + unsigned int min_mtu; + unsigned int max_mtu; unsigned short type; unsigned short hard_header_len; @@ -1922,34 +1921,10 @@ int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) return 0; } -static inline -void netdev_reset_tc(struct net_device *dev) -{ - dev->num_tc = 0; - memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); - memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); -} - -static inline -int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) -{ - if (tc >= dev->num_tc) - return -EINVAL; - - dev->tc_to_txq[tc].count = count; - dev->tc_to_txq[tc].offset = offset; - return 0; -} - -static inline -int netdev_set_num_tc(struct net_device *dev, u8 num_tc) -{ - if (num_tc > TC_MAX_QUEUE) - return -EINVAL; - - dev->num_tc = num_tc; - return 0; -} +int netdev_txq_to_tc(struct net_device *dev, unsigned int txq); +void netdev_reset_tc(struct net_device *dev); +int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset); +int netdev_set_num_tc(struct net_device *dev, u8 num_tc); static inline int netdev_get_num_tc(struct net_device *dev) @@ -2686,71 +2661,6 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, remcsum_unadjust((__sum16 *)ptr, grc->delta); } -struct skb_csum_offl_spec { - __u16 ipv4_okay:1, - ipv6_okay:1, - encap_okay:1, - ip_options_okay:1, - ext_hdrs_okay:1, - tcp_okay:1, - udp_okay:1, - sctp_okay:1, - vlan_okay:1, - no_encapped_ipv6:1, - no_not_encapped:1; -}; - -bool __skb_csum_offload_chk(struct sk_buff *skb, - const struct skb_csum_offl_spec *spec, - bool *csum_encapped, - bool csum_help); - -static inline bool skb_csum_offload_chk(struct sk_buff *skb, - const struct skb_csum_offl_spec *spec, - bool *csum_encapped, - bool csum_help) -{ - if (skb->ip_summed != CHECKSUM_PARTIAL) - return false; - - return __skb_csum_offload_chk(skb, spec, csum_encapped, csum_help); -} - -static inline bool skb_csum_offload_chk_help(struct sk_buff *skb, - const struct skb_csum_offl_spec *spec) -{ - bool csum_encapped; - - return skb_csum_offload_chk(skb, spec, &csum_encapped, true); -} - -static inline bool skb_csum_off_chk_help_cmn(struct sk_buff *skb) -{ - static const struct skb_csum_offl_spec csum_offl_spec = { - .ipv4_okay = 1, - .ip_options_okay = 1, - .ipv6_okay = 1, - .vlan_okay = 1, - .tcp_okay = 1, - .udp_okay = 1, - }; - - return skb_csum_offload_chk_help(skb, &csum_offl_spec); -} - -static inline bool skb_csum_off_chk_help_cmn_v4_only(struct sk_buff *skb) -{ - static const struct skb_csum_offl_spec csum_offl_spec = { - .ipv4_okay = 1, - .ip_options_okay = 1, - .tcp_okay = 1, - .udp_okay = 1, - .vlan_okay = 1, - }; - - return skb_csum_offload_chk_help(skb, &csum_offl_spec); -} - static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, @@ -3345,7 +3255,7 @@ 
int dev_get_phys_port_id(struct net_device *dev, int dev_get_phys_port_name(struct net_device *dev, char *name, size_t len); int dev_change_proto_down(struct net_device *dev, bool proto_down); -int dev_change_xdp_fd(struct net_device *dev, int fd); +int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); @@ -3354,6 +3264,21 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb); +static __always_inline int ____dev_forward_skb(struct net_device *dev, + struct sk_buff *skb) +{ + if (skb_orphan_frags(skb, GFP_ATOMIC) || + unlikely(!is_skb_forwardable(dev, skb))) { + atomic_long_inc(&dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } + + skb_scrub_packet(skb, true); + skb->priority = 0; + return 0; +} + void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); extern int netdev_budget; @@ -3539,6 +3464,17 @@ static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) txq->xmit_lock_owner = cpu; } +static inline bool __netif_tx_acquire(struct netdev_queue *txq) +{ + __acquire(&txq->_xmit_lock); + return true; +} + +static inline void __netif_tx_release(struct netdev_queue *txq) +{ + __release(&txq->_xmit_lock); +} + static inline void __netif_tx_lock_bh(struct netdev_queue *txq) { spin_lock_bh(&txq->_xmit_lock); @@ -3640,17 +3576,21 @@ static inline void netif_tx_unlock_bh(struct net_device *dev) #define HARD_TX_LOCK(dev, txq, cpu) { \ if ((dev->features & NETIF_F_LLTX) == 0) { \ __netif_tx_lock(txq, cpu); \ + } else { \ + __netif_tx_acquire(txq); \ } \ } #define HARD_TX_TRYLOCK(dev, txq) \ (((dev->features & NETIF_F_LLTX) == 0) ? 
\ __netif_tx_trylock(txq) : \ - true ) + __netif_tx_acquire(txq)) #define HARD_TX_UNLOCK(dev, txq) { \ if ((dev->features & NETIF_F_LLTX) == 0) { \ __netif_tx_unlock(txq); \ + } else { \ + __netif_tx_release(txq); \ } \ } @@ -3869,12 +3809,13 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, updev; \ updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) -/* iterate through upper list, must be called under RCU read lock */ -#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \ - for (iter = &(dev)->all_adj_list.upper, \ - updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \ - updev; \ - updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter))) +int netdev_walk_all_upper_dev_rcu(struct net_device *dev, + int (*fn)(struct net_device *upper_dev, + void *data), + void *data); + +bool netdev_has_upper_dev_all_rcu(struct net_device *dev, + struct net_device *upper_dev); void *netdev_lower_get_next_private(struct net_device *dev, struct list_head **iter); @@ -3907,17 +3848,14 @@ struct net_device *netdev_all_lower_get_next(struct net_device *dev, struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev, struct list_head **iter); -#define netdev_for_each_all_lower_dev(dev, ldev, iter) \ - for (iter = (dev)->all_adj_list.lower.next, \ - ldev = netdev_all_lower_get_next(dev, &(iter)); \ - ldev; \ - ldev = netdev_all_lower_get_next(dev, &(iter))) - -#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \ - for (iter = &(dev)->all_adj_list.lower, \ - ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \ - ldev; \ - ldev = netdev_all_lower_get_next_rcu(dev, &(iter))) +int netdev_walk_all_lower_dev(struct net_device *dev, + int (*fn)(struct net_device *lower_dev, + void *data), + void *data); +int netdev_walk_all_lower_dev_rcu(struct net_device *dev, + int (*fn)(struct net_device *lower_dev, + void *data), + void *data); void *netdev_adjacent_get_private(struct list_head *adj_list); void *netdev_lower_get_first_private_rcu(struct net_device *dev); @@ -3994,19 +3932,6 @@ static inline bool can_checksum_protocol(netdev_features_t features, } } -/* Map an ethertype into IP protocol if possible */ -static inline int eproto_to_ipproto(int eproto) -{ - switch (eproto) { - case htons(ETH_P_IP): - return IPPROTO_IP; - case htons(ETH_P_IPV6): - return IPPROTO_IPV6; - default: - return -1; - } -} - #ifdef CONFIG_BUG void netdev_rx_csum_fault(struct net_device *dev); #else diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index abc7fdcb9eb1..a4b97be30b28 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -49,13 +49,11 @@ struct sock; struct nf_hook_state { unsigned int hook; - int thresh; u_int8_t pf; struct net_device *in; struct net_device *out; struct sock *sk; struct net *net; - struct nf_hook_entry __rcu *hook_entries; int (*okfn)(struct net *, struct sock *, struct sk_buff *); }; @@ -77,14 +75,42 @@ struct nf_hook_ops { struct nf_hook_entry { struct nf_hook_entry __rcu *next; - struct nf_hook_ops ops; + nf_hookfn *hook; + void *priv; const struct nf_hook_ops *orig_ops; }; +static inline void +nf_hook_entry_init(struct nf_hook_entry *entry, const struct nf_hook_ops *ops) +{ + entry->next = NULL; + entry->hook = ops->hook; + entry->priv = ops->priv; + entry->orig_ops = ops; +} + +static inline int +nf_hook_entry_priority(const struct nf_hook_entry *entry) +{ + return entry->orig_ops->priority; +} + +static inline int +nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb, + struct 
nf_hook_state *state) +{ + return entry->hook(entry->priv, skb, state); +} + +static inline const struct nf_hook_ops * +nf_hook_entry_ops(const struct nf_hook_entry *entry) +{ + return entry->orig_ops; +} + static inline void nf_hook_state_init(struct nf_hook_state *p, - struct nf_hook_entry *hook_entry, unsigned int hook, - int thresh, u_int8_t pf, + u_int8_t pf, struct net_device *indev, struct net_device *outdev, struct sock *sk, @@ -92,13 +118,11 @@ static inline void nf_hook_state_init(struct nf_hook_state *p, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { p->hook = hook; - p->thresh = thresh; p->pf = pf; p->in = indev; p->out = outdev; p->sk = sk; p->net = net; - RCU_INIT_POINTER(p->hook_entries, hook_entry); p->okfn = okfn; } @@ -152,23 +176,20 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg); extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; #endif -int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state); +int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state, + struct nf_hook_entry *entry); /** - * nf_hook_thresh - call a netfilter hook + * nf_hook - call a netfilter hook * * Returns 1 if the hook has allowed the packet to pass. The function * okfn must be invoked by the caller in this case. Any other return * value indicates the packet has been consumed by the hook. */ -static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, - struct net *net, - struct sock *sk, - struct sk_buff *skb, - struct net_device *indev, - struct net_device *outdev, - int (*okfn)(struct net *, struct sock *, struct sk_buff *), - int thresh) +static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, + struct sock *sk, struct sk_buff *skb, + struct net_device *indev, struct net_device *outdev, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { struct nf_hook_entry *hook_head; int ret = 1; @@ -185,24 +206,16 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, if (hook_head) { struct nf_hook_state state; - nf_hook_state_init(&state, hook_head, hook, thresh, - pf, indev, outdev, sk, net, okfn); + nf_hook_state_init(&state, hook, pf, indev, outdev, + sk, net, okfn); - ret = nf_hook_slow(skb, &state); + ret = nf_hook_slow(skb, &state, hook_head); } rcu_read_unlock(); return ret; } -static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, - struct sock *sk, struct sk_buff *skb, - struct net_device *indev, struct net_device *outdev, - int (*okfn)(struct net *, struct sock *, struct sk_buff *)) -{ - return nf_hook_thresh(pf, hook, net, sk, skb, indev, outdev, okfn, INT_MIN); -} - /* Activate hook; either okfn or kfree_skb called, unless a hook returns NF_STOLEN (in which case, it's up to the hook to deal with the consequences). 
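Aside: the reworked hook path above drops the threshold plumbing entirely: nf_hook_state no longer caches the entry list, nf_hook_slow() receives the head entry explicitly, and NF_HOOK() now just calls nf_hook() and invokes the okfn continuation itself on a verdict of 1. A minimal caller-side sketch under those assumptions; demo_rcv_finish and demo_rcv are hypothetical functions, not part of this patch:

/* Hypothetical receive path: NF_HOOK() runs the NF_INET_PRE_ROUTING
 * chain via nf_hook() and, if the packet is accepted, calls the okfn
 * continuation; callers no longer pass a threshold.
 */
static int demo_rcv_finish(struct net *net, struct sock *sk,
			   struct sk_buff *skb)
{
	/* deliver skb further up the stack */
	return 0;
}

static int demo_rcv(struct sk_buff *skb, struct net_device *dev)
{
	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
		       dev_net(dev), NULL, skb, dev, NULL,
		       demo_rcv_finish);
}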
@@ -221,19 +234,6 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, */ static inline int -NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, - struct sk_buff *skb, struct net_device *in, - struct net_device *out, - int (*okfn)(struct net *, struct sock *, struct sk_buff *), - int thresh) -{ - int ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, thresh); - if (ret == 1) - ret = okfn(net, sk, skb); - return ret; -} - -static inline int NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *), @@ -242,7 +242,7 @@ NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, int ret; if (!cond || - ((ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, INT_MIN)) == 1)) + ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1)) ret = okfn(net, sk, skb); return ret; } @@ -252,7 +252,10 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { - return NF_HOOK_THRESH(pf, hook, net, sk, skb, in, out, okfn, INT_MIN); + int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn); + if (ret == 1) + ret = okfn(net, sk, skb); + return ret; } /* Call setsockopt() */ diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 83b9a2e0d8d4..8e42253e5d4d 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -79,10 +79,12 @@ enum ip_set_ext_id { IPSET_EXT_ID_MAX, }; +struct ip_set; + /* Extension type */ struct ip_set_ext_type { /* Destroy extension private data (can be NULL) */ - void (*destroy)(void *ext); + void (*destroy)(struct ip_set *set, void *ext); enum ip_set_extension type; enum ipset_cadt_flags flag; /* Size and minimal alignment */ @@ -92,17 +94,6 @@ struct ip_set_ext_type { extern const struct ip_set_ext_type ip_set_extensions[]; -struct ip_set_ext { - u64 packets; - u64 bytes; - u32 timeout; - u32 skbmark; - u32 skbmarkmask; - u32 skbprio; - u16 skbqueue; - char *comment; -}; - struct ip_set_counter { atomic64_t bytes; atomic64_t packets; @@ -122,6 +113,15 @@ struct ip_set_skbinfo { u32 skbmarkmask; u32 skbprio; u16 skbqueue; + u16 __pad; +}; + +struct ip_set_ext { + struct ip_set_skbinfo skbinfo; + u64 packets; + u64 bytes; + char *comment; + u32 timeout; }; struct ip_set; @@ -252,6 +252,10 @@ struct ip_set { u8 flags; /* Default timeout value, if enabled */ u32 timeout; + /* Number of elements (vs timeout) */ + u32 elements; + /* Size of the dynamic extensions (vs timeout) */ + size_t ext_size; /* Element data size */ size_t dsize; /* Offsets to extensions in elements */ @@ -268,7 +272,7 @@ ip_set_ext_destroy(struct ip_set *set, void *data) */ if (SET_WITH_COMMENT(set)) ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy( - ext_comment(data, set)); + set, ext_comment(data, set)); } static inline int @@ -294,104 +298,6 @@ ip_set_put_flags(struct sk_buff *skb, struct ip_set *set) return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags)); } -static inline void -ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter) -{ - atomic64_add((long long)bytes, &(counter)->bytes); -} - -static inline void -ip_set_add_packets(u64 packets, struct ip_set_counter *counter) -{ - atomic64_add((long long)packets, &(counter)->packets); -} - -static 
inline u64 -ip_set_get_bytes(const struct ip_set_counter *counter) -{ - return (u64)atomic64_read(&(counter)->bytes); -} - -static inline u64 -ip_set_get_packets(const struct ip_set_counter *counter) -{ - return (u64)atomic64_read(&(counter)->packets); -} - -static inline void -ip_set_update_counter(struct ip_set_counter *counter, - const struct ip_set_ext *ext, - struct ip_set_ext *mext, u32 flags) -{ - if (ext->packets != ULLONG_MAX && - !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { - ip_set_add_bytes(ext->bytes, counter); - ip_set_add_packets(ext->packets, counter); - } - if (flags & IPSET_FLAG_MATCH_COUNTERS) { - mext->packets = ip_set_get_packets(counter); - mext->bytes = ip_set_get_bytes(counter); - } -} - -static inline void -ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo, - const struct ip_set_ext *ext, - struct ip_set_ext *mext, u32 flags) -{ - mext->skbmark = skbinfo->skbmark; - mext->skbmarkmask = skbinfo->skbmarkmask; - mext->skbprio = skbinfo->skbprio; - mext->skbqueue = skbinfo->skbqueue; -} -static inline bool -ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo) -{ - /* Send nonzero parameters only */ - return ((skbinfo->skbmark || skbinfo->skbmarkmask) && - nla_put_net64(skb, IPSET_ATTR_SKBMARK, - cpu_to_be64((u64)skbinfo->skbmark << 32 | - skbinfo->skbmarkmask), - IPSET_ATTR_PAD)) || - (skbinfo->skbprio && - nla_put_net32(skb, IPSET_ATTR_SKBPRIO, - cpu_to_be32(skbinfo->skbprio))) || - (skbinfo->skbqueue && - nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, - cpu_to_be16(skbinfo->skbqueue))); -} - -static inline void -ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo, - const struct ip_set_ext *ext) -{ - skbinfo->skbmark = ext->skbmark; - skbinfo->skbmarkmask = ext->skbmarkmask; - skbinfo->skbprio = ext->skbprio; - skbinfo->skbqueue = ext->skbqueue; -} - -static inline bool -ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter) -{ - return nla_put_net64(skb, IPSET_ATTR_BYTES, - cpu_to_be64(ip_set_get_bytes(counter)), - IPSET_ATTR_PAD) || - nla_put_net64(skb, IPSET_ATTR_PACKETS, - cpu_to_be64(ip_set_get_packets(counter)), - IPSET_ATTR_PAD); -} - -static inline void -ip_set_init_counter(struct ip_set_counter *counter, - const struct ip_set_ext *ext) -{ - if (ext->bytes != ULLONG_MAX) - atomic64_set(&(counter)->bytes, (long long)(ext->bytes)); - if (ext->packets != ULLONG_MAX) - atomic64_set(&(counter)->packets, (long long)(ext->packets)); -} - /* Netlink CB args */ enum { IPSET_CB_NET = 0, /* net namespace */ @@ -431,6 +337,8 @@ extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len, size_t align); extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], struct ip_set_ext *ext); +extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, + const void *e, bool active); static inline int ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr) @@ -546,10 +454,8 @@ bitmap_bytes(u32 a, u32 b) #include <linux/netfilter/ipset/ip_set_timeout.h> #include <linux/netfilter/ipset/ip_set_comment.h> - -int -ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, - const void *e, bool active); +#include <linux/netfilter/ipset/ip_set_counter.h> +#include <linux/netfilter/ipset/ip_set_skbinfo.h> #define IP_SET_INIT_KEXT(skb, opt, set) \ { .bytes = (skb)->len, .packets = 1, \ diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h index 5e4662a71e01..366d6c0ea04f 100644 --- a/include/linux/netfilter/ipset/ip_set_bitmap.h +++ 
b/include/linux/netfilter/ipset/ip_set_bitmap.h @@ -6,8 +6,8 @@ #define IPSET_BITMAP_MAX_RANGE 0x0000FFFF enum { + IPSET_ADD_STORE_PLAIN_TIMEOUT = -1, IPSET_ADD_FAILED = 1, - IPSET_ADD_STORE_PLAIN_TIMEOUT, IPSET_ADD_START_STORED_TIMEOUT, }; diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h index 8d0248525957..8e2bab1e8e90 100644 --- a/include/linux/netfilter/ipset/ip_set_comment.h +++ b/include/linux/netfilter/ipset/ip_set_comment.h @@ -20,13 +20,14 @@ ip_set_comment_uget(struct nlattr *tb) * The kadt functions don't use the comment extensions in any way. */ static inline void -ip_set_init_comment(struct ip_set_comment *comment, +ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment, const struct ip_set_ext *ext) { struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1); size_t len = ext->comment ? strlen(ext->comment) : 0; if (unlikely(c)) { + set->ext_size -= sizeof(*c) + strlen(c->str) + 1; kfree_rcu(c, rcu); rcu_assign_pointer(comment->c, NULL); } @@ -34,16 +35,17 @@ ip_set_init_comment(struct ip_set_comment *comment, return; if (unlikely(len > IPSET_MAX_COMMENT_SIZE)) len = IPSET_MAX_COMMENT_SIZE; - c = kzalloc(sizeof(*c) + len + 1, GFP_ATOMIC); + c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC); if (unlikely(!c)) return; strlcpy(c->str, ext->comment, len + 1); + set->ext_size += sizeof(*c) + strlen(c->str) + 1; rcu_assign_pointer(comment->c, c); } /* Used only when dumping a set, protected by rcu_read_lock_bh() */ static inline int -ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment) +ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment) { struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c); @@ -58,13 +60,14 @@ ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment) * of the set data anymore. */ static inline void -ip_set_comment_free(struct ip_set_comment *comment) +ip_set_comment_free(struct ip_set *set, struct ip_set_comment *comment) { struct ip_set_comment_rcu *c; c = rcu_dereference_protected(comment->c, 1); if (unlikely(!c)) return; + set->ext_size -= sizeof(*c) + strlen(c->str) + 1; kfree_rcu(c, rcu); rcu_assign_pointer(comment->c, NULL); } diff --git a/include/linux/netfilter/ipset/ip_set_counter.h b/include/linux/netfilter/ipset/ip_set_counter.h new file mode 100644 index 000000000000..bb6fba480118 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_counter.h @@ -0,0 +1,75 @@ +#ifndef _IP_SET_COUNTER_H +#define _IP_SET_COUNTER_H + +/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifdef __KERNEL__ + +static inline void +ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter) +{ + atomic64_add((long long)bytes, &(counter)->bytes); +} + +static inline void +ip_set_add_packets(u64 packets, struct ip_set_counter *counter) +{ + atomic64_add((long long)packets, &(counter)->packets); +} + +static inline u64 +ip_set_get_bytes(const struct ip_set_counter *counter) +{ + return (u64)atomic64_read(&(counter)->bytes); +} + +static inline u64 +ip_set_get_packets(const struct ip_set_counter *counter) +{ + return (u64)atomic64_read(&(counter)->packets); +} + +static inline void +ip_set_update_counter(struct ip_set_counter *counter, + const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags) +{ + if (ext->packets != ULLONG_MAX && + !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { + ip_set_add_bytes(ext->bytes, counter); + ip_set_add_packets(ext->packets, counter); + } + if (flags & IPSET_FLAG_MATCH_COUNTERS) { + mext->packets = ip_set_get_packets(counter); + mext->bytes = ip_set_get_bytes(counter); + } +} + +static inline bool +ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter) +{ + return nla_put_net64(skb, IPSET_ATTR_BYTES, + cpu_to_be64(ip_set_get_bytes(counter)), + IPSET_ATTR_PAD) || + nla_put_net64(skb, IPSET_ATTR_PACKETS, + cpu_to_be64(ip_set_get_packets(counter)), + IPSET_ATTR_PAD); +} + +static inline void +ip_set_init_counter(struct ip_set_counter *counter, + const struct ip_set_ext *ext) +{ + if (ext->bytes != ULLONG_MAX) + atomic64_set(&(counter)->bytes, (long long)(ext->bytes)); + if (ext->packets != ULLONG_MAX) + atomic64_set(&(counter)->packets, (long long)(ext->packets)); +} + +#endif /* __KERNEL__ */ +#endif /* _IP_SET_COUNTER_H */ diff --git a/include/linux/netfilter/ipset/ip_set_skbinfo.h b/include/linux/netfilter/ipset/ip_set_skbinfo.h new file mode 100644 index 000000000000..29d7ef2bc3fa --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_skbinfo.h @@ -0,0 +1,46 @@ +#ifndef _IP_SET_SKBINFO_H +#define _IP_SET_SKBINFO_H + +/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifdef __KERNEL__ + +static inline void +ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo, + const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags) +{ + mext->skbinfo = *skbinfo; +} + +static inline bool +ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo) +{ + /* Send nonzero parameters only */ + return ((skbinfo->skbmark || skbinfo->skbmarkmask) && + nla_put_net64(skb, IPSET_ATTR_SKBMARK, + cpu_to_be64((u64)skbinfo->skbmark << 32 | + skbinfo->skbmarkmask), + IPSET_ATTR_PAD)) || + (skbinfo->skbprio && + nla_put_net32(skb, IPSET_ATTR_SKBPRIO, + cpu_to_be32(skbinfo->skbprio))) || + (skbinfo->skbqueue && + nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, + cpu_to_be16(skbinfo->skbqueue))); +} + +static inline void +ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo, + const struct ip_set_ext *ext) +{ + *skbinfo = ext->skbinfo; +} + +#endif /* __KERNEL__ */ +#endif /* _IP_SET_SKBINFO_H */ diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h index 1d6a935c1ac5..bfb3531fd88a 100644 --- a/include/linux/netfilter/ipset/ip_set_timeout.h +++ b/include/linux/netfilter/ipset/ip_set_timeout.h @@ -40,7 +40,7 @@ ip_set_timeout_uget(struct nlattr *tb) } static inline bool -ip_set_timeout_expired(unsigned long *t) +ip_set_timeout_expired(const unsigned long *t) { return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t); } @@ -63,7 +63,7 @@ ip_set_timeout_set(unsigned long *timeout, u32 value) } static inline u32 -ip_set_timeout_get(unsigned long *timeout) +ip_set_timeout_get(const unsigned long *timeout) { return *timeout == IPSET_ELEM_PERMANENT ? 0 : jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; diff --git a/include/linux/netfilter/nf_conntrack_dccp.h b/include/linux/netfilter/nf_conntrack_dccp.h index 40dcc82058d1..ff721d7325cf 100644 --- a/include/linux/netfilter/nf_conntrack_dccp.h +++ b/include/linux/netfilter/nf_conntrack_dccp.h @@ -25,7 +25,7 @@ enum ct_dccp_roles { #define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1) #ifdef __KERNEL__ -#include <net/netfilter/nf_conntrack_tuple.h> +#include <linux/netfilter/nf_conntrack_tuple_common.h> struct nf_ct_dccp { u_int8_t role[IP_CT_DIR_MAX]; diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 2ad1a2b289b5..5117e4d2ddfa 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -4,6 +4,7 @@ #include <linux/netdevice.h> #include <linux/static_key.h> +#include <linux/netfilter.h> #include <uapi/linux/netfilter/x_tables.h> /* Test a struct->invflags and a boolean for inequality */ @@ -17,14 +18,9 @@ * @target: the target extension * @matchinfo: per-match data * @targetinfo: per-target data - * @net network namespace through which the action was invoked - * @in: input netdevice - * @out: output netdevice + * @state: pointer to hook state this packet came from * @fragoff: packet is a fragment, this is the data offset * @thoff: position of transport header relative to skb->data - * @hook: hook number given packet came from - * @family: Actual NFPROTO_* through which the function is invoked - * (helpful when match->family == NFPROTO_UNSPEC) * * Fields written to by extensions: * @@ -38,15 +34,47 @@ struct xt_action_param { union { const void *matchinfo, *targinfo; }; - struct net *net; - const struct net_device *in, *out; + const struct nf_hook_state *state; int fragoff; unsigned int thoff; - unsigned int hooknum; - u_int8_t family; bool hotdrop; }; +static inline struct net *xt_net(const 
struct xt_action_param *par) +{ + return par->state->net; +} + +static inline struct net_device *xt_in(const struct xt_action_param *par) +{ + return par->state->in; +} + +static inline const char *xt_inname(const struct xt_action_param *par) +{ + return par->state->in->name; +} + +static inline struct net_device *xt_out(const struct xt_action_param *par) +{ + return par->state->out; +} + +static inline const char *xt_outname(const struct xt_action_param *par) +{ + return par->state->out->name; +} + +static inline unsigned int xt_hooknum(const struct xt_action_param *par) +{ + return par->state->hook; +} + +static inline u_int8_t xt_family(const struct xt_action_param *par) +{ + return par->state->pf; +} + /** * struct xt_mtchk_param - parameters for match extensions' * checkentry functions @@ -375,38 +403,14 @@ static inline unsigned long ifname_compare_aligned(const char *_a, return ret; } +struct xt_percpu_counter_alloc_state { + unsigned int off; + const char __percpu *mem; +}; -/* On SMP, ip(6)t_entry->counters.pcnt holds address of the - * real (percpu) counter. On !SMP, its just the packet count, - * so nothing needs to be done there. - * - * xt_percpu_counter_alloc returns the address of the percpu - * counter, or 0 on !SMP. We force an alignment of 16 bytes - * so that bytes/packets share a common cache line. - * - * Hence caller must use IS_ERR_VALUE to check for error, this - * allows us to return 0 for single core systems without forcing - * callers to deal with SMP vs. NONSMP issues. - */ -static inline unsigned long xt_percpu_counter_alloc(void) -{ - if (nr_cpu_ids > 1) { - void __percpu *res = __alloc_percpu(sizeof(struct xt_counters), - sizeof(struct xt_counters)); - - if (res == NULL) - return -ENOMEM; - - return (__force unsigned long) res; - } - - return 0; -} -static inline void xt_percpu_counter_free(u64 pcnt) -{ - if (nr_cpu_ids > 1) - free_percpu((void __percpu *) (unsigned long) pcnt); -} +bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state, + struct xt_counters *counter); +void xt_percpu_counter_free(struct xt_counters *cnt); static inline struct xt_counters * xt_get_this_cpu_counter(struct xt_counters *cnt) diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h index 33e37fb41d5d..59476061de86 100644 --- a/include/linux/netfilter_ingress.h +++ b/include/linux/netfilter_ingress.h @@ -19,6 +19,7 @@ static inline int nf_hook_ingress(struct sk_buff *skb) { struct nf_hook_entry *e = rcu_dereference(skb->dev->nf_hooks_ingress); struct nf_hook_state state; + int ret; /* Must recheck the ingress hook head, in the event it became NULL * after the check in nf_hook_ingress_active evaluated to true. 
@@ -26,10 +27,14 @@ static inline int nf_hook_ingress(struct sk_buff *skb) if (unlikely(!e)) return 0; - nf_hook_state_init(&state, e, NF_NETDEV_INGRESS, INT_MIN, + nf_hook_state_init(&state, NF_NETDEV_INGRESS, NFPROTO_NETDEV, skb->dev, NULL, NULL, dev_net(skb->dev), NULL); - return nf_hook_slow(skb, &state); + ret = nf_hook_slow(skb, &state, e); + if (ret == 0) + return -1; + + return ret; } static inline void nf_hook_ingress_init(struct net_device *dev) diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index b25ee9ffdbe6..1828900c9411 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -78,8 +78,11 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi) struct net_device *dev = napi->dev; if (dev && dev->npinfo) { - spin_lock(&napi->poll_lock); - napi->poll_owner = smp_processor_id(); + int owner = smp_processor_id(); + + while (cmpxchg(&napi->poll_owner, -1, owner) != -1) + cpu_relax(); + return napi; } return NULL; @@ -89,10 +92,8 @@ static inline void netpoll_poll_unlock(void *have) { struct napi_struct *napi = have; - if (napi) { - napi->poll_owner = -1; - spin_unlock(&napi->poll_lock); - } + if (napi) + smp_store_release(&napi->poll_owner, -1); } static inline bool netpoll_tx_running(struct net_device *dev) diff --git a/include/linux/nmi.h b/include/linux/nmi.h index a78c35cff1ae..aacca824a6ae 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -7,6 +7,23 @@ #include <linux/sched.h> #include <asm/irq.h> +/* + * The run state of the lockup detectors is controlled by the content of the + * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - + * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. + * + * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled' + * are variables that are only used as an 'interface' between the parameters + * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The + * 'watchdog_thresh' variable is handled differently because its value is not + * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh' + * equals zero. + */ +#define NMI_WATCHDOG_ENABLED_BIT 0 +#define SOFT_WATCHDOG_ENABLED_BIT 1 +#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT) +#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT) + /** * touch_nmi_watchdog - restart NMI watchdog timeout. * @@ -91,9 +108,16 @@ extern int nmi_watchdog_enabled; extern int soft_watchdog_enabled; extern int watchdog_user_enabled; extern int watchdog_thresh; +extern unsigned long watchdog_enabled; extern unsigned long *watchdog_cpumask_bits; +#ifdef CONFIG_SMP extern int sysctl_softlockup_all_cpu_backtrace; extern int sysctl_hardlockup_all_cpu_backtrace; +#else +#define sysctl_softlockup_all_cpu_backtrace 0 +#define sysctl_hardlockup_all_cpu_backtrace 0 +#endif +extern bool is_hardlockup(void); struct ctl_table; extern int proc_watchdog(struct ctl_table *, int , void __user *, size_t *, loff_t *); diff --git a/include/linux/ntb.h b/include/linux/ntb.h index 6f47562d477b..50a7dbe88cf3 100644 --- a/include/linux/ntb.h +++ b/include/linux/ntb.h @@ -896,7 +896,7 @@ static inline int ntb_spad_is_unsafe(struct ntb_dev *ntb) } /** - * ntb_mw_count() - get the number of scratchpads + * ntb_spad_count() - get the number of scratchpads * @ntb: NTB device context. * * Hardware and topology may support a different number of scratchpads.
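The netpoll.h hunk above trades the per-napi poll_lock spinlock for a lock-free ownership handshake on napi->poll_owner: the poller claims the field with cmpxchg() and hands it back with smp_store_release(). A minimal userspace sketch of the same acquire/release pattern, with C11 atomics and sched_yield() standing in for the kernel's cmpxchg(), smp_store_release() and cpu_relax() (names here are illustrative, not kernel API):

#include <stdatomic.h>
#include <sched.h>

/* -1 mirrors the "unowned" value of napi->poll_owner */
static atomic_int poll_owner = -1;

/* Claim ownership, as netpoll_poll_lock() does with cmpxchg() */
static void poll_lock(int cpu)
{
	int expected = -1;

	while (!atomic_compare_exchange_weak_explicit(&poll_owner,
						      &expected, cpu,
						      memory_order_acquire,
						      memory_order_relaxed)) {
		expected = -1;	/* a failed CAS overwrites 'expected' */
		sched_yield();	/* stand-in for cpu_relax() */
	}
}

/* Release ownership, as netpoll_poll_unlock() does with a release store */
static void poll_unlock(void)
{
	atomic_store_explicit(&poll_owner, -1, memory_order_release);
}

int main(void)
{
	poll_lock(0);
	poll_unlock();
	return 0;
}

The release store pairs with the acquiring cmpxchg, so whatever the previous owner did while polling is visible to the next CPU that wins the loop; that pairing is what allows the hunk above to delete the spinlock from struct napi_struct entirely.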
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h new file mode 100644 index 000000000000..f21471f7ee40 --- /dev/null +++ b/include/linux/nvme-fc-driver.h @@ -0,0 +1,851 @@ +/* + * Copyright (c) 2016, Avago Technologies + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef _NVME_FC_DRIVER_H +#define _NVME_FC_DRIVER_H 1 + + +/* + * ********************** LLDD FC-NVME Host API ******************** + * + * For FC LLDD's that are the NVME Host role. + * + * ****************************************************************** + */ + + + +/* FC Port role bitmask - can merge with FC Port Roles in fc transport */ +#define FC_PORT_ROLE_NVME_INITIATOR 0x10 +#define FC_PORT_ROLE_NVME_TARGET 0x11 +#define FC_PORT_ROLE_NVME_DISCOVERY 0x12 + + +/** + * struct nvme_fc_port_info - port-specific ids and FC connection-specific + * data element used during NVME Host role + * registrations + * + * Static fields describing the port being registered: + * @node_name: FC WWNN for the port + * @port_name: FC WWPN for the port + * @port_role: What NVME roles are supported (see FC_PORT_ROLE_xxx) + * + * Initialization values for dynamic port fields: + * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must + * be set to 0. + */ +struct nvme_fc_port_info { + u64 node_name; + u64 port_name; + u32 port_role; + u32 port_id; +}; + + +/** + * struct nvmefc_ls_req - Request structure passed from NVME-FC transport + * to LLDD in order to perform a NVME FC-4 LS + * request and obtain a response. + * + * Values set by the NVME-FC layer prior to calling the LLDD ls_req + * entrypoint. + * @rqstaddr: pointer to request buffer + * @rqstdma: PCI DMA address of request buffer + * @rqstlen: Length, in bytes, of request buffer + * @rspaddr: pointer to response buffer + * @rspdma: PCI DMA address of response buffer + * @rsplen: Length, in bytes, of response buffer + * @timeout: Maximum amount of time, in seconds, to wait for the LS response. + * If timeout exceeded, LLDD to abort LS exchange and complete + * LS request with error status. + * @private: pointer to memory allocated alongside the ls request structure + * that is specifically for the LLDD to use while processing the + * request. The length of the buffer corresponds to the + * lsrqst_priv_sz value specified in the nvme_fc_port_template + * supplied by the LLDD. + * @done: The callback routine the LLDD is to invoke upon completion of + * the LS request. req argument is the pointer to the original LS + * request structure. Status argument must be 0 upon success, a + * negative errno on failure (example: -ENXIO). 
+ */ +struct nvmefc_ls_req { + void *rqstaddr; + dma_addr_t rqstdma; + u32 rqstlen; + void *rspaddr; + dma_addr_t rspdma; + u32 rsplen; + u32 timeout; + + void *private; + + void (*done)(struct nvmefc_ls_req *req, int status); + +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + + +enum nvmefc_fcp_datadir { + NVMEFC_FCP_NODATA, /* payload_length and sg_cnt will be zero */ + NVMEFC_FCP_WRITE, + NVMEFC_FCP_READ, +}; + + +#define NVME_FC_MAX_SEGMENTS 256 + +/** + * struct nvmefc_fcp_req - Request structure passed from NVME-FC transport + * to LLDD in order to perform a NVME FCP IO operation. + * + * Values set by the NVME-FC layer prior to calling the LLDD fcp_io + * entrypoint. + * @cmdaddr: pointer to the FCP CMD IU buffer + * @rspaddr: pointer to the FCP RSP IU buffer + * @cmddma: PCI DMA address of the FCP CMD IU buffer + * @rspdma: PCI DMA address of the FCP RSP IU buffer + * @cmdlen: Length, in bytes, of the FCP CMD IU buffer + * @rsplen: Length, in bytes, of the FCP RSP IU buffer + * @payload_length: Length of DATA_IN or DATA_OUT payload data to transfer + * @sg_table: scatter/gather structure for payload data + * @first_sgl: memory for 1st scatter/gather list segment for payload data + * @sg_cnt: number of elements in the scatter/gather list + * @io_dir: direction of the FCP request (see NVMEFC_FCP_xxx) + * @sqid: The nvme SQID the command is being issued on + * @done: The callback routine the LLDD is to invoke upon completion of + * the FCP operation. req argument is the pointer to the original + * FCP IO operation. + * @private: pointer to memory allocated alongside the FCP operation + * request structure that is specifically for the LLDD to use + * while processing the operation. The length of the buffer + * corresponds to the fcprqst_priv_sz value specified in the + * nvme_fc_port_template supplied by the LLDD. + * + * Values set by the LLDD indicating completion status of the FCP operation. + * Must be set prior to calling the done() callback. + * @transferred_length: amount of payload data, in bytes, that was + * transferred. Should equal payload_length on success. + * @rcv_rsplen: length, in bytes, of the FCP RSP IU received. + * @status: Completion status of the FCP operation. Must be 0 upon success, + * NVME_SC_FC_xxx value upon failure. Note: this is NOT a + * reflection of the NVME CQE completion status. Only the status + * of the FCP operation at the NVME-FC level. + */ +struct nvmefc_fcp_req { + void *cmdaddr; + void *rspaddr; + dma_addr_t cmddma; + dma_addr_t rspdma; + u16 cmdlen; + u16 rsplen; + + u32 payload_length; + struct sg_table sg_table; + struct scatterlist *first_sgl; + int sg_cnt; + enum nvmefc_fcp_datadir io_dir; + + __le16 sqid; + + void (*done)(struct nvmefc_fcp_req *req); + + void *private; + + u32 transferred_length; + u16 rcv_rsplen; + u32 status; +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + + +/* + * Direct copy of fc_port_state enum. For later merging + */ +enum nvme_fc_obj_state { + FC_OBJSTATE_UNKNOWN, + FC_OBJSTATE_NOTPRESENT, + FC_OBJSTATE_ONLINE, + FC_OBJSTATE_OFFLINE, /* User has taken Port Offline */ + FC_OBJSTATE_BLOCKED, + FC_OBJSTATE_BYPASSED, + FC_OBJSTATE_DIAGNOSTICS, + FC_OBJSTATE_LINKDOWN, + FC_OBJSTATE_ERROR, + FC_OBJSTATE_LOOPBACK, + FC_OBJSTATE_DELETED, +}; + + +/** + * struct nvme_fc_local_port - structure used between NVME-FC transport and + * a LLDD to reference a local NVME host port. + * Allocated/created by the nvme_fc_register_localport() + * transport interface.
+ * + * Fields with static values for the port. Initialized by the + * port_info struct supplied to the registration call. + * @port_num: NVME-FC transport host port number + * @port_role: NVME roles supported on the port (see FC_PORT_ROLE_xxx) + * @node_name: FC WWNN for the port + * @port_name: FC WWPN for the port + * @private: pointer to memory allocated alongside the local port + * structure that is specifically for the LLDD to use. + * The length of the buffer corresponds to the local_priv_sz + * value specified in the nvme_fc_port_template supplied by + * the LLDD. + * + * Fields with dynamic values. Values may change based on link state. LLDD + * may reference fields directly to change them. Initialized by the + * port_info struct supplied to the registration call. + * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must + * be set to 0. + * @port_state: Operational state of the port. + */ +struct nvme_fc_local_port { + /* static/read-only fields */ + u32 port_num; + u32 port_role; + u64 node_name; + u64 port_name; + + void *private; + + /* dynamic fields */ + u32 port_id; + enum nvme_fc_obj_state port_state; +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + + +/** + * struct nvme_fc_remote_port - structure used between NVME-FC transport and + * a LLDD to reference a remote NVME subsystem port. + * Allocated/created by the nvme_fc_register_remoteport() + * transport interface. + * + * Fields with static values for the port. Initialized by the + * port_info struct supplied to the registration call. + * @port_num: NVME-FC transport remote subsystem port number + * @port_role: NVME roles supported on the port (see FC_PORT_ROLE_xxx) + * @node_name: FC WWNN for the port + * @port_name: FC WWPN for the port + * @localport: pointer to the NVME-FC local host port the subsystem is + * connected to. + * @private: pointer to memory allocated alongside the remote port + * structure that is specifically for the LLDD to use. + * The length of the buffer corresponds to the remote_priv_sz + * value specified in the nvme_fc_port_template supplied by + * the LLDD. + * + * Fields with dynamic values. Values may change based on link or login + * state. LLDD may reference fields directly to change them. Initialized by + * the port_info struct supplied to the registration call. + * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must + * be set to 0. + * @port_state: Operational state of the remote port. Valid values are + * ONLINE or UNKNOWN. + */ +struct nvme_fc_remote_port { + /* static fields */ + u32 port_num; + u32 port_role; + u64 node_name; + u64 port_name; + + struct nvme_fc_local_port *localport; + + void *private; + + /* dynamic fields */ + u32 port_id; + enum nvme_fc_obj_state port_state; +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + + +/** + * struct nvme_fc_port_template - structure containing static entrypoints and + * operational parameters for an LLDD that supports NVME host + * behavior. Passed by reference in port registrations. + * NVME-FC transport remembers template reference and may + * access it during runtime operation. + * + * Host/Initiator Transport Entrypoints/Parameters: + * + * @localport_delete: The LLDD initiates deletion of a localport via + * nvme_fc_unregister_localport(). However, the teardown is + * asynchronous. This routine is called upon the completion of the + * teardown to inform the LLDD that the localport has been deleted. + * Entrypoint is Mandatory.
+ * + * @remoteport_delete: The LLDD initiates deletion of a remoteport via + * nvme_fc_unregister_remoteport(). However, the teardown is + * asynchronous. This routine is called upon the completion of the + * teardown to inform the LLDD that the remoteport has been deleted. + * Entrypoint is Mandatory. + * + * @create_queue: Upon creating a host<->controller association, queues are + * created such that they can be affinitized to cpus/cores. This is a + * callback into the LLDD to notify it that a controller queue is being + * created. The LLDD may choose to allocate an associated hw queue + * or map it onto a shared hw queue. Upon return from the call, the + * LLDD specifies a handle that will be given back to it for any + * command that is posted to the controller queue. The handle can + * be used by the LLDD to map quickly to the proper hw queue for + * command execution. The mask of cpu's that will map to this queue + * at the block-level is also passed in. The LLDD should use the + * queue id and/or cpu masks to ensure proper affinitization of the + * controller queue to the hw queue. + * Entrypoint is Optional. + * + * @delete_queue: This is the inverse of the create_queue. During + * host<->controller association teardown, this routine is called + * when a controller queue is being terminated. Any association with + * a hw queue should be terminated. If there is a unique hw queue, the + * hw queue should be torn down. + * Entrypoint is Optional. + * + * @poll_queue: Called to poll for the completion of an io on a blk queue. + * Entrypoint is Optional. + * + * @ls_req: Called to issue a FC-NVME FC-4 LS service request. + * The nvmefc_ls_req structure will fully describe the buffers for + * the request payload and where to place the response payload. The + * LLDD is to allocate an exchange, issue the LS request, obtain the + * LS response, and call the "done" routine specified in the request + * structure (argument to done is the ls request structure itself). + * Entrypoint is Mandatory. + * + * @fcp_io: called to issue a FC-NVME I/O request. The I/O may be for + * an admin queue or an i/o queue. The nvmefc_fcp_req structure will + * fully describe the io: the buffer containing the FC-NVME CMD IU + * (which contains the SQE), the sg list for the payload if applicable, + * and the buffer to place the FC-NVME RSP IU into. The LLDD will + * complete the i/o, indicating the amount of data transferred or + * any transport error, and call the "done" routine specified in the + * request structure (argument to done is the fcp request structure + * itself). + * Entrypoint is Mandatory. + * + * @ls_abort: called to request the LLDD to abort the indicated ls request. + * The call may return before the abort has completed. After aborting + * the request, the LLDD must still call the ls request done routine + * indicating an FC transport Aborted status. + * Entrypoint is Mandatory. + * + * @fcp_abort: called to request the LLDD to abort the indicated fcp request. + * The call may return before the abort has completed. After aborting + * the request, the LLDD must still call the fcp request done routine + * indicating an FC transport Aborted status. + * Entrypoint is Mandatory. + * + * @max_hw_queues: indicates the maximum number of hw queues the LLDD + * supports for cpu affinitization. + * Value is Mandatory. Must be at least 1. + * + * @max_sgl_segments: indicates the maximum number of sgl segments supported + * by the LLDD + * Value is Mandatory. Must be at least 1. Recommend at least 256.
+ * + * @max_dif_sgl_segments: indicates the maximum number of sgl segments + * supported by the LLDD for DIF operations. + * Value is Mandatory. Must be at least 1. Recommend at least 256. + * + * @dma_boundary: indicates the dma address boundary where dma mappings + * will be split across. + * Value is Mandatory. Typical value is 0xFFFFFFFF to split across + * 4Gig address boundaries + * + * @local_priv_sz: The LLDD sets this field to the amount of additional + * memory that it would like the fc nvme layer to allocate on the LLDD's + * behalf whenever a localport is allocated. The additional memory + * area is solely for the use of the LLDD and its location is specified by + * the localport->private pointer. + * Value is Mandatory. Allowed to be zero. + * + * @remote_priv_sz: The LLDD sets this field to the amount of additional + * memory that it would like the fc nvme layer to allocate on the LLDD's + * behalf whenever a remoteport is allocated. The additional memory + * area is solely for the use of the LLDD and its location is specified by + * the remoteport->private pointer. + * Value is Mandatory. Allowed to be zero. + * + * @lsrqst_priv_sz: The LLDD sets this field to the amount of additional + * memory that it would like the fc nvme layer to allocate on the LLDD's + * behalf whenever a ls request structure is allocated. The additional + * memory area is solely for the use of the LLDD and its location is + * specified by the ls_request->private pointer. + * Value is Mandatory. Allowed to be zero. + * + * @fcprqst_priv_sz: The LLDD sets this field to the amount of additional + * memory that it would like the fc nvme layer to allocate on the LLDD's + * behalf whenever a fcp request structure is allocated. The additional + * memory area is solely for the use of the LLDD and its location is + * specified by the fcp_request->private pointer. + * Value is Mandatory. Allowed to be zero.
+ */ +struct nvme_fc_port_template { + /* initiator-based functions */ + void (*localport_delete)(struct nvme_fc_local_port *); + void (*remoteport_delete)(struct nvme_fc_remote_port *); + int (*create_queue)(struct nvme_fc_local_port *, + unsigned int qidx, u16 qsize, + void **handle); + void (*delete_queue)(struct nvme_fc_local_port *, + unsigned int qidx, void *handle); + void (*poll_queue)(struct nvme_fc_local_port *, void *handle); + int (*ls_req)(struct nvme_fc_local_port *, + struct nvme_fc_remote_port *, + struct nvmefc_ls_req *); + int (*fcp_io)(struct nvme_fc_local_port *, + struct nvme_fc_remote_port *, + void *hw_queue_handle, + struct nvmefc_fcp_req *); + void (*ls_abort)(struct nvme_fc_local_port *, + struct nvme_fc_remote_port *, + struct nvmefc_ls_req *); + void (*fcp_abort)(struct nvme_fc_local_port *, + struct nvme_fc_remote_port *, + void *hw_queue_handle, + struct nvmefc_fcp_req *); + + u32 max_hw_queues; + u16 max_sgl_segments; + u16 max_dif_sgl_segments; + u64 dma_boundary; + + /* sizes of additional private data for data structures */ + u32 local_priv_sz; + u32 remote_priv_sz; + u32 lsrqst_priv_sz; + u32 fcprqst_priv_sz; +}; + + +/* + * Initiator/Host functions + */ + +int nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, + struct nvme_fc_port_template *template, + struct device *dev, + struct nvme_fc_local_port **lport_p); + +int nvme_fc_unregister_localport(struct nvme_fc_local_port *localport); + +int nvme_fc_register_remoteport(struct nvme_fc_local_port *localport, + struct nvme_fc_port_info *pinfo, + struct nvme_fc_remote_port **rport_p); + +int nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *remoteport); + + + +/* + * *************** LLDD FC-NVME Target/Subsystem API *************** + * + * For FC LLDD's that are the NVME Subsystem role + * + * ****************************************************************** + */ + +/** + * struct nvmet_fc_port_info - port-specific ids and FC connection-specific + * data element used during NVME Subsystem role + * registrations + * + * Static fields describing the port being registered: + * @node_name: FC WWNN for the port + * @port_name: FC WWPN for the port + * + * Initialization values for dynamic port fields: + * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must + * be set to 0. + */ +struct nvmet_fc_port_info { + u64 node_name; + u64 port_name; + u32 port_id; +}; + + +/** + * struct nvmefc_tgt_ls_req - Structure used between LLDD and NVMET-FC + * layer to represent the exchange context for + * a FC-NVME Link Service (LS). + * + * The structure is allocated by the LLDD whenever a LS Request is received + * from the FC link. The address of the structure is passed to the nvmet-fc + * layer via the nvmet_fc_rcv_ls_req() call. The address of the structure + * will be passed back to the LLDD when the response is to be transmitted. + * The LLDD is to use the address to map back to the LLDD exchange structure + * which maintains information such as the targetport the LS was received + * on, the remote FC NVME initiator that sent the LS, and any FC exchange + * context. Upon completion of the LS response transmit, the address of the + * structure will be passed back to the LS rsp done() routine, allowing the + * nvmet-fc layer to release dma resources. Upon completion of the done() + * routine, no further access will be made by the nvmet-fc layer and the + * LLDD can de-allocate the structure.
+ * + * Field initialization: + * At the time of the nvmet_fc_rcv_ls_req() call, there is no content that + * is valid in the structure. + * + * When the structure is used for the LLDD->xmt_ls_rsp() call, the nvmet-fc + * layer will fully set the fields in order to specify the response + * payload buffer and its length as well as the done routine to be called + * upon completion of the transmit. The nvmet-fc layer will also set a + * private pointer for its own use in the done routine. + * + * Values set by the NVMET-FC layer prior to calling the LLDD xmt_ls_rsp + * entrypoint. + * @rspbuf: pointer to the LS response buffer + * @rspdma: PCI DMA address of the LS response buffer + * @rsplen: Length, in bytes, of the LS response buffer + * @done: The callback routine the LLDD is to invoke upon completion of + * transmitting the LS response. req argument is the pointer to + * the original ls request. + * @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used + * as part of the NVMET-FC processing. The LLDD is not to access + * this pointer. + */ +struct nvmefc_tgt_ls_req { + void *rspbuf; + dma_addr_t rspdma; + u16 rsplen; + + void (*done)(struct nvmefc_tgt_ls_req *req); + void *nvmet_fc_private; /* LLDD is not to access !! */ +}; + +/* Operations that NVME-FC layer may request the LLDD to perform for FCP */ +enum { + NVMET_FCOP_READDATA = 1, /* xmt data to initiator */ + NVMET_FCOP_WRITEDATA = 2, /* xmt data from initiator */ + NVMET_FCOP_READDATA_RSP = 3, /* xmt data to initiator and send + * rsp as well + */ + NVMET_FCOP_RSP = 4, /* send rsp frame */ + NVMET_FCOP_ABORT = 5, /* abort exchange via ABTS */ + NVMET_FCOP_BA_ACC = 6, /* send BA_ACC */ + NVMET_FCOP_BA_RJT = 7, /* send BA_RJT */ +}; + +/** + * struct nvmefc_tgt_fcp_req - Structure used between LLDD and NVMET-FC + * layer to represent the exchange context and + * the specific FC-NVME IU operation(s) to perform + * for a FC-NVME FCP IO. + * + * Structure used between LLDD and nvmet-fc layer to represent the exchange + * context for a FC-NVME FCP I/O operation (e.g. an nvme sqe, the sqe-related + * memory transfers, and its associated cqe transfer). + * + * The structure is allocated by the LLDD whenever a FCP CMD IU is received + * from the FC link. The address of the structure is passed to the nvmet-fc + * layer via the nvmet_fc_rcv_fcp_req() call. The address of the structure + * will be passed back to the LLDD for the data operations and transmit of + * the response. The LLDD is to use the address to map back to the LLDD + * exchange structure which maintains information such as the targetport + * the FCP I/O was received on, the remote FC NVME initiator that sent the + * FCP I/O, and any FC exchange context. Upon completion of the FCP target + * operation, the address of the structure will be passed back to the FCP + * op done() routine, allowing the nvmet-fc layer to release dma resources. + * Upon completion of the done() routine for either RSP or ABORT ops, no + * further access will be made by the nvmet-fc layer and the LLDD can + * de-allocate the structure. + * + * Field initialization: + * At the time of the nvmet_fc_rcv_fcp_req() call, there is no content that + * is valid in the structure. + * + * When the structure is used for an FCP target operation, the nvmet-fc + * layer will fully set the fields in order to specify the scatter/gather + * list, the transfer length, as well as the done routine to be called + * upon completion of the operation.
The nvmet-fc layer will also set a + * private pointer for its own use in the done routine. + * + * Note: the LLDD must never fail a NVMET_FCOP_ABORT request !! + * + * Values set by the NVMET-FC layer prior to calling the LLDD fcp_op + * entrypoint. + * @op: Indicates the FCP IU operation to perform (see NVMET_FCOP_xxx) + * @hwqid: Specifies the hw queue index (0..N-1, where N is the + * max_hw_queues value from the LLDD's nvmet_fc_target_template) + * that the operation is to use. + * @offset: Indicates the DATA_OUT/DATA_IN payload offset to be transferred. + * Field is only valid on WRITEDATA, READDATA, or READDATA_RSP ops. + * @timeout: amount of time, in seconds, to wait for a response from the NVME + * host. A value of 0 is an infinite wait. + * Valid only for the following ops: + * WRITEDATA: caps the wait for data reception + * READDATA_RSP & RSP: caps wait for FCP_CONF reception (if used) + * @transfer_length: the length, in bytes, of the DATA_OUT or DATA_IN payload + * that is to be transferred. + * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops. + * @ba_rjt: Contains the BA_RJT payload that is to be transferred. + * Valid only for the NVMET_FCOP_BA_RJT op. + * @sg: Scatter/gather list for the DATA_OUT/DATA_IN payload data. + * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops. + * @sg_cnt: Number of valid entries in the scatter/gather list. + * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops. + * @rspaddr: pointer to the FCP RSP IU buffer to be transmitted + * Used by RSP and READDATA_RSP ops + * @rspdma: PCI DMA address of the FCP RSP IU buffer + * Used by RSP and READDATA_RSP ops + * @rsplen: Length, in bytes, of the FCP RSP IU buffer + * Used by RSP and READDATA_RSP ops + * @done: The callback routine the LLDD is to invoke upon completion of + * the operation. req argument is the pointer to the original + * FCP subsystem op request. + * @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used + * as part of the NVMET-FC processing. The LLDD is not to + * reference this field. + * + * Values set by the LLDD indicating completion status of the FCP operation. + * Must be set prior to calling the done() callback. + * @transferred_length: amount of DATA_OUT payload data received by + * a WRITEDATA operation. If not a WRITEDATA operation, value must + * be set to 0. Should equal transfer_length on success. + * @fcp_error: status of the FCP operation. Must be 0 on success; on failure + * must be a NVME_SC_FC_xxxx value. + */ +struct nvmefc_tgt_fcp_req { + u8 op; + u16 hwqid; + u32 offset; + u32 timeout; + u32 transfer_length; + struct fc_ba_rjt ba_rjt; + struct scatterlist sg[NVME_FC_MAX_SEGMENTS]; + int sg_cnt; + void *rspaddr; + dma_addr_t rspdma; + u16 rsplen; + + void (*done)(struct nvmefc_tgt_fcp_req *); + + void *nvmet_fc_private; /* LLDD is not to access !! */ + + u32 transferred_length; + int fcp_error; +}; + + +/* Target Features (Bit fields) LLDD supports */ +enum { + NVMET_FCTGTFEAT_READDATA_RSP = (1 << 0), + /* Bit 0: supports the NVMET_FCOP_READDATA_RSP op, which + * sends (the last) Read Data sequence followed by the RSP + * sequence in one LLDD operation. Errors during Data + * sequence transmit must not allow RSP sequence to be sent. + */ + NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED = (1 << 1), + /* Bit 1: When 0, the LLDD will deliver FCP CMD + * on the CPU it should be affinitized to. Thus work will + * be scheduled on the cpu received on. When 1, the LLDD + * may not deliver the CMD on the CPU it should be worked + * on.
The transport should pick a cpu to schedule the work + * on. + */ +}; + + +/** + * struct nvmet_fc_target_port - structure used between NVME-FC transport and + * a LLDD to reference a local NVME subsystem port. + * Allocated/created by the nvmet_fc_register_targetport() + * transport interface. + * + * Fields with static values for the port. Initialized by the + * port_info struct supplied to the registration call. + * @port_num: NVME-FC transport subsystem port number + * @node_name: FC WWNN for the port + * @port_name: FC WWPN for the port + * @private: pointer to memory allocated alongside the local port + * structure that is specifically for the LLDD to use. + * The length of the buffer corresponds to the target_priv_sz + * value specified in the nvmet_fc_target_template supplied by + * the LLDD. + * + * Fields with dynamic values. Values may change based on link state. LLDD + * may reference fields directly to change them. Initialized by the + * port_info struct supplied to the registration call. + * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must + * be set to 0. + * @port_state: Operational state of the port. + */ +struct nvmet_fc_target_port { + /* static/read-only fields */ + u32 port_num; + u64 node_name; + u64 port_name; + + void *private; + + /* dynamic fields */ + u32 port_id; + enum nvme_fc_obj_state port_state; +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + + +/** + * struct nvmet_fc_target_template - structure containing static entrypoints + * and operational parameters for an LLDD that supports NVME + * subsystem behavior. Passed by reference in port + * registrations. NVME-FC transport remembers template + * reference and may access it during runtime operation. + * + * Subsystem/Target Transport Entrypoints/Parameters: + * + * @targetport_delete: The LLDD initiates deletion of a targetport via + * nvmet_fc_unregister_targetport(). However, the teardown is + * asynchronous. This routine is called upon the completion of the + * teardown to inform the LLDD that the targetport has been deleted. + * Entrypoint is Mandatory. + * + * @xmt_ls_rsp: Called to transmit the response to a FC-NVME FC-4 LS service. + * The nvmefc_tgt_ls_req structure is the same LLDD-supplied exchange + * structure specified in the nvmet_fc_rcv_ls_req() call made when + * the LS request was received. The structure will fully describe + * the buffers for the response payload and the dma address of the + * payload. The LLDD is to transmit the response (or return a non-zero + * errno status), and upon completion of the transmit, call the + * "done" routine specified in the nvmefc_tgt_ls_req structure + * (argument to done is the ls request structure itself). + * After calling the done routine, the LLDD shall consider the + * LS handling complete and the nvmefc_tgt_ls_req structure may + * be freed/released. + * Entrypoint is Mandatory. + * + * @fcp_op: Called to perform a data transfer, transmit a response, or + * abort an FCP operation. The nvmefc_tgt_fcp_req structure is the same + * LLDD-supplied exchange structure specified in the + * nvmet_fc_rcv_fcp_req() call made when the FCP CMD IU was received. + * The op field in the structure shall indicate the operation for + * the LLDD to perform relative to the io. + * NVMET_FCOP_READDATA operation: the LLDD is to send the + * payload data (described by sglist) to the host in 1 or + * more FC sequences (preferably 1).
Note: the fc-nvme layer + * may call the READDATA operation multiple times for longer + * payloads. + * NVMET_FCOP_WRITEDATA operation: the LLDD is to receive the + * payload data (described by sglist) from the host via 1 or + * more FC sequences (preferably 1). The LLDD is to generate + * the XFER_RDY IU(s) corresponding to the data being requested. + * Note: the FC-NVME layer may call the WRITEDATA operation + * multiple times for longer payloads. + * NVMET_FCOP_READDATA_RSP operation: the LLDD is to send the + * payload data (described by sglist) to the host in 1 or + * more FC sequences (preferably 1). If an error occurs during + * payload data transmission, the LLDD is to set the + * nvmefc_tgt_fcp_req fcp_error and transferred_length field, then + * consider the operation complete. On error, the LLDD is to not + * transmit the FCP_RSP iu. If all payload data is transferred + * successfully, the LLDD is to update the nvmefc_tgt_fcp_req + * transferred_length field and may subsequently transmit the + * FCP_RSP iu payload (described by rspbuf, rspdma, rsplen). + * The LLDD is to await FCP_CONF reception to confirm the RSP + * reception by the host. The LLDD may retransmit the FCP_RSP iu + * if necessary per FC-NVME. Upon reception of FCP_CONF, or upon + * FCP_CONF failure, the LLDD is to set the nvmefc_tgt_fcp_req + * fcp_error field and consider the operation complete. + * NVMET_FCOP_RSP: the LLDD is to transmit the FCP_RSP iu payload + * (described by rspbuf, rspdma, rsplen). The LLDD is to await + * FCP_CONF reception to confirm the RSP reception by the host. + * The LLDD may retransmit the FCP_RSP iu if necessary per FC-NVME. + * Upon reception of FCP_CONF, or upon FCP_CONF failure, the + * LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and + * consider the operation complete. + * NVMET_FCOP_ABORT: the LLDD is to terminate the exchange + * corresponding to the fcp operation. The LLDD shall send + * ABTS and follow FC exchange abort-multi rules, including + * ABTS retries and possible logout. + * Upon completing the indicated operation, the LLDD is to set the + * status fields for the operation (transferred_length and fcp_error + * status) in the request, then call the "done" routine + * indicated in the fcp request. Upon return from the "done" + * routine for either a NVMET_FCOP_RSP or NVMET_FCOP_ABORT operation + * the fc-nvme layer will no longer reference the fcp request, + * allowing the LLDD to free/release the fcp request. + * Note: when calling the done routine for READDATA or WRITEDATA + * operations, the fc-nvme layer may immediately convert, in the same + * thread and before returning to the LLDD, the fcp operation to + * the next operation for the fcp io and call the LLDD's fcp_op + * routine again. If fields in the fcp request are to be accessed post + * the done call, the LLDD should save their values prior to calling + * the done routine, and inspect the saved values after the done + * routine. + * Returns 0 on success, -<errno> on failure (Ex: -EIO) + * Entrypoint is Mandatory. + * + * @max_hw_queues: indicates the maximum number of hw queues the LLDD + * supports for cpu affinitization. + * Value is Mandatory. Must be at least 1. + * + * @max_sgl_segments: indicates the maximum number of sgl segments supported + * by the LLDD + * Value is Mandatory. Must be at least 1. Recommend at least 256. + * + * @max_dif_sgl_segments: indicates the maximum number of sgl segments + * supported by the LLDD for DIF operations. + * Value is Mandatory. Must be at least 1.
Recommend at least 256. + * + * @dma_boundary: indicates the dma address boundary where dma mappings + * will be split across. + * Value is Mandatory. Typical value is 0xFFFFFFFF to split across + * 4Gig address boundaries + * + * @target_features: The LLDD sets bits in this field to correspond to + * optional features that are supported by the LLDD. + * Refer to the NVMET_FCTGTFEAT_xxx values. + * Value is Mandatory. Allowed to be zero. + * + * @target_priv_sz: The LLDD sets this field to the amount of additional + * memory that it would like the fc nvme layer to allocate on the LLDD's + * behalf whenever a targetport is allocated. The additional memory + * area is solely for the use of the LLDD and its location is specified by + * the targetport->private pointer. + * Value is Mandatory. Allowed to be zero. + */ +struct nvmet_fc_target_template { + void (*targetport_delete)(struct nvmet_fc_target_port *tgtport); + int (*xmt_ls_rsp)(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_ls_req *tls_req); + int (*fcp_op)(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_fcp_req *); + + u32 max_hw_queues; + u16 max_sgl_segments; + u16 max_dif_sgl_segments; + u64 dma_boundary; + + u32 target_features; + + u32 target_priv_sz; +}; + + +int nvmet_fc_register_targetport(struct nvmet_fc_port_info *portinfo, + struct nvmet_fc_target_template *template, + struct device *dev, + struct nvmet_fc_target_port **tgtport_p); + +int nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *tgtport); + +int nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_ls_req *lsreq, + void *lsreqbuf, u32 lsreqbuf_len); + +int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_fcp_req *fcpreq, + void *cmdiubuf, u32 cmdiubuf_len); + +#endif /* _NVME_FC_DRIVER_H */ diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h new file mode 100644 index 000000000000..4b45226bd604 --- /dev/null +++ b/include/linux/nvme-fc.h @@ -0,0 +1,268 @@ +/* + * Copyright (c) 2016 Avago Technologies. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful. + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, + * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A + * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO + * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which + * can be found in the file COPYING included with this package + * + */ + +/* + * This file contains definitions relative to FC-NVME r1.11 and a few + * newer items + */ + +#ifndef _NVME_FC_H +#define _NVME_FC_H 1 + + +#define NVME_CMD_SCSI_ID 0xFD +#define NVME_CMD_FC_ID FC_TYPE_NVME + +/* FC-NVME Cmd IU Flags */ +#define FCNVME_CMD_FLAGS_DIRMASK 0x03 +#define FCNVME_CMD_FLAGS_WRITE 0x01 +#define FCNVME_CMD_FLAGS_READ 0x02 + +struct nvme_fc_cmd_iu { + __u8 scsi_id; + __u8 fc_id; + __be16 iu_len; + __u8 rsvd4[3]; + __u8 flags; + __be64 connection_id; + __be32 csn; + __be32 data_len; + struct nvme_command sqe; + __be32 rsvd88[2]; +}; + +#define NVME_FC_SIZEOF_ZEROS_RSP 12 + +struct nvme_fc_ersp_iu { + __u8 rsvd0[2]; + __be16 iu_len; + __be32 rsn; + __be32 xfrd_len; + __be32 rsvd12; + struct nvme_completion cqe; + /* for now - no additional payload */ +}; + + +/* FC-NVME r1.03/16-119v0 NVME Link Services */ +enum { + FCNVME_LS_RSVD = 0, + FCNVME_LS_RJT = 1, + FCNVME_LS_ACC = 2, + FCNVME_LS_CREATE_ASSOCIATION = 3, + FCNVME_LS_CREATE_CONNECTION = 4, + FCNVME_LS_DISCONNECT = 5, +}; + +/* FC-NVME r1.03/16-119v0 NVME Link Service Descriptors */ +enum { + FCNVME_LSDESC_RSVD = 0x0, + FCNVME_LSDESC_RQST = 0x1, + FCNVME_LSDESC_RJT = 0x2, + FCNVME_LSDESC_CREATE_ASSOC_CMD = 0x3, + FCNVME_LSDESC_CREATE_CONN_CMD = 0x4, + FCNVME_LSDESC_DISCONN_CMD = 0x5, + FCNVME_LSDESC_CONN_ID = 0x6, + FCNVME_LSDESC_ASSOC_ID = 0x7, +}; + + +/* ********** start of Link Service Descriptors ********** */ + + +/* + * Fills in the length of a descriptor. Structure minus descriptor header + */ +static inline __be32 fcnvme_lsdesc_len(size_t sz) +{ + return cpu_to_be32(sz - (2 * sizeof(u32))); +} + + +struct fcnvme_ls_rqst_w0 { + u8 ls_cmd; /* FCNVME_LS_xxx */ + u8 zeros[3]; +}; + +/* FCNVME_LSDESC_RQST */ +struct fcnvme_lsdesc_rqst { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + struct fcnvme_ls_rqst_w0 w0; + __be32 rsvd12; +}; + + + + +/* FCNVME_LSDESC_RJT */ +struct fcnvme_lsdesc_rjt { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + u8 rsvd8; + + /* + * Reject reason and explanation codes are generic + * to ELSs from LS-3.
+ */ + u8 reason_code; + u8 reason_explanation; + + u8 vendor; + __be32 rsvd12; +}; + + +#define FCNVME_ASSOC_HOSTID_LEN 64 +#define FCNVME_ASSOC_HOSTNQN_LEN 256 +#define FCNVME_ASSOC_SUBNQN_LEN 256 + +/* FCNVME_LSDESC_CREATE_ASSOC_CMD */ +struct fcnvme_lsdesc_cr_assoc_cmd { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + __be16 ersp_ratio; + __be16 rsvd10; + __be32 rsvd12[9]; + __be16 cntlid; + __be16 sqsize; + __be32 rsvd52; + u8 hostid[FCNVME_ASSOC_HOSTID_LEN]; + u8 hostnqn[FCNVME_ASSOC_HOSTNQN_LEN]; + u8 subnqn[FCNVME_ASSOC_SUBNQN_LEN]; + u8 rsvd632[384]; +}; + +/* FCNVME_LSDESC_CREATE_CONN_CMD */ +struct fcnvme_lsdesc_cr_conn_cmd { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + __be16 ersp_ratio; + __be16 rsvd10; + __be32 rsvd12[9]; + __be16 qid; + __be16 sqsize; + __be32 rsvd52; +}; + +/* Disconnect Scope Values */ +enum { + FCNVME_DISCONN_ASSOCIATION = 0, + FCNVME_DISCONN_CONNECTION = 1, +}; + +/* FCNVME_LSDESC_DISCONN_CMD */ +struct fcnvme_lsdesc_disconn_cmd { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + u8 rsvd8[3]; + /* note: scope is really a 1 bit field */ + u8 scope; /* FCNVME_DISCONN_xxx */ + __be32 rsvd12; + __be64 id; +}; + +/* FCNVME_LSDESC_CONN_ID */ +struct fcnvme_lsdesc_conn_id { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + __be64 connection_id; +}; + +/* FCNVME_LSDESC_ASSOC_ID */ +struct fcnvme_lsdesc_assoc_id { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + __be64 association_id; +}; + +/* r_ctl values */ +enum { + FCNVME_RS_RCTL_DATA = 1, + FCNVME_RS_RCTL_XFER_RDY = 5, + FCNVME_RS_RCTL_RSP = 8, +}; + + +/* ********** start of Link Services ********** */ + + +/* FCNVME_LS_RJT */ +struct fcnvme_ls_rjt { + struct fcnvme_ls_rqst_w0 w0; + __be32 desc_list_len; + struct fcnvme_lsdesc_rqst rqst; + struct fcnvme_lsdesc_rjt rjt; +}; + +/* FCNVME_LS_ACC */ +struct fcnvme_ls_acc_hdr { + struct fcnvme_ls_rqst_w0 w0; + __be32 desc_list_len; + struct fcnvme_lsdesc_rqst rqst; + /* Followed by cmd-specific ACC descriptors, see next definitions */ +}; + +/* FCNVME_LS_CREATE_ASSOCIATION */ +struct fcnvme_ls_cr_assoc_rqst { + struct fcnvme_ls_rqst_w0 w0; + __be32 desc_list_len; + struct fcnvme_lsdesc_cr_assoc_cmd assoc_cmd; +}; + +struct fcnvme_ls_cr_assoc_acc { + struct fcnvme_ls_acc_hdr hdr; + struct fcnvme_lsdesc_assoc_id associd; + struct fcnvme_lsdesc_conn_id connectid; +}; + + +/* FCNVME_LS_CREATE_CONNECTION */ +struct fcnvme_ls_cr_conn_rqst { + struct fcnvme_ls_rqst_w0 w0; + __be32 desc_list_len; + struct fcnvme_lsdesc_assoc_id associd; + struct fcnvme_lsdesc_cr_conn_cmd connect_cmd; +}; + +struct fcnvme_ls_cr_conn_acc { + struct fcnvme_ls_acc_hdr hdr; + struct fcnvme_lsdesc_conn_id connectid; +}; + +/* FCNVME_LS_DISCONNECT */ +struct fcnvme_ls_disconnect_rqst { + struct fcnvme_ls_rqst_w0 w0; + __be32 desc_list_len; + struct fcnvme_lsdesc_assoc_id associd; + struct fcnvme_lsdesc_disconn_cmd discon_cmd; +}; + +struct fcnvme_ls_disconnect_acc { + struct fcnvme_ls_acc_hdr hdr; +}; + + +/* + * Yet to be defined in FC-NVME: + */ +#define NVME_FC_CONNECT_TIMEOUT_SEC 2 /* 2 seconds */ +#define NVME_FC_LS_TIMEOUT_SEC 2 /* 2 seconds */ +#define NVME_FC_TGTOP_TIMEOUT_SEC 2 /* 2 seconds */ + + +#endif /* _NVME_FC_H */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h index fc3c24206593..3d1c6f1b15c9 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -242,6 +242,7 @@ enum { NVME_CTRL_ONCS_COMPARE = 1 << 0, NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1, NVME_CTRL_ONCS_DSM = 1 << 2, + 
NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3, NVME_CTRL_VWC_PRESENT = 1 << 0, }; @@ -558,6 +559,23 @@ struct nvme_dsm_range { __le64 slba; }; +struct nvme_write_zeroes_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le32 reftag; + __le16 apptag; + __le16 appmask; +}; + /* Admin commands */ enum nvme_admin_opcode { @@ -857,6 +875,7 @@ struct nvme_command { struct nvme_download_firmware dlfw; struct nvme_format_cmd format; struct nvme_dsm_cmd dsm; + struct nvme_write_zeroes_cmd write_zeroes; struct nvme_abort_cmd abort; struct nvme_get_log_page_command get_log_page; struct nvmf_common_command fabrics; @@ -947,6 +966,7 @@ enum { NVME_SC_BAD_ATTRIBUTES = 0x180, NVME_SC_INVALID_PI = 0x181, NVME_SC_READ_ONLY = 0x182, + NVME_SC_ONCS_NOT_SUPPORTED = 0x183, /* * I/O Command Set Specific - Fabrics commands: @@ -973,17 +993,30 @@ enum { NVME_SC_UNWRITTEN_BLOCK = 0x287, NVME_SC_DNR = 0x4000, + + + /* + * FC Transport-specific error status values for NVME commands + * + * Transport-specific status code values must be in the range 0xB0..0xBF + */ + + /* Generic FC failure - catchall */ + NVME_SC_FC_TRANSPORT_ERROR = 0x00B0, + + /* I/O failure due to FC ABTS'd */ + NVME_SC_FC_TRANSPORT_ABORTED = 0x00B1, }; struct nvme_completion { /* * Used by Admin and Fabrics commands to return data: */ - union { - __le16 result16; - __le32 result; - __le64 result64; - }; + union nvme_result { + __le16 u16; + __le32 u32; + __le64 u64; + } result; __le16 sq_head; /* how much of this queue may be reclaimed */ __le16 sq_id; /* submission queue that generated this entry */ __u16 command_id; /* of the command which completed */ diff --git a/include/linux/of.h b/include/linux/of.h index 299aeb192727..d72f01009297 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -1266,6 +1266,18 @@ static inline bool of_device_is_system_power_controller(const struct device_node * Overlay support */ +enum of_overlay_notify_action { + OF_OVERLAY_PRE_APPLY, + OF_OVERLAY_POST_APPLY, + OF_OVERLAY_PRE_REMOVE, + OF_OVERLAY_POST_REMOVE, +}; + +struct of_overlay_notify_data { + struct device_node *overlay; + struct device_node *target; +}; + #ifdef CONFIG_OF_OVERLAY /* ID based overlays; the API for external users */ @@ -1273,6 +1285,9 @@ int of_overlay_create(struct device_node *tree); int of_overlay_destroy(int id); int of_overlay_destroy_all(void); +int of_overlay_notifier_register(struct notifier_block *nb); +int of_overlay_notifier_unregister(struct notifier_block *nb); + #else static inline int of_overlay_create(struct device_node *tree) @@ -1290,6 +1305,16 @@ static inline int of_overlay_destroy_all(void) return -ENOTSUPP; } +static inline int of_overlay_notifier_register(struct notifier_block *nb) +{ + return 0; +} + +static inline int of_overlay_notifier_unregister(struct notifier_block *nb) +{ + return 0; +} + #endif #endif /* _LINUX_OF_H */ diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index 4341f32516d8..271b3fdf0070 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -71,6 +71,7 @@ extern int early_init_dt_scan_chosen_stdout(void); extern void early_init_fdt_scan_reserved_mem(void); extern void early_init_fdt_reserve_self(void); extern void early_init_dt_add_memory_arch(u64 base, u64 size); +extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size); extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, bool 
no_map); extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align); diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index 2ab233661ae5..a58cca8bcb29 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -29,6 +29,7 @@ struct phy_device *of_phy_attach(struct net_device *dev, extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np); extern int of_phy_register_fixed_link(struct device_node *np); +extern void of_phy_deregister_fixed_link(struct device_node *np); extern bool of_phy_is_fixed_link(struct device_node *np); #else /* CONFIG_OF */ @@ -83,6 +84,9 @@ static inline int of_phy_register_fixed_link(struct device_node *np) { return -ENOSYS; } +static inline void of_phy_deregister_fixed_link(struct device_node *np) +{ +} static inline bool of_phy_is_fixed_link(struct device_node *np) { return false; diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index dd15d39e1985..7dbe9148b2f8 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -374,16 +374,13 @@ static inline struct page *read_mapping_page(struct address_space *mapping, } /* - * Get the offset in PAGE_SIZE. - * (TODO: hugepage should have ->index in PAGE_SIZE) + * Get the index of the page within the radix-tree + * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE) */ -static inline pgoff_t page_to_pgoff(struct page *page) +static inline pgoff_t page_to_index(struct page *page) { pgoff_t pgoff; - if (unlikely(PageHeadHuge(page))) - return page->index << compound_order(page); - if (likely(!PageTransTail(page))) return page->index; @@ -397,6 +394,18 @@ static inline pgoff_t page_to_pgoff(struct page *page) } /* + * Get the offset in PAGE_SIZE. + * (TODO: hugepage should have ->index in PAGE_SIZE) + */ +static inline pgoff_t page_to_pgoff(struct page *page) +{ + if (unlikely(PageHeadHuge(page))) + return page->index << compound_order(page); + + return page_to_index(page); +} + +/* * Return byte-offset into filesystem object for page. */ static inline loff_t page_offset(struct page *page) diff --git a/include/linux/parser.h b/include/linux/parser.h index 39d5b7955b23..884c1e6eb3fe 100644 --- a/include/linux/parser.h +++ b/include/linux/parser.h @@ -27,6 +27,7 @@ typedef struct { int match_token(char *, const match_table_t table, substring_t args[]); int match_int(substring_t *, int *result); +int match_u64(substring_t *, u64 *result); int match_octal(substring_t *, int *result); int match_hex(substring_t *, int *result); bool match_wildcard(const char *pattern, const char *str); diff --git a/include/linux/pci.h b/include/linux/pci.h index 0e49f70dbd9b..30d6c162e053 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -244,6 +244,7 @@ struct pci_cap_saved_state { struct pci_cap_saved_data cap; }; +struct irq_affinity; struct pcie_link_state; struct pci_vpd; struct pci_sriov; @@ -332,7 +333,6 @@ struct pci_dev { * directly, use the values stored here. They might be different!
*/ unsigned int irq; - struct cpumask *irq_affinity; struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ bool match_driver; /* Skip attaching driver */ @@ -1310,8 +1310,10 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev, return rc; return 0; } -int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, - unsigned int max_vecs, unsigned int flags); +int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags, + const struct irq_affinity *affd); + void pci_free_irq_vectors(struct pci_dev *dev); int pci_irq_vector(struct pci_dev *dev, unsigned int nr); const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec); @@ -1339,14 +1341,17 @@ static inline int pci_enable_msix_range(struct pci_dev *dev, static inline int pci_enable_msix_exact(struct pci_dev *dev, struct msix_entry *entries, int nvec) { return -ENOSYS; } -static inline int pci_alloc_irq_vectors(struct pci_dev *dev, - unsigned int min_vecs, unsigned int max_vecs, - unsigned int flags) + +static inline int +pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags, + const struct irq_affinity *aff_desc) { if (min_vecs > 1) return -EINVAL; return 1; } + static inline void pci_free_irq_vectors(struct pci_dev *dev) { } @@ -1364,6 +1369,14 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, } #endif +static inline int +pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags) +{ + return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags, + NULL); +} + #ifdef CONFIG_PCIEPORTBUS extern bool pcie_ports_disabled; extern bool pcie_ports_auto; @@ -1928,6 +1941,20 @@ static inline int pci_pcie_type(const struct pci_dev *dev) return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4; } +static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev) +{ + while (1) { + if (!pci_is_pcie(dev)) + break; + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) + return dev; + if (!dev->bus->self) + break; + dev = dev->bus->self; + } + return NULL; +} + void pci_request_acs(void); bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); bool pci_acs_path_enabled(struct pci_dev *start, diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index c58752fe16c4..a5e6c7bca610 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -23,8 +23,10 @@ #define PCI_CLASS_STORAGE_SATA 0x0106 #define PCI_CLASS_STORAGE_SATA_AHCI 0x010601 #define PCI_CLASS_STORAGE_SAS 0x0107 +#define PCI_CLASS_STORAGE_EXPRESS 0x010802 #define PCI_CLASS_STORAGE_OTHER 0x0180 + #define PCI_BASE_CLASS_NETWORK 0x02 #define PCI_CLASS_NETWORK_ETHERNET 0x0200 #define PCI_CLASS_NETWORK_TOKEN_RING 0x0201 diff --git a/include/linux/phy.h b/include/linux/phy.h index e25f1830fbcf..f7d95f644eed 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -25,6 +25,7 @@ #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/mod_devicetable.h> +#include <linux/phy_led_triggers.h> #include <linux/atomic.h> @@ -85,6 +86,21 @@ typedef enum { } phy_interface_t; /** + * phy_supported_speeds - return all speeds currently supported by a phy device + * @phy: The phy device to return supported speeds of. + * @speeds: buffer to store supported speeds in. + * @size: size of speeds buffer. 
+ * + * Description: Returns the number of supported speeds, and + * fills the speeds buffer with the supported speeds. If speeds buffer is + * too small to contain all currently supported speeds, will return as + * many speeds as can fit. + */ +unsigned int phy_supported_speeds(struct phy_device *phy, + unsigned int *speeds, + unsigned int size); + +/** * It maps 'enum phy_interface_t' found in include/linux/phy.h * into the device tree binding of 'phy-mode', so that Ethernet * device driver can get phy interface from device tree. @@ -343,7 +359,7 @@ struct phy_c45_device_ids { * giving up on the current attempt at acquiring a link * irq: IRQ number of the PHY's interrupt (-1 if none) * phy_timer: The timer for handling the state machine - * phy_queue: A work_queue for the interrupt + * phy_queue: A work_queue for the phy_mac_interrupt * attached_dev: The attached enet driver's device instance ptr * adjust_link: Callback for the enet controller to respond to * changes in the link state. @@ -401,10 +417,19 @@ struct phy_device { u32 advertising; u32 lp_advertising; + /* Energy efficient ethernet modes which should be prohibited */ + u32 eee_broken_modes; + int autoneg; int link_timeout; +#ifdef CONFIG_LED_TRIGGER_PHY + struct phy_led_trigger *phy_led_triggers; + unsigned int phy_num_led_triggers; + struct phy_led_trigger *last_triggered; +#endif + /* * Interrupt number for this PHY * -1 means no interrupt @@ -425,6 +450,7 @@ struct phy_device { struct net_device *attached_dev; u8 mdix; + u8 mdix_ctrl; void (*adjust_link)(struct net_device *dev); }; @@ -589,6 +615,13 @@ struct phy_driver { void (*get_strings)(struct phy_device *dev, u8 *data); void (*get_stats)(struct phy_device *dev, struct ethtool_stats *stats, u64 *data); + + /* Get and Set PHY tunables */ + int (*get_tunable)(struct phy_device *dev, + struct ethtool_tunable *tuna, void *data); + int (*set_tunable)(struct phy_device *dev, + struct ethtool_tunable *tuna, + const void *data); }; #define to_phy_driver(d) container_of(to_mdio_common_driver(d), \ struct phy_driver, mdiodrv) @@ -764,6 +797,7 @@ void phy_detach(struct phy_device *phydev); void phy_start(struct phy_device *phydev); void phy_stop(struct phy_device *phydev); int phy_start_aneg(struct phy_device *phydev); +int phy_aneg_done(struct phy_device *phydev); int phy_stop_interrupts(struct phy_device *phydev); @@ -802,7 +836,8 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner); int phy_drivers_register(struct phy_driver *new_driver, int n, struct module *owner); void phy_state_machine(struct work_struct *work); -void phy_change(struct work_struct *work); +void phy_change(struct phy_device *phydev); +void phy_change_work(struct work_struct *work); void phy_mac_interrupt(struct phy_device *phydev, int new_link); void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); @@ -825,6 +860,10 @@ int phy_register_fixup_for_id(const char *bus_id, int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask, int (*run)(struct phy_device *)); +int phy_unregister_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask); +int phy_unregister_fixup_for_id(const char *bus_id); +int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask); + int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable); int phy_get_eee_err(struct phy_device *phydev); int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data); @@ -836,6 +875,7 @@ int phy_ethtool_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *cmd); int phy_ethtool_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *cmd); +int phy_ethtool_nway_reset(struct net_device *ndev); int __init mdio_bus_init(void); void mdio_bus_exit(void); diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h index 9d18e9f948e9..35c070ea6ea3 100644 --- a/include/linux/phy/phy-qcom-ufs.h +++ b/include/linux/phy/phy-qcom-ufs.h @@ -18,22 +18,6 @@ #include "phy.h" /** - * ufs_qcom_phy_enable_ref_clk() - Enable the phy - * ref clock. - * @phy: reference to a generic phy - * - * returns 0 for success, and non-zero for error. - */ -int ufs_qcom_phy_enable_ref_clk(struct phy *phy); - -/** - * ufs_qcom_phy_disable_ref_clk() - Disable the phy - * ref clock. - * @phy: reference to a generic phy. - */ -void ufs_qcom_phy_disable_ref_clk(struct phy *phy); - -/** * ufs_qcom_phy_enable_dev_ref_clk() - Enable the device * ref clock. * @phy: reference to a generic phy. @@ -47,8 +31,6 @@ void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy); */ void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy); -int ufs_qcom_phy_enable_iface_clk(struct phy *phy); -void ufs_qcom_phy_disable_iface_clk(struct phy *phy); int ufs_qcom_phy_start_serdes(struct phy *phy); int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes); int ufs_qcom_phy_calibrate_phy(struct phy *phy, bool is_rate_B); diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h new file mode 100644 index 000000000000..a2daea0a37d2 --- /dev/null +++ b/include/linux/phy_led_triggers.h @@ -0,0 +1,51 @@ +/* Copyright (C) 2016 National Instruments Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __PHY_LED_TRIGGERS +#define __PHY_LED_TRIGGERS + +struct phy_device; + +#ifdef CONFIG_LED_TRIGGER_PHY + +#include <linux/leds.h> + +#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10 +#define PHY_MII_BUS_ID_SIZE (20 - 3) + +#define PHY_LINK_LED_TRIGGER_NAME_SIZE (PHY_MII_BUS_ID_SIZE + \ + FIELD_SIZEOF(struct mdio_device, addr)+\ + PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE) + +struct phy_led_trigger { + struct led_trigger trigger; + char name[PHY_LINK_LED_TRIGGER_NAME_SIZE]; + unsigned int speed; +}; + + +extern int phy_led_triggers_register(struct phy_device *phy); +extern void phy_led_triggers_unregister(struct phy_device *phy); +extern void phy_led_trigger_change_speed(struct phy_device *phy); + +#else + +static inline int phy_led_triggers_register(struct phy_device *phy) +{ + return 0; +} +static inline void phy_led_triggers_unregister(struct phy_device *phy) { } +static inline void phy_led_trigger_change_speed(struct phy_device *phy) { } + +#endif + +#endif diff --git a/include/linux/pim.h b/include/linux/pim.h index e1d756f81348..0e81b2778ae0 100644 --- a/include/linux/pim.h +++ b/include/linux/pim.h @@ -1,6 +1,7 @@ #ifndef __LINUX_PIM_H #define __LINUX_PIM_H +#include <linux/skbuff.h> #include <asm/byteorder.h> /* Message types - V1 */ @@ -9,24 +10,86 @@ /* Message types - V2 */ #define PIM_VERSION 2 -#define PIM_REGISTER 1 + +/* RFC7761, sec 4.9: + * Type + * Types for specific PIM messages. PIM Types are: + * + * Message Type Destination + * --------------------------------------------------------------------- + * 0 = Hello Multicast to ALL-PIM-ROUTERS + * 1 = Register Unicast to RP + * 2 = Register-Stop Unicast to source of Register + * packet + * 3 = Join/Prune Multicast to ALL-PIM-ROUTERS + * 4 = Bootstrap Multicast to ALL-PIM-ROUTERS + * 5 = Assert Multicast to ALL-PIM-ROUTERS + * 6 = Graft (used in PIM-DM only) Unicast to RPF'(S) + * 7 = Graft-Ack (used in PIM-DM only) Unicast to source of Graft + * packet + * 8 = Candidate-RP-Advertisement Unicast to Domain's BSR + */ +enum { + PIM_TYPE_HELLO, + PIM_TYPE_REGISTER, + PIM_TYPE_REGISTER_STOP, + PIM_TYPE_JOIN_PRUNE, + PIM_TYPE_BOOTSTRAP, + PIM_TYPE_ASSERT, + PIM_TYPE_GRAFT, + PIM_TYPE_GRAFT_ACK, + PIM_TYPE_CANDIDATE_RP_ADV +}; #define PIM_NULL_REGISTER cpu_to_be32(0x40000000) -static inline bool ipmr_pimsm_enabled(void) -{ - return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2); -} +/* RFC7761, sec 4.9: + * The PIM header common to all PIM messages is: + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |PIM Ver| Type | Reserved | Checksum | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ +struct pimhdr { + __u8 type; + __u8 reserved; + __be16 csum; +}; /* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps) */ struct pimreghdr -{ +struct pimreghdr { __u8 type; __u8 reserved; __be16 csum; __be32 flags; }; -struct sk_buff; -extern int pim_rcv_v1(struct sk_buff *); +int pim_rcv_v1(struct sk_buff *skb); + +static inline bool ipmr_pimsm_enabled(void) +{ + return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2); +} + +static inline struct pimhdr *pim_hdr(const struct sk_buff *skb) +{ + return (struct pimhdr *)skb_transport_header(skb); +} + +static inline u8 pim_hdr_version(const struct pimhdr *pimhdr) +{ + return pimhdr->type >> 4; +} + +static inline u8 pim_hdr_type(const struct pimhdr *pimhdr) +{ + return pimhdr->type & 0xf; +} + +/* check if the address
is 224.0.0.13, RFC7761 sec 4.3.1 */ +static inline bool pim_ipv4_all_pim_routers(__be32 addr) +{ + return addr == htonl(0xE000000D); +} #endif diff --git a/include/linux/pm-trace.h b/include/linux/pm-trace.h index ecbde7a5548e..7b78793f07d7 100644 --- a/include/linux/pm-trace.h +++ b/include/linux/pm-trace.h @@ -1,11 +1,17 @@ #ifndef PM_TRACE_H #define PM_TRACE_H +#include <linux/types.h> #ifdef CONFIG_PM_TRACE #include <asm/pm-trace.h> -#include <linux/types.h> extern int pm_trace_enabled; +extern bool pm_trace_rtc_abused; + +static inline bool pm_trace_rtc_valid(void) +{ + return !pm_trace_rtc_abused; +} static inline int pm_trace_is_enabled(void) { @@ -24,6 +30,7 @@ extern int show_trace_dev_match(char *buf, size_t size); #else +static inline bool pm_trace_rtc_valid(void) { return true; } static inline int pm_trace_is_enabled(void) { return 0; } #define TRACE_DEVICE(dev) do { } while (0) diff --git a/include/linux/pm.h b/include/linux/pm.h index 06eb353182ab..f926af41e122 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -258,7 +258,7 @@ typedef struct pm_message { * example, if it detects that a child was unplugged while the system was * asleep). * - * Refer to Documentation/power/devices.txt for more information about the role + * Refer to Documentation/power/admin-guide/devices.rst for more information about the role * of the above callbacks in the system suspend process. * * There also are callbacks related to runtime power management of devices. @@ -559,6 +559,7 @@ struct dev_pm_info { pm_message_t power_state; unsigned int can_wakeup:1; unsigned int async_suspend:1; + bool in_dpm_list:1; /* Owned by the PM core */ bool is_prepared:1; /* Owned by the PM core */ bool is_suspended:1; /* Ditto */ bool is_noirq_suspended:1; @@ -596,6 +597,7 @@ struct dev_pm_info { unsigned int use_autosuspend:1; unsigned int timer_autosuspends:1; unsigned int memalloc_noio:1; + unsigned int links_count; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index a09fe5c009c8..81ece61075df 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -15,11 +15,11 @@ #include <linux/err.h> #include <linux/of.h> #include <linux/notifier.h> +#include <linux/spinlock.h> /* Defines used for the flags field in the struct generic_pm_domain */ #define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ - -#define GENPD_MAX_NUM_STATES 8 /* Number of possible low power states */ +#define GENPD_FLAG_IRQ_SAFE (1U << 1) /* PM domain operates in atomic */ enum gpd_status { GPD_STATE_ACTIVE = 0, /* PM domain is active */ @@ -40,15 +40,18 @@ struct gpd_dev_ops { struct genpd_power_state { s64 power_off_latency_ns; s64 power_on_latency_ns; + s64 residency_ns; + struct fwnode_handle *fwnode; }; +struct genpd_lock_ops; + struct generic_pm_domain { struct dev_pm_domain domain; /* PM domain operations */ struct list_head gpd_list_node; /* Node in the global PM domains list */ struct list_head master_links; /* Links with PM domain as a master */ struct list_head slave_links; /* Links with PM domain as a slave */ struct list_head dev_list; /* List of devices */ - struct mutex lock; struct dev_power_governor *gov; struct work_struct power_off_work; struct fwnode_handle *provider; /* Identity of the domain provider */ @@ -70,9 +73,18 @@ struct generic_pm_domain { void (*detach_dev)(struct generic_pm_domain *domain, struct device *dev); unsigned int flags; /* Bit field of configs for genpd */ - struct 
genpd_power_state states[GENPD_MAX_NUM_STATES]; + struct genpd_power_state *states; unsigned int state_count; /* number of states */ unsigned int state_idx; /* state that genpd will go to when off */ + void *free; /* Free the state that was allocated for default */ + const struct genpd_lock_ops *lock_ops; + union { + struct mutex mlock; + struct { + spinlock_t slock; + unsigned long lock_flags; + }; + }; }; @@ -205,6 +217,8 @@ extern int of_genpd_add_device(struct of_phandle_args *args, extern int of_genpd_add_subdomain(struct of_phandle_args *parent, struct of_phandle_args *new_subdomain); extern struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); +extern int of_genpd_parse_idle_states(struct device_node *dn, + struct genpd_power_state **states, int *n); int genpd_dev_pm_attach(struct device *dev); #else /* !CONFIG_PM_GENERIC_DOMAINS_OF */ @@ -234,6 +248,12 @@ static inline int of_genpd_add_subdomain(struct of_phandle_args *parent, return -ENODEV; } +static inline int of_genpd_parse_idle_states(struct device_node *dn, + struct genpd_power_state **states, int *n) +{ + return -ENODEV; +} + static inline int genpd_dev_pm_attach(struct device *dev) { return -ENODEV; diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index bca26157f5b6..0edd88f93904 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -17,13 +17,65 @@ #include <linux/err.h> #include <linux/notifier.h> +struct clk; +struct regulator; struct dev_pm_opp; struct device; +struct opp_table; enum dev_pm_opp_event { OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, }; +/** + * struct dev_pm_opp_supply - Power supply voltage/current values + * @u_volt: Target voltage in microvolts corresponding to this OPP + * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP + * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP + * @u_amp: Maximum current drawn by the device in microamperes + * + * This structure stores the voltage/current values for a single power supply. + */ +struct dev_pm_opp_supply { + unsigned long u_volt; + unsigned long u_volt_min; + unsigned long u_volt_max; + unsigned long u_amp; +}; + +/** + * struct dev_pm_opp_info - OPP freq/voltage/current values + * @rate: Target clk rate in hz + * @supplies: Array of voltage/current values for all power supplies + * + * This structure stores the freq/voltage/current values for a single OPP. + */ +struct dev_pm_opp_info { + unsigned long rate; + struct dev_pm_opp_supply *supplies; +}; + +/** + * struct dev_pm_set_opp_data - Set OPP data + * @old_opp: Old OPP info + * @new_opp: New OPP info + * @regulators: Array of regulator pointers + * @regulator_count: Number of regulators + * @clk: Pointer to clk + * @dev: Pointer to the struct device + * + * This structure contains all information required for setting an OPP. 
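As an illustrative aside, not taken from the patch itself: a platform needing a custom OPP transition could register a handler of this shape through dev_pm_opp_register_set_opp_helper(), declared further down in this header. The handler name, the single-supply assumption, and the minimal error handling below are all hypothetical.

#include <linux/clk.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>

/* Hypothetical set_opp() handler; assumes exactly one power supply. */
static int example_set_opp(struct dev_pm_set_opp_data *data)
{
	struct dev_pm_opp_supply *supply = &data->new_opp.supplies[0];
	struct regulator *reg = data->regulators[0];
	bool scaling_up = data->new_opp.rate > data->old_opp.rate;
	int ret;

	/* Raise the supply voltage before raising the clock rate. */
	if (scaling_up) {
		ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
						    supply->u_volt,
						    supply->u_volt_max);
		if (ret)
			return ret;
	}

	ret = clk_set_rate(data->clk, data->new_opp.rate);
	if (ret)
		return ret;

	/* Lower the voltage only after the rate has dropped. */
	if (!scaling_up)
		ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
						    supply->u_volt,
						    supply->u_volt_max);

	return ret;
}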
+ */ +struct dev_pm_set_opp_data { + struct dev_pm_opp_info old_opp; + struct dev_pm_opp_info new_opp; + + struct regulator **regulators; + unsigned int regulator_count; + struct clk *clk; + struct device *dev; +}; + #if defined(CONFIG_PM_OPP) unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); @@ -62,8 +114,10 @@ int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, void dev_pm_opp_put_supported_hw(struct device *dev); int dev_pm_opp_set_prop_name(struct device *dev, const char *name); void dev_pm_opp_put_prop_name(struct device *dev); -int dev_pm_opp_set_regulator(struct device *dev, const char *name); -void dev_pm_opp_put_regulator(struct device *dev); +struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count); +void dev_pm_opp_put_regulators(struct opp_table *opp_table); +int dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); +void dev_pm_opp_register_put_opp_helper(struct device *dev); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); @@ -163,6 +217,14 @@ static inline int dev_pm_opp_set_supported_hw(struct device *dev, static inline void dev_pm_opp_put_supported_hw(struct device *dev) {} +static inline int dev_pm_opp_register_set_opp_helper(struct device *dev, + int (*set_opp)(struct dev_pm_set_opp_data *data)) +{ + return -ENOTSUPP; +} + +static inline void dev_pm_opp_register_put_opp_helper(struct device *dev) {} + static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) { return -ENOTSUPP; @@ -170,12 +232,12 @@ static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) static inline void dev_pm_opp_put_prop_name(struct device *dev) {} -static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name) +static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count) { - return -ENOTSUPP; + return ERR_PTR(-ENOTSUPP); } -static inline void dev_pm_opp_put_regulator(struct device *dev) {} +static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {} static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 2e14d2667b6c..ca4823e675e2 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -55,18 +55,17 @@ extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev); extern void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns); extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable); +extern void pm_runtime_clean_up_links(struct device *dev); +extern void pm_runtime_get_suppliers(struct device *dev); +extern void pm_runtime_put_suppliers(struct device *dev); +extern void pm_runtime_new_link(struct device *dev); +extern void pm_runtime_drop_link(struct device *dev); static inline void pm_suspend_ignore_children(struct device *dev, bool enable) { dev->power.ignore_children = enable; } -static inline bool pm_children_suspended(struct device *dev) -{ - return dev->power.ignore_children - || !atomic_read(&dev->power.child_count); -} - static inline void pm_runtime_get_noresume(struct device *dev) { atomic_inc(&dev->power.usage_count); @@ -162,7 +161,6 @@ 
static inline void pm_runtime_allow(struct device *dev) {} static inline void pm_runtime_forbid(struct device *dev) {} static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {} -static inline bool pm_children_suspended(struct device *dev) { return false; } static inline void pm_runtime_get_noresume(struct device *dev) {} static inline void pm_runtime_put_noidle(struct device *dev) {} static inline bool device_run_wake(struct device *dev) { return false; } @@ -186,6 +184,11 @@ static inline unsigned long pm_runtime_autosuspend_expiration( struct device *dev) { return 0; } static inline void pm_runtime_set_memalloc_noio(struct device *dev, bool enable){} +static inline void pm_runtime_clean_up_links(struct device *dev) {} +static inline void pm_runtime_get_suppliers(struct device *dev) {} +static inline void pm_runtime_put_suppliers(struct device *dev) {} +static inline void pm_runtime_new_link(struct device *dev) {} +static inline void pm_runtime_drop_link(struct device *dev) {} #endif /* !CONFIG_PM */ @@ -265,9 +268,9 @@ static inline int pm_runtime_set_active(struct device *dev) return __pm_runtime_set_status(dev, RPM_ACTIVE); } -static inline void pm_runtime_set_suspended(struct device *dev) +static inline int pm_runtime_set_suspended(struct device *dev) { - __pm_runtime_set_status(dev, RPM_SUSPENDED); + return __pm_runtime_set_status(dev, RPM_SUSPENDED); } static inline void pm_runtime_disable(struct device *dev) diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index e30deb046156..bed9557b69e7 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -4,7 +4,8 @@ enum bq27xxx_chip { BQ27000 = 1, /* bq27000, bq27200 */ BQ27010, /* bq27010, bq27210 */ - BQ27500, /* bq27500, bq27510, bq27520 */ + BQ27500, /* bq27500 */ + BQ27510, /* bq27510, bq27520 */ BQ27530, /* bq27530, bq27531 */ BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ BQ27545, /* bq27545 */ diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 75e4e30677f1..7eeceac52dea 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -65,19 +65,24 @@ /* * Are we doing bottom half or hardware interrupt processing? - * Are we in a softirq context? Interrupt context? - * in_softirq - Are we currently processing softirq or have bh disabled? - * in_serving_softirq - Are we currently processing softirq? + * + * in_irq() - We're in (hard) IRQ context + * in_softirq() - We have BH disabled, or are processing softirqs + * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled + * in_serving_softirq() - We're in softirq context + * in_nmi() - We're in NMI context + * in_task() - We're in task context + * + * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really + * should not be used in new code. */ #define in_irq() (hardirq_count()) #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) - -/* - * Are we in NMI context? 
- */ -#define in_nmi() (preempt_count() & NMI_MASK) +#define in_nmi() (preempt_count() & NMI_MASK) +#define in_task() (!(preempt_count() & \ + (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) /* * The preempt_count offset after preempt_disable(); diff --git a/include/linux/printk.h b/include/linux/printk.h index eac1af8502bb..3472cc6b7a60 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -10,6 +10,8 @@ extern const char linux_banner[]; extern const char linux_proc_banner[]; +#define PRINTK_MAX_SINGLE_HEADER_LEN 2 + static inline int printk_get_level(const char *buffer) { if (buffer[0] == KERN_SOH_ASCII && buffer[1]) { @@ -31,6 +33,14 @@ static inline const char *printk_skip_level(const char *buffer) return buffer; } +static inline const char *printk_skip_headers(const char *buffer) +{ + while (printk_get_level(buffer)) + buffer = printk_skip_level(buffer); + + return buffer; +} + #define CONSOLE_EXT_LOG_MAX 8192 /* printk's without a loglevel use this.. */ @@ -40,10 +50,15 @@ static inline const char *printk_skip_level(const char *buffer) #define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */ #define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */ #define CONSOLE_LOGLEVEL_QUIET 4 /* Shhh ..., when booted with "quiet" */ -#define CONSOLE_LOGLEVEL_DEFAULT 7 /* anything MORE serious than KERN_DEBUG */ #define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */ #define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */ +/* + * Default used to be hard-coded at 7, we're now allowing it to be set from + * kernel config. + */ +#define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT + extern int console_printk[]; #define console_loglevel (console_printk[0]) diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index b97bf2ef996e..2d2bf592d9db 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -21,6 +21,7 @@ extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t, struct proc_dir_entry *, void *); extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t, struct proc_dir_entry *); +struct proc_dir_entry *proc_create_mount_point(const char *name); extern struct proc_dir_entry *proc_create_data(const char *, umode_t, struct proc_dir_entry *, @@ -56,6 +57,7 @@ static inline struct proc_dir_entry *proc_symlink(const char *name, struct proc_dir_entry *parent,const char *dest) { return NULL;} static inline struct proc_dir_entry *proc_mkdir(const char *name, struct proc_dir_entry *parent) {return NULL;} +static inline struct proc_dir_entry *proc_create_mount_point(const char *name) { return NULL; } static inline struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; } static inline struct proc_dir_entry *proc_mkdir_mode(const char *name, @@ -82,4 +84,8 @@ static inline struct proc_dir_entry *proc_net_mkdir( return proc_mkdir_data(name, 0, parent, net); } +struct ns_common; +int open_related_ns(struct ns_common *ns, + struct ns_common *(*get_ns)(struct ns_common *ns)); + #endif /* _LINUX_PROC_FS_H */ diff --git a/include/linux/pstore.h b/include/linux/pstore.h index 92013cc9cc8c..0da29cae009b 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -89,4 +89,80 @@ extern int pstore_register(struct pstore_info *); extern void pstore_unregister(struct pstore_info *); extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); +struct pstore_ftrace_record { + unsigned long ip; + unsigned long parent_ip; + u64 ts; +}; 
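A usage sketch, not part of the patch: a tracing frontend filling one of these records sets the raw fields first, then packs the CPU and timestamp through the helpers defined just below, so the same code works whether the CPU lands in the IP or in the timestamp. The function name is hypothetical.

static void example_fill_record(struct pstore_ftrace_record *rec,
				unsigned long ip, unsigned long parent_ip,
				unsigned int cpu, u64 now)
{
	rec->ip = ip;
	rec->parent_ip = parent_ip;
	rec->ts = 0;

	/* Encode the CPU first; the timestamp helper preserves its bits. */
	pstore_ftrace_encode_cpu(rec, cpu);
	pstore_ftrace_write_timestamp(rec, now);
}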
+ +/* + * ftrace related stuff: Both backends and frontends need these so expose + * them here. + */ + +#if NR_CPUS <= 2 && defined(CONFIG_ARM_THUMB) +#define PSTORE_CPU_IN_IP 0x1 +#elif NR_CPUS <= 4 && defined(CONFIG_ARM) +#define PSTORE_CPU_IN_IP 0x3 +#endif + +#define TS_CPU_SHIFT 8 +#define TS_CPU_MASK (BIT(TS_CPU_SHIFT) - 1) + +/* + * If CPU number can be stored in IP, store it there, otherwise store it in + * the time stamp. This means more timestamp resolution is available when + * the CPU can be stored in the IP. + */ +#ifdef PSTORE_CPU_IN_IP +static inline void +pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu) +{ + rec->ip |= cpu; +} + +static inline unsigned int +pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec) +{ + return rec->ip & PSTORE_CPU_IN_IP; +} + +static inline u64 +pstore_ftrace_read_timestamp(struct pstore_ftrace_record *rec) +{ + return rec->ts; +} + +static inline void +pstore_ftrace_write_timestamp(struct pstore_ftrace_record *rec, u64 val) +{ + rec->ts = val; +} +#else +static inline void +pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu) +{ + rec->ts &= ~(TS_CPU_MASK); + rec->ts |= cpu; +} + +static inline unsigned int +pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec) +{ + return rec->ts & TS_CPU_MASK; +} + +static inline u64 +pstore_ftrace_read_timestamp(struct pstore_ftrace_record *rec) +{ + return rec->ts >> TS_CPU_SHIFT; +} + +static inline void +pstore_ftrace_write_timestamp(struct pstore_ftrace_record *rec, u64 val) +{ + rec->ts = (rec->ts & TS_CPU_MASK) | (val << TS_CPU_SHIFT); +} +#endif + #endif /*_LINUX_PSTORE_H*/ diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index c668c861c96c..9395f06e8372 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h @@ -24,6 +24,13 @@ #include <linux/list.h> #include <linux/types.h> +/* + * Choose whether access to the RAM zone requires locking or not. If a zone + * can be written to from different CPUs like with ftrace for example, then + * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required. + */ +#define PRZ_FLAG_NO_LOCK BIT(0) + struct persistent_ram_buffer; struct rs_control; @@ -40,6 +47,8 @@ struct persistent_ram_zone { void *vaddr; struct persistent_ram_buffer *buffer; size_t buffer_size; + u32 flags; + raw_spinlock_t buffer_lock; /* ECC correction */ char *par_buffer; @@ -55,7 +64,7 @@ struct persistent_ram_zone { struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, u32 sig, struct persistent_ram_ecc_info *ecc_info, - unsigned int memtype); + unsigned int memtype, u32 flags); void persistent_ram_free(struct persistent_ram_zone *prz); void persistent_ram_zap(struct persistent_ram_zone *prz); @@ -77,6 +86,8 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz, * @mem_address physical memory address to contain ramoops */ +#define RAMOOPS_FLAG_FTRACE_PER_CPU BIT(0) + struct ramoops_platform_data { unsigned long mem_size; phys_addr_t mem_address; @@ -86,6 +97,7 @@ struct ramoops_platform_data { unsigned long ftrace_size; unsigned long pmsg_size; int dump_oops; + u32 flags; struct persistent_ram_ecc_info ecc_info; }; diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index 5ad54fc66cf0..a026bfd089db 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -58,7 +58,14 @@ struct system_device_crosststamp; * * clock operations * + * @adjfine: Adjusts the frequency of the hardware clock. 
+ * parameter scaled_ppm: Desired frequency offset from + * nominal frequency in parts per million, but with a + * 16 bit binary fractional field. + * * @adjfreq: Adjusts the frequency of the hardware clock. + * This method is deprecated. New drivers should implement + * the @adjfine method instead. * parameter delta: Desired frequency offset from nominal frequency * in parts per billion * @@ -108,6 +115,7 @@ struct ptp_clock_info { int n_pins; int pps; struct ptp_pin_desc *pin_config; + int (*adjfine)(struct ptp_clock_info *ptp, long scaled_ppm); int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta); int (*adjtime)(struct ptp_clock_info *ptp, s64 delta); int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts); @@ -122,30 +130,6 @@ struct ptp_clock_info { struct ptp_clock; -/** - * ptp_clock_register() - register a PTP hardware clock driver - * - * @info: Structure describing the new clock. - * @parent: Pointer to the parent device of the new clock. - * - * Returns a valid pointer on success or PTR_ERR on failure. If PHC - * support is missing at the configuration level, this function - * returns NULL, and drivers are expected to gracefully handle that - * case separately. - */ - -extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, - struct device *parent); - -/** - * ptp_clock_unregister() - unregister a PTP hardware clock driver - * - * @ptp: The clock to remove from service. - */ - -extern int ptp_clock_unregister(struct ptp_clock *ptp); - - enum ptp_clock_events { PTP_CLOCK_ALARM, PTP_CLOCK_EXTTS, @@ -171,6 +155,31 @@ struct ptp_clock_event { }; }; +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + +/** + * ptp_clock_register() - register a PTP hardware clock driver + * + * @info: Structure describing the new clock. + * @parent: Pointer to the parent device of the new clock. + * + * Returns a valid pointer on success or PTR_ERR on failure. If PHC + * support is missing at the configuration level, this function + * returns NULL, and drivers are expected to gracefully handle that + * case separately. + */ + +extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, + struct device *parent); + +/** + * ptp_clock_unregister() - unregister a PTP hardware clock driver + * + * @ptp: The clock to remove from service. + */ + +extern int ptp_clock_unregister(struct ptp_clock *ptp); + /** * ptp_clock_event() - notify the PTP layer about an event * @@ -202,4 +211,20 @@ extern int ptp_clock_index(struct ptp_clock *ptp); int ptp_find_pin(struct ptp_clock *ptp, enum ptp_pin_function func, unsigned int chan); +#else +static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, + struct device *parent) +{ return NULL; } +static inline int ptp_clock_unregister(struct ptp_clock *ptp) +{ return 0; } +static inline void ptp_clock_event(struct ptp_clock *ptp, + struct ptp_clock_event *event) +{ } +static inline int ptp_clock_index(struct ptp_clock *ptp) +{ return -1; } +static inline int ptp_find_pin(struct ptp_clock *ptp, + enum ptp_pin_function func, unsigned int chan) +{ return -1; } +#endif + #endif diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 504c98a278d4..e0e539321ab9 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -8,6 +8,9 @@ #include <linux/pid_namespace.h> /* For task_active_pid_ns. 
*/ #include <uapi/linux/ptrace.h> +extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, + void *buf, int len, unsigned int gup_flags); + /* * Ptrace flags * @@ -19,7 +22,6 @@ #define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */ #define PT_PTRACED 0x00000001 #define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */ -#define PT_PTRACE_CAP 0x00000004 /* ptracer can follow suid-exec */ #define PT_OPT_FLAG_SHIFT 3 /* PT_TRACE_* event enable flags */ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 72d88cf3ca25..37dfba101c6c 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -56,23 +56,6 @@ struct qed_chain_pbl_u32 { u32 cons_page_idx; }; -struct qed_chain_pbl { - /* Base address of a pre-allocated buffer for pbl */ - dma_addr_t p_phys_table; - void *p_virt_table; - - /* Table for keeping the virtual addresses of the chain pages, - * respectively to the physical addresses in the pbl table. - */ - void **pp_virt_addr_tbl; - - /* Index to current used page by producer/consumer */ - union { - struct qed_chain_pbl_u16 pbl16; - struct qed_chain_pbl_u32 pbl32; - } u; -}; - struct qed_chain_u16 { /* Cyclic index of next element to produce/consume */ u16 prod_idx; @@ -86,46 +69,78 @@ struct qed_chain_u32 { }; struct qed_chain { - void *p_virt_addr; - dma_addr_t p_phys_addr; - void *p_prod_elem; - void *p_cons_elem; + /* fastpath portion of the chain - required for commands such + * as produce / consume. + */ + /* Point to next element to produce/consume */ + void *p_prod_elem; + void *p_cons_elem; + + /* Fastpath portions of the PBL [if exists] */ + struct { + /* Table for keeping the virtual addresses of the chain pages, + * respectively to the physical addresses in the pbl table. + */ + void **pp_virt_addr_tbl; - enum qed_chain_mode mode; - enum qed_chain_use_mode intended_use; /* used to produce/consume */ - enum qed_chain_cnt_type cnt_type; + union { + struct qed_chain_pbl_u16 u16; + struct qed_chain_pbl_u32 u32; + } c; + } pbl; union { struct qed_chain_u16 chain16; struct qed_chain_u32 chain32; } u; + /* Capacity counts only usable elements */ + u32 capacity; u32 page_cnt; - /* Number of elements - capacity is for usable elements only, - * while size will contain total number of elements [for entire chain]. + enum qed_chain_mode mode; + + /* Elements information for fast calculations */ + u16 elem_per_page; + u16 elem_per_page_mask; + u16 elem_size; + u16 next_page_mask; + u16 usable_per_page; + u8 elem_unusable; + + u8 cnt_type; + + /* Slowpath of the chain - required for initialization and destruction, + * but isn't involved in regular functionality. */ - u32 capacity; + + /* Base address of a pre-allocated buffer for pbl */ + struct { + dma_addr_t p_phys_table; + void *p_virt_table; + } pbl_sp; + + /* Address of first page of the chain - the address is required + * for fastpath operation [consume/produce] but only for the SINGLE + * flavour which isn't considered fastpath [== SPQ].
+ */ + void *p_virt_addr; + dma_addr_t p_phys_addr; + + /* Total number of elements [for entire chain] */ u32 size; - /* Elements information for fast calculations */ - u16 elem_per_page; - u16 elem_per_page_mask; - u16 elem_unusable; - u16 usable_per_page; - u16 elem_size; - u16 next_page_mask; - struct qed_chain_pbl pbl; + u8 intended_use; }; #define QED_CHAIN_PBL_ENTRY_SIZE (8) #define QED_CHAIN_PAGE_SIZE (0x1000) #define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size)) -#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ - ((mode == QED_CHAIN_MODE_NEXT_PTR) ? \ - (1 + ((sizeof(struct qed_chain_next) - 1) / \ - (elem_size))) : 0) +#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ + (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \ + (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \ + (elem_size))) : 0) #define USABLE_ELEMS_PER_PAGE(elem_size, mode) \ ((u32)(ELEMS_PER_PAGE(elem_size) - \ @@ -186,7 +201,7 @@ static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain) return p_chain->usable_per_page; } -static inline u16 qed_chain_get_unusable_per_page(struct qed_chain *p_chain) +static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain) { return p_chain->elem_unusable; } @@ -198,7 +213,7 @@ static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain) static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain) { - return p_chain->pbl.p_phys_table; + return p_chain->pbl_sp.p_phys_table; } /** @@ -214,10 +229,10 @@ static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain) static inline void qed_chain_advance_page(struct qed_chain *p_chain, void **p_next_elem, void *idx_to_inc, void *page_to_inc) - { struct qed_chain_next *p_next = NULL; u32 page_index = 0; + switch (p_chain->mode) { case QED_CHAIN_MODE_NEXT_PTR: p_next = *p_next_elem; @@ -305,7 +320,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain) if ((p_chain->u.chain16.prod_idx & p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_prod_idx = &p_chain->u.chain16.prod_idx; - p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx; + p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx; qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, p_prod_idx, p_prod_page_idx); } @@ -314,7 +329,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain) if ((p_chain->u.chain32.prod_idx & p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_prod_idx = &p_chain->u.chain32.prod_idx; - p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx; + p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx; qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, p_prod_idx, p_prod_page_idx); } @@ -378,7 +393,7 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain) if ((p_chain->u.chain16.cons_idx & p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_cons_idx = &p_chain->u.chain16.cons_idx; - p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx; + p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx; qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, p_cons_idx, p_cons_page_idx); } @@ -387,8 +402,8 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain) if ((p_chain->u.chain32.cons_idx & p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_cons_idx = &p_chain->u.chain32.cons_idx; - p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx; - qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, + p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx; + qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, 
p_cons_idx, p_cons_page_idx); } p_chain->u.chain32.cons_idx++; @@ -429,25 +444,26 @@ static inline void qed_chain_reset(struct qed_chain *p_chain) u32 reset_val = p_chain->page_cnt - 1; if (is_chain_u16(p_chain)) { - p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val; - p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val; + p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val; + p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val; } else { - p_chain->pbl.u.pbl32.prod_page_idx = reset_val; - p_chain->pbl.u.pbl32.cons_page_idx = reset_val; + p_chain->pbl.c.u32.prod_page_idx = reset_val; + p_chain->pbl.c.u32.cons_page_idx = reset_val; } } switch (p_chain->intended_use) { - case QED_CHAIN_USE_TO_CONSUME_PRODUCE: - case QED_CHAIN_USE_TO_PRODUCE: - /* Do nothing */ - break; - case QED_CHAIN_USE_TO_CONSUME: /* produce empty elements */ for (i = 0; i < p_chain->capacity; i++) qed_chain_recycle_consumed(p_chain); break; + + case QED_CHAIN_USE_TO_CONSUME_PRODUCE: + case QED_CHAIN_USE_TO_PRODUCE: + default: + /* Do nothing */ + break; } } @@ -473,13 +489,13 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain, p_chain->p_virt_addr = NULL; p_chain->p_phys_addr = 0; p_chain->elem_size = elem_size; - p_chain->intended_use = intended_use; + p_chain->intended_use = (u8)intended_use; p_chain->mode = mode; - p_chain->cnt_type = cnt_type; + p_chain->cnt_type = (u8)cnt_type; - p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size); + p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size); p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode); - p_chain->elem_per_page_mask = p_chain->elem_per_page - 1; + p_chain->elem_per_page_mask = p_chain->elem_per_page - 1; p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); p_chain->next_page_mask = (p_chain->usable_per_page & p_chain->elem_per_page_mask); @@ -488,8 +504,8 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain, p_chain->capacity = p_chain->usable_per_page * page_cnt; p_chain->size = p_chain->elem_per_page * page_cnt; - p_chain->pbl.p_phys_table = 0; - p_chain->pbl.p_virt_table = NULL; + p_chain->pbl_sp.p_phys_table = 0; + p_chain->pbl_sp.p_virt_table = NULL; p_chain->pbl.pp_virt_addr_tbl = NULL; } @@ -530,8 +546,8 @@ static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain, dma_addr_t p_phys_pbl, void **pp_virt_addr_tbl) { - p_chain->pbl.p_phys_table = p_phys_pbl; - p_chain->pbl.p_virt_table = p_virt_pbl; + p_chain->pbl_sp.p_phys_table = p_phys_pbl; + p_chain->pbl_sp.p_virt_table = p_virt_pbl; p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl; } diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 33c24ebc9b7f..7a52f7c58c37 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -15,6 +15,29 @@ #include <linux/qed/qed_if.h> #include <linux/qed/qed_iov_if.h> +struct qed_queue_start_common_params { + /* Should always be relative to entity sending this. 
*/ + u8 vport_id; + u16 queue_id; + + /* Relative, but relevant only for PFs */ + u8 stats_id; + + /* These are always absolute */ + u16 sb; + u8 sb_idx; +}; + +struct qed_rxq_start_ret_params { + void __iomem *p_prod; + void *p_handle; +}; + +struct qed_txq_start_ret_params { + void __iomem *p_doorbell; + void *p_handle; +}; + struct qed_dev_eth_info { struct qed_dev_info common; @@ -22,7 +45,8 @@ struct qed_dev_eth_info { u8 num_tc; u8 port_mac[ETH_ALEN]; - u8 num_vlan_filters; + u16 num_vlan_filters; + u16 num_mac_filters; /* Legacy VF - this affects the datapath, so qede has to know */ bool is_legacy; @@ -55,18 +79,6 @@ struct qed_start_vport_params { bool clear_stats; }; -struct qed_stop_rxq_params { - u8 rss_id; - u8 rx_queue_id; - u8 vport_id; - bool eq_completion_only; -}; - -struct qed_stop_txq_params { - u8 rss_id; - u8 tx_queue_id; -}; - enum qed_filter_rx_mode_type { QED_FILTER_RX_MODE_TYPE_REGULAR, QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC, @@ -111,15 +123,6 @@ struct qed_filter_params { union qed_filter_type_params filter; }; -struct qed_queue_start_common_params { - u8 rss_id; - u8 queue_id; - u8 vport_id; - u16 sb; - u16 sb_idx; - u16 vf_qid; -}; - struct qed_tunn_params { u16 vxlan_port; u8 update_vxlan_port; @@ -129,7 +132,7 @@ struct qed_tunn_params { struct qed_eth_cb_ops { struct qed_common_cb_ops common; - void (*force_mac) (void *dev, u8 *mac); + void (*force_mac) (void *dev, u8 *mac, bool forced); }; #ifdef CONFIG_DCB @@ -219,24 +222,24 @@ struct qed_eth_ops { struct qed_update_vport_params *params); int (*q_rx_start)(struct qed_dev *cdev, + u8 rss_num, struct qed_queue_start_common_params *params, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size, - void __iomem **pp_prod); + struct qed_rxq_start_ret_params *ret_params); - int (*q_rx_stop)(struct qed_dev *cdev, - struct qed_stop_rxq_params *params); + int (*q_rx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle); int (*q_tx_start)(struct qed_dev *cdev, + u8 rss_num, struct qed_queue_start_common_params *params, dma_addr_t pbl_addr, u16 pbl_size, - void __iomem **pp_doorbell); + struct qed_txq_start_ret_params *ret_params); - int (*q_tx_stop)(struct qed_dev *cdev, - struct qed_stop_txq_params *params); + int (*q_tx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle); int (*filter_config)(struct qed_dev *cdev, struct qed_filter_params *params); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 8978a60371f4..4b454f4f5b25 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -166,6 +166,7 @@ struct qed_iscsi_pf_params { u32 max_cwnd; u16 cq_num_entries; u16 cmdq_num_entries; + u32 two_msl_timer; u16 dup_ack_threshold; u16 tx_sws_timer; u16 min_rto; @@ -267,11 +268,15 @@ struct qed_dev_info { u8 mf_mode; bool tx_switching; bool rdma_supported; + u16 mtu; + + bool wol_support; }; enum qed_sb_type { QED_SB_TYPE_L2_QUEUE, QED_SB_TYPE_CNQ, + QED_SB_TYPE_STORAGE, }; enum qed_protocol { @@ -401,6 +406,15 @@ struct qed_selftest_ops { * @return 0 on success, error otherwise. */ int (*selftest_clock)(struct qed_dev *cdev); + +/** + * @brief selftest_nvram - Perform nvram test + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*selftest_nvram) (struct qed_dev *cdev); }; struct qed_common_ops { @@ -554,6 +568,41 @@ struct qed_common_ops { */ int (*set_led)(struct qed_dev *cdev, enum qed_led_mode mode); + +/** + * @brief update_drv_state - API to inform the change in the driver state. 
+ * + * @param cdev + * @param active + * + */ + int (*update_drv_state)(struct qed_dev *cdev, bool active); + +/** + * @brief update_mac - API to inform the change in the mac address + * + * @param cdev + * @param mac + * + */ + int (*update_mac)(struct qed_dev *cdev, u8 *mac); + +/** + * @brief update_mtu - API to inform the change in the mtu + * + * @param cdev + * @param mtu + * + */ + int (*update_mtu)(struct qed_dev *cdev, u16 mtu); + +/** + * @brief update_wol - update of changes in the WoL configuration + * + * @param cdev + * @param enabled - true iff WoL should be enabled. + */ + int (*update_wol) (struct qed_dev *cdev, bool enabled); }; #define MASK_FIELD(_name, _value) \ diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h new file mode 100644 index 000000000000..d27912480cb3 --- /dev/null +++ b/include/linux/qed/qed_iscsi_if.h @@ -0,0 +1,229 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef _QED_ISCSI_IF_H +#define _QED_ISCSI_IF_H +#include <linux/types.h> +#include <linux/qed/qed_if.h> + +typedef int (*iscsi_event_cb_t) (void *context, + u8 fw_event_code, void *fw_handle); +struct qed_iscsi_stats { + u64 iscsi_rx_bytes_cnt; + u64 iscsi_rx_packet_cnt; + u64 iscsi_rx_new_ooo_isle_events_cnt; + u32 iscsi_cmdq_threshold_cnt; + u32 iscsi_rq_threshold_cnt; + u32 iscsi_immq_threshold_cnt; + + u64 iscsi_rx_dropped_pdus_task_not_valid; + + u64 iscsi_rx_data_pdu_cnt; + u64 iscsi_rx_r2t_pdu_cnt; + u64 iscsi_rx_total_pdu_cnt; + + u64 iscsi_tx_go_to_slow_start_event_cnt; + u64 iscsi_tx_fast_retransmit_event_cnt; + + u64 iscsi_tx_data_pdu_cnt; + u64 iscsi_tx_r2t_pdu_cnt; + u64 iscsi_tx_total_pdu_cnt; + + u64 iscsi_tx_bytes_cnt; + u64 iscsi_tx_packet_cnt; +}; + +struct qed_dev_iscsi_info { + struct qed_dev_info common; + + void __iomem *primary_dbq_rq_addr; + void __iomem *secondary_bdq_rq_addr; +}; + +struct qed_iscsi_id_params { + u8 mac[ETH_ALEN]; + u32 ip[4]; + u16 port; +}; + +struct qed_iscsi_params_offload { + u8 layer_code; + dma_addr_t sq_pbl_addr; + u32 initial_ack; + + struct qed_iscsi_id_params src; + struct qed_iscsi_id_params dst; + u16 vlan_id; + u8 tcp_flags; + u8 ip_version; + u8 default_cq; + + u8 ka_max_probe_cnt; + u8 dup_ack_theshold; + u32 rcv_next; + u32 snd_una; + u32 snd_next; + u32 snd_max; + u32 snd_wnd; + u32 rcv_wnd; + u32 snd_wl1; + u32 cwnd; + u32 ss_thresh; + u16 srtt; + u16 rtt_var; + u32 ts_time; + u32 ts_recent; + u32 ts_recent_age; + u32 total_rt; + u32 ka_timeout_delta; + u32 rt_timeout_delta; + u8 dup_ack_cnt; + u8 snd_wnd_probe_cnt; + u8 ka_probe_cnt; + u8 rt_cnt; + u32 flow_label; + u32 ka_timeout; + u32 ka_interval; + u32 max_rt_time; + u32 initial_rcv_wnd; + u8 ttl; + u8 tos_or_tc; + u16 remote_port; + u16 local_port; + u16 mss; + u8 snd_wnd_scale; + u8 rcv_wnd_scale; + u32 ts_ticks_per_second; + u16 da_timeout_value; + u8 ack_frequency; +}; + +struct qed_iscsi_params_update { + u8 update_flag; +#define QED_ISCSI_CONN_HD_EN BIT(0) +#define QED_ISCSI_CONN_DD_EN BIT(1) +#define QED_ISCSI_CONN_INITIAL_R2T BIT(2) +#define QED_ISCSI_CONN_IMMEDIATE_DATA BIT(3) + + u32 max_seq_size; + u32 max_recv_pdu_length; + u32 max_send_pdu_length; + u32 first_seq_length; + u32 exp_stat_sn; +}; + +#define MAX_TID_BLOCKS_ISCSI (512) +struct qed_iscsi_tid { + u32 size; /* In bytes per task */ + u32 num_tids_per_block; + u8 
*blocks[MAX_TID_BLOCKS_ISCSI]; +}; + +struct qed_iscsi_cb_ops { + struct qed_common_cb_ops common; +}; + +/** + * struct qed_iscsi_ops - qed iSCSI operations. + * @common: common operations pointer + * @ll2: light L2 operations pointer + * @fill_dev_info: fills iSCSI specific information + * @param cdev + * @param info + * @return 0 on success, otherwise error value. + * @register_ops: register iscsi operations + * @param cdev + * @param ops - specified using qed_iscsi_cb_ops + * @param cookie - driver private + * @start: starts iscsi in FW + * @param cdev + * @param tasks - qed will fill information about tasks + * @return 0 on success, otherwise error value. + * @stop: stops iscsi in FW + * @param cdev + * @return 0 on success, otherwise error value. + * @acquire_conn: acquire a new iscsi connection + * @param cdev + * @param handle - qed will fill handle that should be + * used henceforth as identifier of the + * connection. + * @param p_doorbell - qed will fill the address of the + * doorbell. + * @return 0 on success, otherwise error value. + * @release_conn: release a previously acquired iscsi connection + * @param cdev + * @param handle - the connection handle. + * @return 0 on success, otherwise error value. + * @offload_conn: configures an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @param conn_info - the configuration to use for the + * offload. + * @return 0 on success, otherwise error value. + * @update_conn: updates an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @param conn_info - the configuration to use for the + * offload. + * @return 0 on success, otherwise error value. + * @destroy_conn: stops an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @return 0 on success, otherwise error value. + * @clear_sq: clear all tasks in sq + * @param cdev + * @param handle - the connection handle. + * @return 0 on success, otherwise error value. + * @get_stats: iSCSI related statistics + * @param cdev + * @param stats - pointer to struct that will be filled + * with stats + * @return 0 on success, error otherwise.
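A hedged call-flow sketch, not from the patch itself, of how a storage driver might latch onto the ops table defined just below; every driver-side name here is hypothetical and error handling is kept minimal.

static struct qed_iscsi_cb_ops example_cb_ops;
static const struct qed_iscsi_ops *example_ops;

static int example_probe(struct qed_dev *cdev)
{
	struct qed_dev_iscsi_info info;
	int rc;

	example_ops = qed_get_iscsi_ops();
	if (!example_ops)
		return -ENODEV;

	/* Query device capabilities before registering callbacks. */
	rc = example_ops->fill_dev_info(cdev, &info);
	if (rc) {
		qed_put_iscsi_ops();
		return rc;
	}

	example_ops->register_ops(cdev, &example_cb_ops, NULL);
	return 0;
}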
+ */ +struct qed_iscsi_ops { + const struct qed_common_ops *common; + + const struct qed_ll2_ops *ll2; + + int (*fill_dev_info)(struct qed_dev *cdev, + struct qed_dev_iscsi_info *info); + + void (*register_ops)(struct qed_dev *cdev, + struct qed_iscsi_cb_ops *ops, void *cookie); + + int (*start)(struct qed_dev *cdev, + struct qed_iscsi_tid *tasks, + void *event_context, iscsi_event_cb_t async_event_cb); + + int (*stop)(struct qed_dev *cdev); + + int (*acquire_conn)(struct qed_dev *cdev, + u32 *handle, + u32 *fw_cid, void __iomem **p_doorbell); + + int (*release_conn)(struct qed_dev *cdev, u32 handle); + + int (*offload_conn)(struct qed_dev *cdev, + u32 handle, + struct qed_iscsi_params_offload *conn_info); + + int (*update_conn)(struct qed_dev *cdev, + u32 handle, + struct qed_iscsi_params_update *conn_info); + + int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn); + + int (*clear_sq)(struct qed_dev *cdev, u32 handle); + + int (*get_stats)(struct qed_dev *cdev, + struct qed_iscsi_stats *stats); +}; + +const struct qed_iscsi_ops *qed_get_iscsi_ops(void); +void qed_put_iscsi_ops(void); +#endif diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index af3581b8a451..5dea8f6440e4 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -80,26 +80,25 @@ static inline bool radix_tree_is_internal_node(void *ptr) #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ RADIX_TREE_MAP_SHIFT)) -/* Internally used bits of node->count */ -#define RADIX_TREE_COUNT_SHIFT (RADIX_TREE_MAP_SHIFT + 1) -#define RADIX_TREE_COUNT_MASK ((1UL << RADIX_TREE_COUNT_SHIFT) - 1) - +/* + * @count is the count of every non-NULL element in the ->slots array + * whether that is an exceptional entry, a retry entry, a user pointer, + * a sibling entry or a pointer to the next level of the tree. + * @exceptional is the count of every element in ->slots which is + * either radix_tree_exceptional_entry() or is a sibling entry for an + * exceptional entry. + */ struct radix_tree_node { - unsigned char shift; /* Bits remaining in each slot */ - unsigned char offset; /* Slot offset in parent */ - unsigned int count; + unsigned char shift; /* Bits remaining in each slot */ + unsigned char offset; /* Slot offset in parent */ + unsigned char count; /* Total entry count */ + unsigned char exceptional; /* Exceptional entry count */ + struct radix_tree_node *parent; /* Used when ascending tree */ + void *private_data; /* For tree user */ union { - struct { - /* Used when ascending tree */ - struct radix_tree_node *parent; - /* For tree user */ - void *private_data; - }; - /* Used when freeing node */ - struct rcu_head rcu_head; + struct list_head private_list; /* For tree user */ + struct rcu_head rcu_head; /* Used when freeing node */ }; - /* For tree user */ - struct list_head private_list; void __rcu *slots[RADIX_TREE_MAP_SIZE]; unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; }; @@ -130,6 +129,41 @@ static inline bool radix_tree_empty(struct radix_tree_root *root) } /** + * struct radix_tree_iter - radix tree iterator state + * + * @index: index of current slot + * @next_index: one beyond the last index for this chunk + * @tags: bit-mask for tag-iterating + * @node: node that contains current slot + * @shift: shift for the node that holds our slots + * + * This radix tree iterator works in terms of "chunks" of slots. A chunk is a + * subinterval of slots contained within one radix tree leaf node. 
It is + * described by a pointer to its first slot and a struct radix_tree_iter + * which holds the chunk's position in the tree and its size. For tagged + * iteration radix_tree_iter also holds the slots' bit-mask for one chosen + * radix tree tag. + */ +struct radix_tree_iter { + unsigned long index; + unsigned long next_index; + unsigned long tags; + struct radix_tree_node *node; +#ifdef CONFIG_RADIX_TREE_MULTIORDER + unsigned int shift; +#endif +}; + +static inline unsigned int iter_shift(const struct radix_tree_iter *iter) +{ +#ifdef CONFIG_RADIX_TREE_MULTIORDER + return iter->shift; +#else + return 0; +#endif +} + +/** * Radix-tree synchronization * * The radix-tree API requires that users provide all synchronisation (with @@ -248,20 +282,6 @@ static inline int radix_tree_exception(void *arg) return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK); } -/** - * radix_tree_replace_slot - replace item in a slot - * @pslot: pointer to slot, returned by radix_tree_lookup_slot - * @item: new item to store in the slot. - * - * For use with radix_tree_lookup_slot(). Caller must hold tree write locked - * across slot lookup and replacement. - */ -static inline void radix_tree_replace_slot(void **pslot, void *item) -{ - BUG_ON(radix_tree_is_internal_node(item)); - rcu_assign_pointer(*pslot, item); -} - int __radix_tree_create(struct radix_tree_root *root, unsigned long index, unsigned order, struct radix_tree_node **nodep, void ***slotp); @@ -276,7 +296,16 @@ void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, struct radix_tree_node **nodep, void ***slotp); void *radix_tree_lookup(struct radix_tree_root *, unsigned long); void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); -bool __radix_tree_delete_node(struct radix_tree_root *root, +typedef void (*radix_tree_update_node_t)(struct radix_tree_node *, void *); +void __radix_tree_replace(struct radix_tree_root *root, + struct radix_tree_node *node, + void **slot, void *item, + radix_tree_update_node_t update_node, void *private); +void radix_tree_iter_replace(struct radix_tree_root *, + const struct radix_tree_iter *, void **slot, void *item); +void radix_tree_replace_slot(struct radix_tree_root *root, + void **slot, void *item); +void __radix_tree_delete_node(struct radix_tree_root *root, struct radix_tree_node *node); void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); void *radix_tree_delete(struct radix_tree_root *, unsigned long); @@ -299,6 +328,8 @@ void *radix_tree_tag_clear(struct radix_tree_root *root, unsigned long index, unsigned int tag); int radix_tree_tag_get(struct radix_tree_root *root, unsigned long index, unsigned int tag); +void radix_tree_iter_tag_set(struct radix_tree_root *root, + const struct radix_tree_iter *iter, unsigned int tag); unsigned int radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items, @@ -307,50 +338,18 @@ unsigned int radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, unsigned long first_index, unsigned int max_items, unsigned int tag); -unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, - unsigned long *first_indexp, unsigned long last_index, - unsigned long nr_to_tag, - unsigned int fromtag, unsigned int totag); int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); -unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item); static inline void radix_tree_preload_end(void) { 
preempt_enable(); } -/** - * struct radix_tree_iter - radix tree iterator state - * - * @index: index of current slot - * @next_index: one beyond the last index for this chunk - * @tags: bit-mask for tag-iterating - * @shift: shift for the node that holds our slots - * - * This radix tree iterator works in terms of "chunks" of slots. A chunk is a - * subinterval of slots contained within one radix tree leaf node. It is - * described by a pointer to its first slot and a struct radix_tree_iter - * which holds the chunk's position in the tree and its size. For tagged - * iteration radix_tree_iter also holds the slots' bit-mask for one chosen - * radix tree tag. - */ -struct radix_tree_iter { - unsigned long index; - unsigned long next_index; - unsigned long tags; -#ifdef CONFIG_RADIX_TREE_MULTIORDER - unsigned int shift; -#endif -}; - -static inline unsigned int iter_shift(struct radix_tree_iter *iter) -{ -#ifdef CONFIG_RADIX_TREE_MULTIORDER - return iter->shift; -#else - return 0; -#endif -} +int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t); +int radix_tree_split(struct radix_tree_root *, unsigned long index, + unsigned new_order); +int radix_tree_join(struct radix_tree_root *, unsigned long index, + unsigned new_order, void *); #define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */ #define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */ @@ -419,20 +418,17 @@ __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots) } /** - * radix_tree_iter_next - resume iterating when the chunk may be invalid - * @iter: iterator state + * radix_tree_iter_resume - resume iterating when the chunk may be invalid + * @slot: pointer to current slot + * @iter: iterator state + * Returns: New slot pointer * * If the iterator needs to release then reacquire a lock, the chunk may * have been invalidated by an insertion or deletion. Call this function - * to continue the iteration from the next index. + * before releasing the lock to continue the iteration from the next index. */ -static inline __must_check -void **radix_tree_iter_next(struct radix_tree_iter *iter) -{ - iter->next_index = __radix_tree_iter_add(iter, 1); - iter->tags = 0; - return NULL; -} +void **__must_check radix_tree_iter_resume(void **slot, + struct radix_tree_iter *iter); /** * radix_tree_chunk_size - get current chunk size @@ -446,10 +442,17 @@ radix_tree_chunk_size(struct radix_tree_iter *iter) return (iter->next_index - iter->index) >> iter_shift(iter); } -static inline struct radix_tree_node *entry_to_node(void *ptr) +#ifdef CONFIG_RADIX_TREE_MULTIORDER +void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, + unsigned flags); +#else +/* Can't happen without sibling entries, but the compiler can't tell that */ +static inline void ** __radix_tree_next_slot(void **slot, + struct radix_tree_iter *iter, unsigned flags) { - return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE); + return slot; } +#endif /** * radix_tree_next_slot - find next slot in chunk @@ -463,7 +466,7 @@ static inline struct radix_tree_node *entry_to_node(void *ptr) * For tagged lookup it also eats @iter->tags. * * There are several cases where 'slot' can be passed in as NULL to this - * function. These cases result from the use of radix_tree_iter_next() or + * function. These cases result from the use of radix_tree_iter_resume() or * radix_tree_iter_retry(). 
In these cases we don't end up dereferencing
 * 'slot' because either:
 * a) we are doing tagged iteration and iter->tags has been set to 0, or
@@ -474,51 +477,31 @@ static __always_inline void **
 radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
 {
 	if (flags & RADIX_TREE_ITER_TAGGED) {
-		void *canon = slot;
-
 		iter->tags >>= 1;
 		if (unlikely(!iter->tags))
 			return NULL;
-		while (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) &&
-					radix_tree_is_internal_node(slot[1])) {
-			if (entry_to_node(slot[1]) == canon) {
-				iter->tags >>= 1;
-				iter->index = __radix_tree_iter_add(iter, 1);
-				slot++;
-				continue;
-			}
-			iter->next_index = __radix_tree_iter_add(iter, 1);
-			return NULL;
-		}
 		if (likely(iter->tags & 1ul)) {
 			iter->index = __radix_tree_iter_add(iter, 1);
-			return slot + 1;
+			slot++;
+			goto found;
 		}
 		if (!(flags & RADIX_TREE_ITER_CONTIG)) {
 			unsigned offset = __ffs(iter->tags);
-			iter->tags >>= offset;
-			iter->index = __radix_tree_iter_add(iter, offset + 1);
-			return slot + offset + 1;
+			iter->tags >>= offset++;
+			iter->index = __radix_tree_iter_add(iter, offset);
+			slot += offset;
+			goto found;
 		}
 	} else {
 		long count = radix_tree_chunk_size(iter);
-		void *canon = slot;
 
 		while (--count > 0) {
 			slot++;
 			iter->index = __radix_tree_iter_add(iter, 1);
-			if (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) &&
-			    radix_tree_is_internal_node(*slot)) {
-				if (entry_to_node(*slot) == canon)
-					continue;
-				iter->next_index = iter->index;
-				break;
-			}
 			if (likely(*slot))
-				return slot;
+				goto found;
 			if (flags & RADIX_TREE_ITER_CONTIG) {
 				/* forbid switching to the next chunk */
 				iter->next_index = 0;
@@ -527,6 +510,11 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
 		}
 	}
 	return NULL;
+
+ found:
+	if (unlikely(radix_tree_is_internal_node(*slot)))
+		return __radix_tree_next_slot(slot, iter, flags);
+	return slot;
 }
 
 /**
@@ -577,6 +565,6 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
 	     slot || (slot = radix_tree_next_chunk(root, iter,		\
 				RADIX_TREE_ITER_TAGGED | tag)) ;	\
 	     slot = radix_tree_next_slot(slot, iter,			\
-				RADIX_TREE_ITER_TAGGED))
+				RADIX_TREE_ITER_TAGGED | tag))
 
 #endif /* _LINUX_RADIX_TREE_H */
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 8beb98dcf14f..4f7a9561b8c4 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -45,19 +45,17 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
  * This is only for internal list manipulation where we know
  * the prev/next entries already!
  */
-#ifndef CONFIG_DEBUG_LIST
 static inline void __list_add_rcu(struct list_head *new,
 		struct list_head *prev, struct list_head *next)
 {
+	if (!__list_add_valid(new, prev, next))
+		return;
+
 	new->next = next;
 	new->prev = prev;
 	rcu_assign_pointer(list_next_rcu(prev), new);
 	next->prev = new;
 }
-#else
-void __list_add_rcu(struct list_head *new,
-		    struct list_head *prev, struct list_head *next);
-#endif
 
 /**
  * list_add_rcu - add a new entry to rcu-protected list
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 692108222271..ea0fffa5faeb 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -120,6 +120,25 @@ struct regmap;
 #define REGULATOR_EVENT_PRE_DISABLE		0x400
 #define REGULATOR_EVENT_ABORT_DISABLE		0x800
 
+/*
+ * Regulator errors that can be queried using regulator_get_error_flags
+ *
+ * UNDER_VOLTAGE  Regulator output is under voltage.
+ * OVER_CURRENT   Regulator output current is too high.
+ * REGULATION_OUT Regulator output is out of regulation.
+ * FAIL           Regulator output has failed.
+ * OVER_TEMP      Regulator over temperature.
+ *
+ * NOTE: These errors can be OR'ed together.
+ */
+
+#define REGULATOR_ERROR_UNDER_VOLTAGE		BIT(1)
+#define REGULATOR_ERROR_OVER_CURRENT		BIT(2)
+#define REGULATOR_ERROR_REGULATION_OUT		BIT(3)
+#define REGULATOR_ERROR_FAIL			BIT(4)
+#define REGULATOR_ERROR_OVER_TEMP		BIT(5)
+
+
 /**
  * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event
  *
@@ -237,6 +256,8 @@ int regulator_get_current_limit(struct regulator *regulator);
 
 int regulator_set_mode(struct regulator *regulator, unsigned int mode);
 unsigned int regulator_get_mode(struct regulator *regulator);
+int regulator_get_error_flags(struct regulator *regulator,
+			      unsigned int *flags);
 int regulator_set_load(struct regulator *regulator, int load_uA);
 
 int regulator_allow_bypass(struct regulator *regulator, bool allow);
@@ -477,6 +498,12 @@ static inline unsigned int regulator_get_mode(struct regulator *regulator)
 	return REGULATOR_MODE_NORMAL;
 }
 
+static inline int regulator_get_error_flags(struct regulator *regulator,
+					    unsigned int *flags)
+{
+	return -EINVAL;
+}
+
 static inline int regulator_set_load(struct regulator *regulator, int load_uA)
 {
 	return REGULATOR_MODE_NORMAL;
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 37b532410528..dac8e7b16bc6 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -100,6 +100,7 @@ struct regulator_linear_range {
 *
 * @set_mode: Set the configured operating mode for the regulator.
 * @get_mode: Get the configured operating mode for the regulator.
+ * @get_error_flags: Get the current error(s) for the regulator.
 * @get_status: Return actual (not as-configured) status of regulator, as a
 *	REGULATOR_STATUS value (or negative errno)
 * @get_optimum_mode: Get the most efficient operating mode for the regulator
@@ -169,6 +170,9 @@ struct regulator_ops {
 	int (*set_mode) (struct regulator_dev *, unsigned int mode);
 	unsigned int (*get_mode) (struct regulator_dev *);
 
+	/* retrieve current error flags on the regulator */
+	int (*get_error_flags)(struct regulator_dev *, unsigned int *flags);
+
 	/* Time taken to enable or set voltage on the regulator */
 	int (*enable_time) (struct regulator_dev *);
 	int (*set_ramp_delay) (struct regulator_dev *, int ramp_delay);
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 930023b7c825..e2f3a3281d8f 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -400,6 +400,7 @@ enum rproc_crash_type {
 * @firmware_loading_complete: marks end of asynchronous firmware loading
 * @bootaddr: address of first instruction to boot rproc with (optional)
 * @rvdevs: list of remote virtio devices
+ * @subdevs: list of subdevices, to follow the running state
 * @notifyids: idr for dynamically assigning rproc-wide unique notify ids
 * @index: index of this rproc device
 * @crash_handler: workqueue for handling a crash
@@ -407,15 +408,14 @@ enum rproc_crash_type {
 * @crash_comp: completion used to sync crash handler and the rproc reload
 * @recovery_disabled: flag that states if recovery was disabled
 * @max_notifyid: largest allocated notify id.
- * @table_ptr: pointer to the resource table in effect - * @cached_table: copy of the resource table + * @table_ptr: our copy of the resource table * @has_iommu: flag to indicate if remote processor is behind an MMU */ struct rproc { struct list_head node; struct iommu_domain *domain; const char *name; - const char *firmware; + char *firmware; void *priv; const struct rproc_ops *ops; struct device dev; @@ -431,6 +431,7 @@ struct rproc { struct completion firmware_loading_complete; u32 bootaddr; struct list_head rvdevs; + struct list_head subdevs; struct idr notifyids; int index; struct work_struct crash_handler; @@ -439,11 +440,23 @@ struct rproc { bool recovery_disabled; int max_notifyid; struct resource_table *table_ptr; - struct resource_table *cached_table; bool has_iommu; bool auto_boot; }; +/** + * struct rproc_subdev - subdevice tied to a remoteproc + * @node: list node related to the rproc subdevs list + * @probe: probe function, called as the rproc is started + * @remove: remove function, called as the rproc is stopped + */ +struct rproc_subdev { + struct list_head node; + + int (*probe)(struct rproc_subdev *subdev); + void (*remove)(struct rproc_subdev *subdev); +}; + /* we currently support only two vrings per rvdev */ #define RVDEV_NUM_VRINGS 2 @@ -472,6 +485,9 @@ struct rproc_vring { /** * struct rproc_vdev - remoteproc state for a supported virtio device + * @refcount: reference counter for the vdev and vring allocations + * @subdev: handle for registering the vdev as a rproc subdevice + * @id: virtio device id (as in virtio_ids.h) * @node: list node * @rproc: the rproc handle * @vdev: the virio device @@ -479,6 +495,11 @@ struct rproc_vring { * @rsc_offset: offset of the vdev's resource entry */ struct rproc_vdev { + struct kref refcount; + + struct rproc_subdev subdev; + + unsigned int id; struct list_head node; struct rproc *rproc; struct virtio_device vdev; @@ -511,4 +532,11 @@ static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev) return rvdev->rproc; } +void rproc_add_subdev(struct rproc *rproc, + struct rproc_subdev *subdev, + int (*probe)(struct rproc_subdev *subdev), + void (*remove)(struct rproc_subdev *subdev)); + +void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev); + #endif /* REMOTEPROC_H */ diff --git a/include/linux/remoteproc/st_slim_rproc.h b/include/linux/remoteproc/st_slim_rproc.h new file mode 100644 index 000000000000..4155556fa4b2 --- /dev/null +++ b/include/linux/remoteproc/st_slim_rproc.h @@ -0,0 +1,58 @@ +/* + * SLIM core rproc driver header + * + * Copyright (C) 2016 STMicroelectronics + * + * Author: Peter Griffin <peter.griffin@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ +#ifndef _ST_REMOTEPROC_SLIM_H +#define _ST_REMOTEPROC_SLIM_H + +#define ST_SLIM_MEM_MAX 2 +#define ST_SLIM_MAX_CLK 4 + +enum { + ST_SLIM_DMEM, + ST_SLIM_IMEM, +}; + +/** + * struct st_slim_mem - slim internal memory structure + * @cpu_addr: MPU virtual address of the memory region + * @bus_addr: Bus address used to access the memory region + * @size: Size of the memory region + */ +struct st_slim_mem { + void __iomem *cpu_addr; + phys_addr_t bus_addr; + size_t size; +}; + +/** + * struct st_slim_rproc - SLIM slim core + * @rproc: rproc handle + * @mem: slim memory information + * @slimcore: slim slimcore regs + * @peri: slim peripheral regs + * @clks: slim clocks + */ +struct st_slim_rproc { + struct rproc *rproc; + struct st_slim_mem mem[ST_SLIM_MEM_MAX]; + void __iomem *slimcore; + void __iomem *peri; + + /* st_slim_rproc private */ + struct clk *clks[ST_SLIM_MAX_CLK]; +}; + +struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev, + char *fw_name); +void st_slim_rproc_put(struct st_slim_rproc *slim_rproc); + +#endif diff --git a/include/linux/reservation.h b/include/linux/reservation.h index b0f305e77b7f..d9706a6f5ae2 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h @@ -40,7 +40,7 @@ #define _LINUX_RESERVATION_H #include <linux/ww_mutex.h> -#include <linux/fence.h> +#include <linux/dma-fence.h> #include <linux/slab.h> #include <linux/seqlock.h> #include <linux/rcupdate.h> @@ -59,7 +59,7 @@ extern const char reservation_seqcount_string[]; struct reservation_object_list { struct rcu_head rcu; u32 shared_count, shared_max; - struct fence __rcu *shared[]; + struct dma_fence __rcu *shared[]; }; /** @@ -74,7 +74,7 @@ struct reservation_object { struct ww_mutex lock; seqcount_t seq; - struct fence __rcu *fence_excl; + struct dma_fence __rcu *fence_excl; struct reservation_object_list __rcu *fence; struct reservation_object_list *staged; }; @@ -107,7 +107,7 @@ reservation_object_fini(struct reservation_object *obj) { int i; struct reservation_object_list *fobj; - struct fence *excl; + struct dma_fence *excl; /* * This object should be dead and all references must have @@ -115,12 +115,12 @@ reservation_object_fini(struct reservation_object *obj) */ excl = rcu_dereference_protected(obj->fence_excl, 1); if (excl) - fence_put(excl); + dma_fence_put(excl); fobj = rcu_dereference_protected(obj->fence, 1); if (fobj) { for (i = 0; i < fobj->shared_count; ++i) - fence_put(rcu_dereference_protected(fobj->shared[i], 1)); + dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1)); kfree(fobj); } @@ -155,7 +155,7 @@ reservation_object_get_list(struct reservation_object *obj) * RETURNS * The exclusive fence or NULL */ -static inline struct fence * +static inline struct dma_fence * reservation_object_get_excl(struct reservation_object *obj) { return rcu_dereference_protected(obj->fence_excl, @@ -173,35 +173,32 @@ reservation_object_get_excl(struct reservation_object *obj) * RETURNS * The exclusive fence or NULL if none */ -static inline struct fence * +static inline struct dma_fence * reservation_object_get_excl_rcu(struct reservation_object *obj) { - struct fence *fence; - unsigned seq; -retry: - seq = read_seqcount_begin(&obj->seq); + struct dma_fence *fence; + + if (!rcu_access_pointer(obj->fence_excl)) + return NULL; + rcu_read_lock(); - fence = rcu_dereference(obj->fence_excl); - if (read_seqcount_retry(&obj->seq, seq)) { - rcu_read_unlock(); - goto retry; - } - fence = fence_get(fence); + fence = dma_fence_get_rcu_safe(&obj->fence_excl); 
rcu_read_unlock(); + return fence; } int reservation_object_reserve_shared(struct reservation_object *obj); void reservation_object_add_shared_fence(struct reservation_object *obj, - struct fence *fence); + struct dma_fence *fence); void reservation_object_add_excl_fence(struct reservation_object *obj, - struct fence *fence); + struct dma_fence *fence); int reservation_object_get_fences_rcu(struct reservation_object *obj, - struct fence **pfence_excl, + struct dma_fence **pfence_excl, unsigned *pshared_count, - struct fence ***pshared); + struct dma_fence ***pshared); long reservation_object_wait_timeout_rcu(struct reservation_object *obj, bool wait_all, bool intr, diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h new file mode 100644 index 000000000000..0d905d8ec553 --- /dev/null +++ b/include/linux/restart_block.h @@ -0,0 +1,51 @@ +/* + * Common syscall restarting data + */ +#ifndef __LINUX_RESTART_BLOCK_H +#define __LINUX_RESTART_BLOCK_H + +#include <linux/compiler.h> +#include <linux/types.h> + +struct timespec; +struct compat_timespec; +struct pollfd; + +/* + * System call restart block. + */ +struct restart_block { + long (*fn)(struct restart_block *); + union { + /* For futex_wait and futex_wait_requeue_pi */ + struct { + u32 __user *uaddr; + u32 val; + u32 flags; + u32 bitset; + u64 time; + u32 __user *uaddr2; + } futex; + /* For nanosleep */ + struct { + clockid_t clockid; + struct timespec __user *rmtp; +#ifdef CONFIG_COMPAT + struct compat_timespec __user *compat_rmtp; +#endif + u64 expires; + } nanosleep; + /* For poll */ + struct { + struct pollfd __user *ufds; + int nfds; + int has_timeout; + unsigned long tv_sec; + unsigned long tv_nsec; + } poll; + }; +}; + +extern long do_no_restart_syscall(struct restart_block *parm); + +#endif /* __LINUX_RESTART_BLOCK_H */ diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 4acc552e9279..b6d4568795a7 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -198,4 +198,10 @@ enum ring_buffer_flags { RB_FL_OVERWRITE = 1 << 0, }; +#ifdef CONFIG_RING_BUFFER +int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node); +#else +#define trace_rb_cpu_prepare NULL +#endif + #endif /* _LINUX_RING_BUFFER_H */ diff --git a/include/linux/rmap.h b/include/linux/rmap.h index b46bb5620a76..15321fb1df6b 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -137,11 +137,19 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma) * anon_vma helper functions. 
*/ void anon_vma_init(void); /* create anon_vma_cachep */ -int anon_vma_prepare(struct vm_area_struct *); +int __anon_vma_prepare(struct vm_area_struct *); void unlink_anon_vmas(struct vm_area_struct *); int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *); int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *); +static inline int anon_vma_prepare(struct vm_area_struct *vma) +{ + if (likely(vma->anon_vma)) + return 0; + + return __anon_vma_prepare(vma); +} + static inline void anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next) { diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h index 452d393cc8dd..18f9e1ae4b7e 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h @@ -37,6 +37,7 @@ #include <linux/types.h> #include <linux/device.h> +#include <linux/err.h> #include <linux/mod_devicetable.h> #include <linux/kref.h> #include <linux/mutex.h> @@ -64,6 +65,7 @@ struct rpmsg_channel_info { * rpmsg_device - device that belong to the rpmsg bus * @dev: the device struct * @id: device id (used to match between rpmsg drivers and devices) + * @driver_override: driver name to force a match * @src: local address * @dst: destination address * @ept: the rpmsg endpoint of this channel @@ -72,6 +74,7 @@ struct rpmsg_channel_info { struct rpmsg_device { struct device dev; struct rpmsg_device_id id; + char *driver_override; u32 src; u32 dst; struct rpmsg_endpoint *ept; @@ -132,6 +135,8 @@ struct rpmsg_driver { int (*callback)(struct rpmsg_device *, void *, int, void *, u32); }; +#if IS_ENABLED(CONFIG_RPMSG) + int register_rpmsg_device(struct rpmsg_device *dev); void unregister_rpmsg_device(struct rpmsg_device *dev); int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner); @@ -141,6 +146,116 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *, rpmsg_rx_cb_t cb, void *priv, struct rpmsg_channel_info chinfo); +int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len); +int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); +int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, + void *data, int len); + +int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len); +int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); +int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, + void *data, int len); + +#else + +static inline int register_rpmsg_device(struct rpmsg_device *dev) +{ + return -ENXIO; +} + +static inline void unregister_rpmsg_device(struct rpmsg_device *dev) +{ + /* This shouldn't be possible */ + WARN_ON(1); +} + +static inline int __register_rpmsg_driver(struct rpmsg_driver *drv, + struct module *owner) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline void unregister_rpmsg_driver(struct rpmsg_driver *drv) +{ + /* This shouldn't be possible */ + WARN_ON(1); +} + +static inline void rpmsg_destroy_ept(struct rpmsg_endpoint *ept) +{ + /* This shouldn't be possible */ + WARN_ON(1); +} + +static inline struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev, + rpmsg_rx_cb_t cb, + void *priv, + struct rpmsg_channel_info chinfo) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return ERR_PTR(-ENXIO); +} + +static inline int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, + u32 dst) +{ + /* This 
shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; + +} + +static inline int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, + u32 dst, void *data, int len) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, + int len, u32 dst) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, + u32 dst, void *data, int len) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +#endif /* IS_ENABLED(CONFIG_RPMSG) */ + /* use a macro to avoid include chaining to get THIS_MODULE */ #define register_rpmsg_driver(drv) \ __register_rpmsg_driver(drv, THIS_MODULE) @@ -157,14 +272,4 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *, module_driver(__rpmsg_driver, register_rpmsg_driver, \ unregister_rpmsg_driver) -int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len); -int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); -int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, - void *data, int len); - -int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len); -int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); -int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, - void *data, int len); - #endif /* _LINUX_RPMSG_H */ diff --git a/include/linux/rpmsg/qcom_smd.h b/include/linux/rpmsg/qcom_smd.h new file mode 100644 index 000000000000..e674b2e3074b --- /dev/null +++ b/include/linux/rpmsg/qcom_smd.h @@ -0,0 +1,33 @@ + +#ifndef _LINUX_RPMSG_QCOM_SMD_H +#define _LINUX_RPMSG_QCOM_SMD_H + +#include <linux/device.h> + +struct qcom_smd_edge; + +#if IS_ENABLED(CONFIG_RPMSG_QCOM_SMD) || IS_ENABLED(CONFIG_QCOM_SMD) + +struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, + struct device_node *node); +int qcom_smd_unregister_edge(struct qcom_smd_edge *edge); + +#else + +static inline struct qcom_smd_edge * +qcom_smd_register_edge(struct device *parent, + struct device_node *node) +{ + return ERR_PTR(-ENXIO); +} + +static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge) +{ + /* This shouldn't be possible */ + WARN_ON(1); + return -ENXIO; +} + +#endif + +#endif diff --git a/include/linux/sched.h b/include/linux/sched.h index 348f51b0ec92..a440cf178191 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -262,20 +262,9 @@ extern char ___assert_task_state[1 - 2*!!( #define set_task_state(tsk, state_value) \ do { \ (tsk)->task_state_change = _THIS_IP_; \ - smp_store_mb((tsk)->state, (state_value)); \ + smp_store_mb((tsk)->state, (state_value)); \ } while (0) -/* - * set_current_state() includes a barrier so that the write of current->state - * is correctly serialised wrt the caller's subsequent test of whether to - * actually sleep: - * - * set_current_state(TASK_UNINTERRUPTIBLE); - * if (do_i_need_to_sleep()) - * schedule(); - * - * If the caller does not need such serialisation then use __set_current_state() - */ #define __set_current_state(state_value) \ do { \ current->task_state_change = _THIS_IP_; \ @@ -284,11 +273,19 @@ extern char ___assert_task_state[1 - 2*!!( #define set_current_state(state_value) \ do { \ current->task_state_change = _THIS_IP_; \ - 
smp_store_mb(current->state, (state_value)); \ + smp_store_mb(current->state, (state_value)); \ } while (0) #else +/* + * @tsk had better be current, or you get to keep the pieces. + * + * The only reason is that computing current can be more expensive than + * using a pointer that's already available. + * + * Therefore, see set_current_state(). + */ #define __set_task_state(tsk, state_value) \ do { (tsk)->state = (state_value); } while (0) #define set_task_state(tsk, state_value) \ @@ -299,11 +296,34 @@ extern char ___assert_task_state[1 - 2*!!( * is correctly serialised wrt the caller's subsequent test of whether to * actually sleep: * + * for (;;) { * set_current_state(TASK_UNINTERRUPTIBLE); - * if (do_i_need_to_sleep()) - * schedule(); + * if (!need_sleep) + * break; + * + * schedule(); + * } + * __set_current_state(TASK_RUNNING); + * + * If the caller does not need such serialisation (because, for instance, the + * condition test and condition change and wakeup are under the same lock) then + * use __set_current_state(). + * + * The above is typically ordered against the wakeup, which does: + * + * need_sleep = false; + * wake_up_state(p, TASK_UNINTERRUPTIBLE); + * + * Where wake_up_state() (and all other wakeup primitives) imply enough + * barriers to order the store of the variable against wakeup. * - * If the caller does not need such serialisation then use __set_current_state() + * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is, + * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a + * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING). + * + * This is obviously fine, since they both store the exact same value. + * + * Also see the comments of try_to_wake_up(). */ #define __set_current_state(state_value) \ do { current->state = (state_value); } while (0) @@ -520,7 +540,11 @@ static inline int get_dumpable(struct mm_struct *mm) /* leave room for more dump flags */ #define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ #define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ -#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ +/* + * This one-shot flag is dropped due to necessity of changing exe once again + * on NFS restore + */ +//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ #define MMF_HAS_UPROBES 19 /* has uprobes */ #define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ @@ -989,7 +1013,7 @@ enum cpu_idle_type { * already in a wake queue, the wakeup will happen soon and the second * waker can just skip it. * - * The WAKE_Q macro declares and initializes the list head. + * The DEFINE_WAKE_Q macro declares and initializes the list head. * wake_up_q() does NOT reinitialize the list; it's expected to be * called near the end of a function, where the fact that the queue is * not used again will be easy to see by inspection. 
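
A minimal sketch of the wake-queue pattern the comment above describes
(illustrative only, not part of this patch; the spinlock, the condition and
the task pointer are assumed from the surrounding code):

	DEFINE_WAKE_Q(wake_q);

	spin_lock(&lock);
	if (condition)
		wake_q_add(&wake_q, task);	/* queues task, takes a reference */
	spin_unlock(&lock);

	wake_up_q(&wake_q);			/* wakeups issued outside the lock */
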
@@ -1009,7 +1033,7 @@ struct wake_q_head { #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) -#define WAKE_Q(name) \ +#define DEFINE_WAKE_Q(name) \ struct wake_q_head name = { WAKE_Q_TAIL, &name.first } extern void wake_q_add(struct wake_q_head *head, @@ -1057,6 +1081,8 @@ static inline int cpu_numa_flags(void) } #endif +extern int arch_asym_cpu_priority(int cpu); + struct sched_domain_attr { int relax_domain_level; }; @@ -1627,7 +1653,10 @@ struct task_struct { int __user *set_child_tid; /* CLONE_CHILD_SETTID */ int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ - cputime_t utime, stime, utimescaled, stimescaled; + cputime_t utime, stime; +#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME + cputime_t utimescaled, stimescaled; +#endif cputime_t gtime; struct prev_cputime prev_cputime; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN @@ -1656,6 +1685,7 @@ struct task_struct { struct list_head cpu_timers[3]; /* process credentials */ + const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */ const struct cred __rcu *real_cred; /* objective and real subjective task * credentials (COW) */ const struct cred __rcu *cred; /* effective (overridable) subjective task @@ -2220,40 +2250,45 @@ struct task_struct *try_get_task_struct(struct task_struct **ptask); #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN extern void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime); -extern void task_cputime_scaled(struct task_struct *t, - cputime_t *utimescaled, cputime_t *stimescaled); extern cputime_t task_gtime(struct task_struct *t); #else static inline void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime) { - if (utime) - *utime = t->utime; - if (stime) - *stime = t->stime; + *utime = t->utime; + *stime = t->stime; } +static inline cputime_t task_gtime(struct task_struct *t) +{ + return t->gtime; +} +#endif + +#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME static inline void task_cputime_scaled(struct task_struct *t, cputime_t *utimescaled, cputime_t *stimescaled) { - if (utimescaled) - *utimescaled = t->utimescaled; - if (stimescaled) - *stimescaled = t->stimescaled; + *utimescaled = t->utimescaled; + *stimescaled = t->stimescaled; } - -static inline cputime_t task_gtime(struct task_struct *t) +#else +static inline void task_cputime_scaled(struct task_struct *t, + cputime_t *utimescaled, + cputime_t *stimescaled) { - return t->gtime; + task_cputime(t, utimescaled, stimescaled); } #endif + extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); /* * Per process flags */ +#define PF_IDLE 0x00000002 /* I am an IDLE thread */ #define PF_EXITING 0x00000004 /* getting shut down */ #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ @@ -2444,6 +2479,10 @@ static inline void calc_load_enter_idle(void) { } static inline void calc_load_exit_idle(void) { } #endif /* CONFIG_NO_HZ_COMMON */ +#ifndef cpu_relax_yield +#define cpu_relax_yield() cpu_relax() +#endif + /* * Do not use outside of architecture code which knows its limitations. 
* @@ -2567,6 +2606,7 @@ extern void sched_autogroup_create_attach(struct task_struct *p); extern void sched_autogroup_detach(struct task_struct *p); extern void sched_autogroup_fork(struct signal_struct *sig); extern void sched_autogroup_exit(struct signal_struct *sig); +extern void sched_autogroup_exit_task(struct task_struct *p); #ifdef CONFIG_PROC_FS extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); @@ -2576,6 +2616,7 @@ static inline void sched_autogroup_create_attach(struct task_struct *p) { } static inline void sched_autogroup_detach(struct task_struct *p) { } static inline void sched_autogroup_fork(struct signal_struct *sig) { } static inline void sched_autogroup_exit(struct signal_struct *sig) { } +static inline void sched_autogroup_exit_task(struct task_struct *p) { } #endif extern int yield_to(struct task_struct *p, bool preempt); @@ -2609,7 +2650,7 @@ extern struct task_struct *idle_task(int cpu); */ static inline bool is_idle_task(const struct task_struct *p) { - return p->pid == 0; + return !!(p->flags & PF_IDLE); } extern struct task_struct *curr_task(int cpu); extern void ia64_set_curr_task(int cpu, struct task_struct *p); @@ -3506,6 +3547,18 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) #endif /* CONFIG_SMP */ +/* + * In order to reduce various lock holder preemption latencies provide an + * interface to see if a vCPU is currently running or not. + * + * This allows us to terminate optimistic spin loops and block, analogous to + * the native optimistic spin heuristic of testing if the lock owner task is + * running or not. + */ +#ifndef vcpu_is_preempted +# define vcpu_is_preempted(cpu) false +#endif + extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); extern long sched_getaffinity(pid_t pid, struct cpumask *mask); diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 22db1e63707e..441145351301 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -36,7 +36,6 @@ extern unsigned int sysctl_numa_balancing_scan_size; extern unsigned int sysctl_sched_migration_cost; extern unsigned int sysctl_sched_nr_migrate; extern unsigned int sysctl_sched_time_avg; -extern unsigned int sysctl_sched_shares_window; int sched_proc_update_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, diff --git a/include/linux/seg6.h b/include/linux/seg6.h new file mode 100644 index 000000000000..7a66d2b4c5a6 --- /dev/null +++ b/include/linux/seg6.h @@ -0,0 +1,6 @@ +#ifndef _LINUX_SEG6_H +#define _LINUX_SEG6_H + +#include <uapi/linux/seg6.h> + +#endif diff --git a/include/linux/seg6_genl.h b/include/linux/seg6_genl.h new file mode 100644 index 000000000000..d6c3fb4f3734 --- /dev/null +++ b/include/linux/seg6_genl.h @@ -0,0 +1,6 @@ +#ifndef _LINUX_SEG6_GENL_H +#define _LINUX_SEG6_GENL_H + +#include <uapi/linux/seg6_genl.h> + +#endif diff --git a/include/linux/seg6_hmac.h b/include/linux/seg6_hmac.h new file mode 100644 index 000000000000..da437ebdc6cd --- /dev/null +++ b/include/linux/seg6_hmac.h @@ -0,0 +1,6 @@ +#ifndef _LINUX_SEG6_HMAC_H +#define _LINUX_SEG6_HMAC_H + +#include <uapi/linux/seg6_hmac.h> + +#endif diff --git a/include/linux/seg6_iptunnel.h b/include/linux/seg6_iptunnel.h new file mode 100644 index 000000000000..5377cf6a5a02 --- /dev/null +++ b/include/linux/seg6_iptunnel.h @@ -0,0 +1,6 @@ +#ifndef _LINUX_SEG6_IPTUNNEL_H +#define 
_LINUX_SEG6_IPTUNNEL_H + +#include <uapi/linux/seg6_iptunnel.h> + +#endif diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h index a1ba6a5ccdd6..c58c535d12a8 100644 --- a/include/linux/seqno-fence.h +++ b/include/linux/seqno-fence.h @@ -20,7 +20,7 @@ #ifndef __LINUX_SEQNO_FENCE_H #define __LINUX_SEQNO_FENCE_H -#include <linux/fence.h> +#include <linux/dma-fence.h> #include <linux/dma-buf.h> enum seqno_fence_condition { @@ -29,15 +29,15 @@ enum seqno_fence_condition { }; struct seqno_fence { - struct fence base; + struct dma_fence base; - const struct fence_ops *ops; + const struct dma_fence_ops *ops; struct dma_buf *sync_buf; uint32_t seqno_ofs; enum seqno_fence_condition condition; }; -extern const struct fence_ops seqno_fence_ops; +extern const struct dma_fence_ops seqno_fence_ops; /** * to_seqno_fence - cast a fence to a seqno_fence @@ -47,7 +47,7 @@ extern const struct fence_ops seqno_fence_ops; * or the seqno_fence otherwise. */ static inline struct seqno_fence * -to_seqno_fence(struct fence *fence) +to_seqno_fence(struct dma_fence *fence) { if (fence->ops != &seqno_fence_ops) return NULL; @@ -83,9 +83,9 @@ to_seqno_fence(struct fence *fence) * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the * device's vm can be expensive. * - * It is recommended for creators of seqno_fence to call fence_signal + * It is recommended for creators of seqno_fence to call dma_fence_signal() * before destruction. This will prevent possible issues from wraparound at - * time of issue vs time of check, since users can check fence_is_signaled + * time of issue vs time of check, since users can check dma_fence_is_signaled() * before submitting instructions for the hardware to wait on the fence. * However, when ops.enable_signaling is not called, it doesn't have to be * done as soon as possible, just before there's any real danger of seqno @@ -96,18 +96,18 @@ seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock, struct dma_buf *sync_buf, uint32_t context, uint32_t seqno_ofs, uint32_t seqno, enum seqno_fence_condition cond, - const struct fence_ops *ops) + const struct dma_fence_ops *ops) { BUG_ON(!fence || !sync_buf || !ops); BUG_ON(!ops->wait || !ops->enable_signaling || !ops->get_driver_name || !ops->get_timeline_name); /* - * ops is used in fence_init for get_driver_name, so needs to be + * ops is used in dma_fence_init for get_driver_name, so needs to be * initialized first */ fence->ops = ops; - fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno); + dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno); get_dma_buf(sync_buf); fence->sync_buf = sync_buf; fence->seqno_ofs = seqno_ofs; diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 48ec7651989b..61fbb440449c 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -36,6 +36,8 @@ struct plat_serial8250_port { void (*set_termios)(struct uart_port *, struct ktermios *new, struct ktermios *old); + void (*set_ldisc)(struct uart_port *, + struct ktermios *); unsigned int (*get_mctrl)(struct uart_port *); int (*handle_irq)(struct uart_port *); void (*pm)(struct uart_port *, unsigned int state, @@ -94,7 +96,7 @@ struct uart_8250_port { struct uart_port port; struct timer_list timer; /* "no irq" timer */ struct list_head list; /* ports on this IRQ */ - unsigned short capabilities; /* port capabilities */ + u32 capabilities; /* port capabilities */ unsigned short bugs; /* port bugs */ bool fifo_bug; /* min RX trigger if enabled */ unsigned int 
tx_loadsz; /* transmit fifo load size */ @@ -149,6 +151,8 @@ extern int early_serial8250_setup(struct earlycon_device *device, const char *options); extern void serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old); +extern void serial8250_do_set_ldisc(struct uart_port *port, + struct ktermios *termios); extern unsigned int serial8250_do_get_mctrl(struct uart_port *port); extern int serial8250_do_startup(struct uart_port *port); extern void serial8250_do_shutdown(struct uart_port *port); @@ -168,6 +172,6 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe); extern void serial8250_set_isa_configurator(void (*v) (int port, struct uart_port *up, - unsigned short *capabilities)); + u32 *capabilities)); #endif diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 344201437017..5d494888a612 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -123,6 +123,8 @@ struct uart_port { void (*set_termios)(struct uart_port *, struct ktermios *new, struct ktermios *old); + void (*set_ldisc)(struct uart_port *, + struct ktermios *); unsigned int (*get_mctrl)(struct uart_port *); void (*set_mctrl)(struct uart_port *, unsigned int); int (*startup)(struct uart_port *port); diff --git a/include/linux/signal.h b/include/linux/signal.h index b63f63eaa39c..5308304993be 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -97,6 +97,23 @@ static inline int sigisemptyset(sigset_t *set) } } +static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2) +{ + switch (_NSIG_WORDS) { + case 4: + return (set1->sig[3] == set2->sig[3]) && + (set1->sig[2] == set2->sig[2]) && + (set1->sig[1] == set2->sig[1]) && + (set1->sig[0] == set2->sig[0]); + case 2: + return (set1->sig[1] == set2->sig[1]) && + (set1->sig[0] == set2->sig[0]); + case 1: + return set1->sig[0] == set2->sig[0]; + } + return 0; +} + #define sigmask(sig) (1UL << ((sig) - 1)) #ifndef __HAVE_ARCH_SIG_SETOPS diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 32810f279f8e..332e76756f54 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -645,8 +645,15 @@ struct sk_buff { struct rb_node rbnode; /* used in netem & tcp stack */ }; struct sock *sk; - struct net_device *dev; + union { + struct net_device *dev; + /* Some protocols might use this space to store information, + * while device pointer would be NULL. + * UDP receive path is one user. + */ + unsigned long dev_scratch; + }; /* * This is the control buffer. It is free to use for every * layer. Please put your private variables there. 
If you @@ -1087,7 +1094,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4) } void __skb_get_hash(struct sk_buff *skb); -u32 __skb_get_hash_symmetric(struct sk_buff *skb); +u32 __skb_get_hash_symmetric(const struct sk_buff *skb); u32 skb_get_poff(const struct sk_buff *skb); u32 __skb_get_poff(const struct sk_buff *skb, void *data, const struct flow_keys *keys, int hlen); @@ -1799,11 +1806,11 @@ static inline unsigned int skb_headlen(const struct sk_buff *skb) return skb->len - skb->data_len; } -static inline int skb_pagelen(const struct sk_buff *skb) +static inline unsigned int skb_pagelen(const struct sk_buff *skb) { - int i, len = 0; + unsigned int i, len = 0; - for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) + for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--) len += skb_frag_size(&skb_shinfo(skb)->frags[i]); return len + skb_headlen(skb); } @@ -1966,6 +1973,8 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; } +void skb_condense(struct sk_buff *skb); + /** * skb_headroom - bytes at buffer head * @skb: buffer to check @@ -3033,9 +3042,13 @@ static inline void skb_frag_list_init(struct sk_buff *skb) int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p, const struct sk_buff *skb); struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags, + void (*destructor)(struct sock *sk, + struct sk_buff *skb), int *peeked, int *off, int *err, struct sk_buff **last); struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, + void (*destructor)(struct sock *sk, + struct sk_buff *skb), int *peeked, int *off, int *err); struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err); diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h index e302c447e057..129bc674dcf5 100644 --- a/include/linux/smc91x.h +++ b/include/linux/smc91x.h @@ -39,6 +39,7 @@ struct smc91x_platdata { unsigned long flags; unsigned char leda; unsigned char ledb; + bool pxa_u16_align4; /* PXA buggy u16 writes on 4*n+2 addresses */ }; #endif /* __SMC91X_H__ */ diff --git a/include/linux/soc/renesas/rcar-rst.h b/include/linux/soc/renesas/rcar-rst.h new file mode 100644 index 000000000000..a18e0783946b --- /dev/null +++ b/include/linux/soc/renesas/rcar-rst.h @@ -0,0 +1,6 @@ +#ifndef __LINUX_SOC_RENESAS_RCAR_RST_H__ +#define __LINUX_SOC_RENESAS_RCAR_RST_H__ + +int rcar_rst_read_mode_pins(u32 *mode); + +#endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 4b743ac35396..75c6bd0ac605 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -442,6 +442,7 @@ struct spi_master { #define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */ #define SPI_MASTER_MUST_RX BIT(3) /* requires rx */ #define SPI_MASTER_MUST_TX BIT(4) /* requires tx */ +#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */ /* * on some hardware transfer / message size may be constrained diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 705840e0438f..266dab9ad782 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -88,6 +88,9 @@ struct stmmac_mdio_bus_data { struct stmmac_dma_cfg { int pbl; + int txpbl; + int rxpbl; + bool pblx8; int fixed_burst; int mixed_burst; bool aal; @@ -135,8 +138,6 @@ struct plat_stmmacenet_data { void (*bus_setup)(void __iomem *ioaddr); int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, 
void *priv); - void (*suspend)(struct platform_device *pdev, void *priv); - void (*resume)(struct platform_device *pdev, void *priv); void *bsp_priv; struct stmmac_axi *axi; int has_gmac4; diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index ab02a457da1f..e5d193440374 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -25,6 +25,7 @@ struct svc_xprt_ops { void (*xpo_detach)(struct svc_xprt *); void (*xpo_free)(struct svc_xprt *); int (*xpo_secure_port)(struct svc_rqst *); + void (*xpo_kill_temp_xprt)(struct svc_xprt *); }; struct svc_xprt_class { diff --git a/include/linux/suspend.h b/include/linux/suspend.h index d9718378a8be..0c729c3c8549 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -194,6 +194,8 @@ struct platform_freeze_ops { }; #ifdef CONFIG_SUSPEND +extern suspend_state_t mem_sleep_default; + /** * suspend_set_ops - set platform dependent suspend operations * @ops: The new suspend operations to set. diff --git a/include/linux/swap.h b/include/linux/swap.h index a56523cefb9b..09f4be179ff3 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -246,39 +246,7 @@ struct swap_info_struct { void *workingset_eviction(struct address_space *mapping, struct page *page); bool workingset_refault(void *shadow); void workingset_activation(struct page *page); -extern struct list_lru workingset_shadow_nodes; - -static inline unsigned int workingset_node_pages(struct radix_tree_node *node) -{ - return node->count & RADIX_TREE_COUNT_MASK; -} - -static inline void workingset_node_pages_inc(struct radix_tree_node *node) -{ - node->count++; -} - -static inline void workingset_node_pages_dec(struct radix_tree_node *node) -{ - VM_WARN_ON_ONCE(!workingset_node_pages(node)); - node->count--; -} - -static inline unsigned int workingset_node_shadows(struct radix_tree_node *node) -{ - return node->count >> RADIX_TREE_COUNT_SHIFT; -} - -static inline void workingset_node_shadows_inc(struct radix_tree_node *node) -{ - node->count += 1U << RADIX_TREE_COUNT_SHIFT; -} - -static inline void workingset_node_shadows_dec(struct radix_tree_node *node) -{ - VM_WARN_ON_ONCE(!workingset_node_shadows(node)); - node->count -= 1U << RADIX_TREE_COUNT_SHIFT; -} +void workingset_update_node(struct radix_tree_node *node, void *private); /* linux/mm/page_alloc.c */ extern unsigned long totalram_pages; @@ -351,6 +319,9 @@ extern int kswapd_run(int nid); extern void kswapd_stop(int nid); #ifdef CONFIG_SWAP + +#include <linux/blk_types.h> /* for bio_end_io_t */ + /* linux/mm/page_io.c */ extern int swap_readpage(struct page *); extern int swap_writepage(struct page *page, struct writeback_control *wbc); diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 5f81f8a187f2..183f37c8a5e1 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -44,11 +44,13 @@ enum dma_sync_target { extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr, phys_addr_t phys, size_t size, - enum dma_data_direction dir); + enum dma_data_direction dir, + unsigned long attrs); extern void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir); + size_t size, enum dma_data_direction dir, + unsigned long attrs); extern void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr, @@ -73,14 +75,6 @@ extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, unsigned long attrs); extern int -swiotlb_map_sg(struct device 
*hwdev, struct scatterlist *sg, int nents, - enum dma_data_direction dir); - -extern void -swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, - enum dma_data_direction dir); - -extern int swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, unsigned long attrs); diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h index aa17ccfc2f57..3e3ab84fc4cd 100644 --- a/include/linux/sync_file.h +++ b/include/linux/sync_file.h @@ -18,8 +18,8 @@ #include <linux/ktime.h> #include <linux/list.h> #include <linux/spinlock.h> -#include <linux/fence.h> -#include <linux/fence-array.h> +#include <linux/dma-fence.h> +#include <linux/dma-fence-array.h> /** * struct sync_file - sync file to export to the userspace @@ -41,13 +41,13 @@ struct sync_file { wait_queue_head_t wq; - struct fence *fence; - struct fence_cb cb; + struct dma_fence *fence; + struct dma_fence_cb cb; }; -#define POLL_ENABLED FENCE_FLAG_USER_BITS +#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS -struct sync_file *sync_file_create(struct fence *fence); -struct fence *sync_file_get_fence(int fd); +struct sync_file *sync_file_create(struct dma_fence *fence); +struct dma_fence *sync_file_get_fence(int fd); #endif /* _LINUX_SYNC_H */ diff --git a/include/linux/sys_soc.h b/include/linux/sys_soc.h index 2739ccb69571..bed223b70217 100644 --- a/include/linux/sys_soc.h +++ b/include/linux/sys_soc.h @@ -13,6 +13,7 @@ struct soc_device_attribute { const char *family; const char *revision; const char *soc_id; + const void *data; }; /** @@ -34,4 +35,12 @@ void soc_device_unregister(struct soc_device *soc_dev); */ struct device *soc_device_to_device(struct soc_device *soc); +#ifdef CONFIG_SOC_BUS +const struct soc_device_attribute *soc_device_match( + const struct soc_device_attribute *matches); +#else +static inline const struct soc_device_attribute *soc_device_match( + const struct soc_device_attribute *matches) { return NULL; } +#endif + #endif /* __SOC_BUS_H */ diff --git a/include/linux/tcp.h b/include/linux/tcp.h index a17ae7b85218..fc5848dad7a4 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -123,6 +123,7 @@ struct tcp_request_sock { u32 txhash; u32 rcv_isn; u32 snt_isn; + u32 ts_off; u32 last_oow_ack_time; /* last SYNACK */ u32 rcv_nxt; /* the ack # by SYNACK. For * FastOpen it's the seq# @@ -176,8 +177,6 @@ struct tcp_sock { * sum(delta(snd_una)), or how many bytes * were acked. */ - struct u64_stats_sync syncp; /* protects 64bit vars (cf tcp_get_info()) */ - u32 snd_una; /* First byte we want an ack for */ u32 snd_sml; /* Last byte of the most recently transmitted small packet */ u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ @@ -187,7 +186,6 @@ struct tcp_sock { u32 tsoffset; /* timestamp offset */ struct list_head tsq_node; /* anchor in tsq_tasklet.head list */ - unsigned long tsq_flags; /* Data for direct copy to user */ struct { @@ -213,8 +211,11 @@ struct tcp_sock { u8 reord; /* reordering detected */ } rack; u16 advmss; /* Advertised MSS */ - u8 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */ - unused:7; + u32 chrono_start; /* Start time in jiffies of a TCP chrono */ + u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */ + u8 chrono_type:2, /* current chronograph type */ + rate_app_limited:1, /* rate_{delivered,interval_us} limited? */ + unused:5; u8 nonagle : 4,/* Disable Nagle algorithm? 
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index a17ae7b85218..fc5848dad7a4 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -123,6 +123,7 @@ struct tcp_request_sock {
 	u32				txhash;
 	u32				rcv_isn;
 	u32				snt_isn;
+	u32				ts_off;
 	u32				last_oow_ack_time; /* last SYNACK */
 	u32				rcv_nxt; /* the ack # by SYNACK. For
 						  * FastOpen it's the seq#
@@ -176,8 +177,6 @@ struct tcp_sock {
 				 * sum(delta(snd_una)), or how many bytes
 				 * were acked.
 				 */
-	struct u64_stats_sync syncp; /* protects 64bit vars (cf tcp_get_info()) */
-
 	u32	snd_una;	/* First byte we want an ack for	*/
 	u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
 	u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
@@ -187,7 +186,6 @@ struct tcp_sock {
 	u32	tsoffset;	/* timestamp offset */

 	struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
-	unsigned long	tsq_flags;

 	/* Data for direct copy to user */
 	struct {
@@ -213,8 +211,11 @@ struct tcp_sock {
 		u8 reord;    /* reordering detected */
 	} rack;
 	u16	advmss;		/* Advertised MSS		*/
-	u8	rate_app_limited:1,  /* rate_{delivered,interval_us} limited? */
-		unused:7;
+	u32	chrono_start;	/* Start time in jiffies of a TCP chrono */
+	u32	chrono_stat[3];	/* Time in jiffies for chrono_stat stats */
+	u8	chrono_type:2,	/* current chronograph type */
+		rate_app_limited:1,  /* rate_{delivered,interval_us} limited? */
+		unused:5;
 	u8	nonagle     : 4,/* Disable Nagle algorithm?             */
 		thin_lto    : 1,/* Use linear timeouts for thin streams */
 		thin_dupack : 1,/* Fast retransmit on first dupack */
@@ -362,7 +363,7 @@ struct tcp_sock {
 	u32	*saved_syn;
 };

-enum tsq_flags {
+enum tsq_enum {
 	TSQ_THROTTLED,
 	TSQ_QUEUED,
 	TCP_TSQ_DEFERRED,	/* tcp_tasklet_func() found socket was owned */
@@ -373,6 +374,15 @@ enum tsq_flags {
 	 */
 };

+enum tsq_flags {
+	TSQF_THROTTLED			= (1UL << TSQ_THROTTLED),
+	TSQF_QUEUED			= (1UL << TSQ_QUEUED),
+	TCPF_TSQ_DEFERRED		= (1UL << TCP_TSQ_DEFERRED),
+	TCPF_WRITE_TIMER_DEFERRED	= (1UL << TCP_WRITE_TIMER_DEFERRED),
+	TCPF_DELACK_TIMER_DEFERRED	= (1UL << TCP_DELACK_TIMER_DEFERRED),
+	TCPF_MTU_REDUCED_DEFERRED	= (1UL << TCP_MTU_REDUCED_DEFERRED),
+};
+
 static inline struct tcp_sock *tcp_sk(const struct sock *sk)
 {
 	return (struct tcp_sock *)sk;
@@ -427,4 +437,6 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp)
 	tp->saved_syn = NULL;
 }

+struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk);
+
 #endif /* _LINUX_TCP_H */
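Splitting the old tsq_flags enum into bit numbers (tsq_enum) and masks (tsq_flags) lets atomic bit operations keep using the positions while tests against a whole flags word use the masks directly. A hypothetical check over a snapshot of such a flags word:

#include <linux/bitops.h>
#include <linux/tcp.h>

static bool example_tsq_blocked(unsigned long flags)
{
	/* mask form: several flags in one comparison ... */
	if (flags & (TSQF_THROTTLED | TSQF_QUEUED))
		return true;
	/* ... while bit ops keep using the bit numbers */
	return test_bit(TSQ_THROTTLED, &flags);
}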
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 511182a88e76..e275e98bdceb 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -28,6 +28,7 @@
 #include <linux/of.h>
 #include <linux/idr.h>
 #include <linux/device.h>
+#include <linux/sysfs.h>
 #include <linux/workqueue.h>
 #include <uapi/linux/thermal.h>

@@ -204,6 +205,7 @@ struct thermal_zone_device {
 	int id;
 	char type[THERMAL_NAME_LENGTH];
 	struct device device;
+	struct attribute_group trips_attribute_group;
 	struct thermal_attr *trip_temp_attrs;
 	struct thermal_attr *trip_type_attrs;
 	struct thermal_attr *trip_hyst_attrs;
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 2873baf5372a..58373875e8ee 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -9,50 +9,17 @@
 #include <linux/types.h>
 #include <linux/bug.h>
-
-struct timespec;
-struct compat_timespec;
+#include <linux/restart_block.h>

 #ifdef CONFIG_THREAD_INFO_IN_TASK
-#define current_thread_info() ((struct thread_info *)current)
-#endif
-
 /*
- * System call restart block.
+ * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
+ * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
+ * including <asm/current.h> can cause a circular dependency on some platforms.
  */
-struct restart_block {
-	long (*fn)(struct restart_block *);
-	union {
-		/* For futex_wait and futex_wait_requeue_pi */
-		struct {
-			u32 __user *uaddr;
-			u32 val;
-			u32 flags;
-			u32 bitset;
-			u64 time;
-			u32 __user *uaddr2;
-		} futex;
-		/* For nanosleep */
-		struct {
-			clockid_t clockid;
-			struct timespec __user *rmtp;
-#ifdef CONFIG_COMPAT
-			struct compat_timespec __user *compat_rmtp;
+#include <asm/current.h>
+#define current_thread_info() ((struct thread_info *)current)
 #endif
-			u64 expires;
-		} nanosleep;
-		/* For poll */
-		struct {
-			struct pollfd __user *ufds;
-			int nfds;
-			int has_timeout;
-			unsigned long tv_sec;
-			unsigned long tv_nsec;
-		} poll;
-	};
-};
-
-extern long do_no_restart_syscall(struct restart_block *parm);

 #include <linux/bitops.h>
 #include <asm/thread_info.h>
diff --git a/include/linux/time.h b/include/linux/time.h
index 4cea09d94208..23f0f5ce3090 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -172,8 +172,6 @@ extern int do_setitimer(int which, struct itimerval *value,
 			struct itimerval *ovalue);
 extern int do_getitimer(int which, struct itimerval *value);

-extern unsigned int alarm_setitimer(unsigned int seconds);
-
 extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags);

 struct tms;
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 09168c52ab64..361f8bf1429d 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -249,6 +249,7 @@ static inline u64 ktime_get_raw_ns(void)

 extern u64 ktime_get_mono_fast_ns(void);
 extern u64 ktime_get_raw_fast_ns(void);
+extern u64 ktime_get_boot_fast_ns(void);

 /*
  * Timespec interfaces utilizing the ktime based ones
diff --git a/include/linux/udp.h b/include/linux/udp.h
index d1fd8cd39478..c0f530809d1f 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -79,6 +79,9 @@ struct udp_sock {
 	int		(*gro_complete)(struct sock *sk,
 					struct sk_buff *skb,
 					int nhoff);
+
+	/* This field is dirtied by udp_recvmsg() */
+	int		forward_deficit;
 };

 static inline struct udp_sock *udp_sk(const struct sock *sk)
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 6e22b544d039..d5aba1512b8b 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -125,7 +125,7 @@ static inline bool iter_is_iovec(const struct iov_iter *i)
  *
  * The ?: is just for type safety.
  */
-#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & RW_MASK)
+#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & (READ | WRITE))

 /*
  * Cap the iov_iter by given limit; note that the second argument is
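The iov_iter_rw() change drops the block layer's RW_MASK in favour of the plain (READ | WRITE) mask; the result is still exactly READ or WRITE for callers. A sketch of a typical consumer (the function is invented for illustration):

#include <linux/uio.h>

static bool example_iter_is_write(struct iov_iter *iter)
{
	return iov_iter_rw(iter) == WRITE;
}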
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 4a29c75b146e..0a294e950df8 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -27,6 +27,7 @@
 #include <linux/errno.h>
 #include <linux/rbtree.h>
 #include <linux/types.h>
+#include <linux/wait.h>

 struct vm_area_struct;
 struct mm_struct;
diff --git a/include/linux/usb.h b/include/linux/usb.h
index eba1f10e8cfd..7e68259360de 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1160,7 +1160,7 @@ extern struct bus_type usb_bus_type;
 * @minor_base: the start of the minor range for this driver.
 *
 * This structure is used for the usb_register_dev() and
- * usb_unregister_dev() functions, to consolidate a number of the
+ * usb_deregister_dev() functions, to consolidate a number of the
 * parameters used for them.
 */
 struct usb_class_driver {
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 3a375d07d0dc..00d232406f18 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -81,7 +81,8 @@
 #define CDC_NCM_TIMER_INTERVAL_MAX		(U32_MAX / NSEC_PER_USEC)

 /* Driver flags */
-#define CDC_NCM_FLAG_NDP_TO_END	0x02	/* NDP is placed at end of frame */
+#define CDC_NCM_FLAG_NDP_TO_END			0x02	/* NDP is placed at end of frame */
+#define CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE	0x04	/* Avoid altsetting toggle during init */

 #define cdc_ncm_comm_intf_is_mbim(x)  ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
				       (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 8e81f9eb95e4..e4516e9ded0f 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -429,7 +429,9 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
 */
 static inline size_t usb_ep_align(struct usb_ep *ep, size_t len)
 {
-	return round_up(len, (size_t)le16_to_cpu(ep->desc->wMaxPacketSize));
+	int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc) & 0x7ff;
+
+	return round_up(len, max_packet_size);
 }

 /**
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 66fc13705ab7..40edf6a8533e 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -566,21 +566,22 @@ extern void usb_ep0_reinit(struct usb_device *);
 	((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)

 /* class requests from the USB 2.0 hub spec, table 11-15 */
+#define HUB_CLASS_REQ(dir, type, request) ((((dir) | (type)) << 8) | (request))
 /* GetBusState and SetHubDescriptor are optional, omitted */
-#define ClearHubFeature		(0x2000 | USB_REQ_CLEAR_FEATURE)
-#define ClearPortFeature	(0x2300 | USB_REQ_CLEAR_FEATURE)
-#define GetHubDescriptor	(0xa000 | USB_REQ_GET_DESCRIPTOR)
-#define GetHubStatus		(0xa000 | USB_REQ_GET_STATUS)
-#define GetPortStatus		(0xa300 | USB_REQ_GET_STATUS)
-#define SetHubFeature		(0x2000 | USB_REQ_SET_FEATURE)
-#define SetPortFeature		(0x2300 | USB_REQ_SET_FEATURE)
+#define ClearHubFeature		HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_CLEAR_FEATURE)
+#define ClearPortFeature	HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_CLEAR_FEATURE)
+#define GetHubDescriptor	HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_DESCRIPTOR)
+#define GetHubStatus		HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_STATUS)
+#define GetPortStatus		HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, USB_REQ_GET_STATUS)
+#define SetHubFeature		HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_SET_FEATURE)
+#define SetPortFeature		HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_SET_FEATURE)

 /*-------------------------------------------------------------------------*/

 /* class requests from USB 3.1 hub spec, table 10-7 */
-#define SetHubDepth		(0x2000 | HUB_SET_DEPTH)
-#define GetPortErrorCount	(0xa300 | HUB_GET_PORT_ERR_COUNT)
+#define SetHubDepth		HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, HUB_SET_DEPTH)
+#define GetPortErrorCount	HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, HUB_GET_PORT_ERR_COUNT)

 /*
  * Generic bandwidth allocation constants/support
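HUB_CLASS_REQ() packs bmRequestType into the high byte and bRequest into the low byte, so the symbolic spellings expand to the same constants as the old hex literals (USB_DIR_OUT is 0x00, USB_RT_PORT is 0x23). A build-time sanity check, as a sketch:

#include <linux/bug.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

static void example_check_encoding(void)
{
	/* (USB_DIR_OUT | USB_RT_PORT) << 8 | USB_REQ_CLEAR_FEATURE == 0x2301 */
	BUILD_BUG_ON(ClearPortFeature != (0x2300 | USB_REQ_CLEAR_FEATURE));
}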
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index dd66a952e8cd..11b92b047a1e 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -27,7 +27,7 @@
 #define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
 #define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)

-extern int handle_userfault(struct fault_env *fe, unsigned long reason);
+extern int handle_userfault(struct vm_fault *vmf, unsigned long reason);

 extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
 			    unsigned long src_start, unsigned long len);
@@ -55,7 +55,7 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma)
 #else /* CONFIG_USERFAULTFD */

 /* mm helpers */
-static inline int handle_userfault(struct fault_env *fe, unsigned long reason)
+static inline int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 {
 	return VM_FAULT_SIGBUS;
 }
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 0ecae0b1cd34..edf9b2cad277 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -75,7 +75,16 @@ struct vfio_iommu_driver_ops {
 					struct iommu_group *group);
 	void		(*detach_group)(void *iommu_data,
 					struct iommu_group *group);
-
+	int		(*pin_pages)(void *iommu_data, unsigned long *user_pfn,
+				     int npage, int prot,
+				     unsigned long *phys_pfn);
+	int		(*unpin_pages)(void *iommu_data,
+				       unsigned long *user_pfn, int npage);
+	int		(*register_notifier)(void *iommu_data,
+					     unsigned long *events,
+					     struct notifier_block *nb);
+	int		(*unregister_notifier)(void *iommu_data,
+					       struct notifier_block *nb);
 };

 extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
@@ -92,6 +101,36 @@ extern int vfio_external_user_iommu_id(struct vfio_group *group);
 extern long vfio_external_check_extension(struct vfio_group *group,
 					  unsigned long arg);

+#define VFIO_PIN_PAGES_MAX_ENTRIES	(PAGE_SIZE/sizeof(unsigned long))
+
+extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn,
+			  int npage, int prot, unsigned long *phys_pfn);
+extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn,
+			    int npage);
+
+/* each type has independent events */
+enum vfio_notify_type {
+	VFIO_IOMMU_NOTIFY = 0,
+	VFIO_GROUP_NOTIFY = 1,
+};
+
+/* events for VFIO_IOMMU_NOTIFY */
+#define VFIO_IOMMU_NOTIFY_DMA_UNMAP	BIT(0)
+
+/* events for VFIO_GROUP_NOTIFY */
+#define VFIO_GROUP_NOTIFY_SET_KVM	BIT(0)
+
+extern int vfio_register_notifier(struct device *dev,
+				  enum vfio_notify_type type,
+				  unsigned long *required_events,
+				  struct notifier_block *nb);
+extern int vfio_unregister_notifier(struct device *dev,
+				    enum vfio_notify_type type,
+				    struct notifier_block *nb);
+
+struct kvm;
+extern void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm);
+
 /*
  * Sub-module helpers
  */
@@ -103,6 +142,13 @@ extern struct vfio_info_cap_header *vfio_info_cap_add(
 	struct vfio_info_cap *caps, size_t size, u16 id, u16 version);
 extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset);

+extern int vfio_info_add_capability(struct vfio_info_cap *caps,
+				    int cap_type_id, void *cap_type);
+
+extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
+					      int num_irqs, int max_irq_type,
+					      size_t *data_size);
+
 struct pci_dev;
 #ifdef CONFIG_EEH
 extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev);
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 1c912f85e041..66204007d7ac 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -58,7 +58,7 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
 					   struct virtio_net_hdr *hdr,
 					   bool little_endian)
 {
-	memset(hdr, 0, sizeof(*hdr));
+	memset(hdr, 0, sizeof(*hdr));	/* no info leak */

 	if (skb_is_gso(skb)) {
 		struct skb_shared_info *sinfo = skb_shinfo(skb);
@@ -98,4 +98,4 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
 	return 0;
 }

-#endif /* _LINUX_VIRTIO_BYTEORDER */
+#endif /* _LINUX_VIRTIO_NET_H */
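The new vfio_pin_pages()/vfio_register_notifier() entry points support mediated devices: a vendor driver pins user PFNs for DMA and must watch for DMA unmaps to invalidate them. A minimal, illustrative consumer (the callback and function names are invented; the calls are the ones declared above):

#include <linux/notifier.h>
#include <linux/vfio.h>

static int example_dma_unmap_cb(struct notifier_block *nb,
				unsigned long action, void *data)
{
	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		/* unpin/invalidate any pages covered by the unmapped range */
	}
	return NOTIFY_OK;
}

static int example_register(struct device *dev, struct notifier_block *nb)
{
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;

	nb->notifier_call = example_dma_unmap_cb;
	return vfio_register_notifier(dev, VFIO_IOMMU_NOTIFY, &events, nb);
}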
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 3d9d786a943c..d68edffbf142 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -82,6 +82,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			const void *caller);

 extern void vfree(const void *addr);
+extern void vfree_atomic(const void *addr);

 extern void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot);
diff --git a/include/linux/vme.h b/include/linux/vme.h
index ea6095deba20..8c589176c2f8 100644
--- a/include/linux/vme.h
+++ b/include/linux/vme.h
@@ -113,7 +113,6 @@ struct vme_driver {
 	int (*match)(struct vme_dev *);
 	int (*probe)(struct vme_dev *);
 	int (*remove)(struct vme_dev *);
-	void (*shutdown)(void);
 	struct device_driver driver;
 	struct list_head devices;
 };
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 6abd24f258bc..833fdd4794a0 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -191,5 +191,7 @@ extern void vt_set_led_state(int console, int leds);
 extern void vt_kbd_con_start(int console);
 extern void vt_kbd_con_stop(int console);

+void vc_scrolldelta_helper(struct vc_data *c, int lines,
+		unsigned int rolled_over, void *_base, unsigned int size);

 #endif /* _VT_KERN_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index fc6e22186405..a26cc437293c 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -119,18 +119,30 @@ struct delayed_work {
 	int cpu;
 };

-/*
- * A struct for workqueue attributes. This can be used to change
- * attributes of an unbound workqueue.
+/**
+ * struct workqueue_attrs - A struct for workqueue attributes.
  *
- * Unlike other fields, ->no_numa isn't a property of a worker_pool. It
- * only modifies how apply_workqueue_attrs() select pools and thus doesn't
- * participate in pool hash calculations or equality comparisons.
+ * This can be used to change attributes of an unbound workqueue.
  */
 struct workqueue_attrs {
-	int nice;		/* nice level */
-	cpumask_var_t cpumask;	/* allowed CPUs */
-	bool no_numa;		/* disable NUMA affinity */
+	/**
+	 * @nice: nice level
+	 */
+	int nice;
+
+	/**
+	 * @cpumask: allowed CPUs
+	 */
+	cpumask_var_t cpumask;
+
+	/**
+	 * @no_numa: disable NUMA affinity
+	 *
+	 * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It
+	 * only modifies how :c:func:`apply_workqueue_attrs` select pools and thus
+	 * doesn't participate in pool hash calculations or equality comparisons.
+	 */
+	bool no_numa;
 };

 static inline struct delayed_work *to_delayed_work(struct work_struct *work)
@@ -272,7 +284,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }

 /*
  * Workqueue flags and constants.  For details, please refer to
- * Documentation/workqueue.txt.
+ * Documentation/core-api/workqueue.rst.
  */
 enum {
 	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
@@ -370,7 +382,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
  * @args...: args for @fmt
  *
  * Allocate a workqueue with the specified parameters.  For detailed
- * information on WQ_* flags, please refer to Documentation/workqueue.txt.
+ * information on WQ_* flags, please refer to
+ * Documentation/core-api/workqueue.rst.
  *
  * The __lock_name macro dance is to guarantee that single lock_class_key
  * doesn't end up with different names, which isn't allowed by lockdep.
@@ -590,14 +603,6 @@ static inline bool schedule_delayed_work(struct delayed_work *dwork,
 	return queue_delayed_work(system_wq, dwork, delay);
 }

-/**
- * keventd_up - is workqueue initialized yet?
- */
-static inline bool keventd_up(void)
-{
-	return system_wq != NULL;
-}
-
 #ifndef CONFIG_SMP
 static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 {
@@ -632,4 +637,7 @@ int workqueue_online_cpu(unsigned int cpu);
 int workqueue_offline_cpu(unsigned int cpu);
 #endif

+int __init workqueue_init_early(void);
+int __init workqueue_init(void);
+
 #endif
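The workqueue_attrs change above is kerneldoc-only, but for context, this is how the struct is consumed: attributes of an unbound workqueue are changed by allocating an attrs object, editing it, and applying it with helpers declared elsewhere in this header. A sketch with trimmed error handling ("example_wq" is hypothetical):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>

static int example_tune_unbound(struct workqueue_struct *example_wq)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	attrs->nice = -5;	/* run workers at higher priority */
	attrs->no_numa = true;	/* one pool, no per-node splitting */
	ret = apply_workqueue_attrs(example_wq, attrs);

	free_workqueue_attrs(attrs);
	return ret;
}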
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 797100e10010..c78f9f0920b5 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -9,6 +9,9 @@
 #include <linux/fs.h>
 #include <linux/flex_proportions.h>
 #include <linux/backing-dev-defs.h>
+#include <linux/blk_types.h>
+
+struct bio;

 DECLARE_PER_CPU(int, dirty_throttle_leaks);

@@ -100,6 +103,16 @@ struct writeback_control {
 #endif
 };

+static inline int wbc_to_write_flags(struct writeback_control *wbc)
+{
+	if (wbc->sync_mode == WB_SYNC_ALL)
+		return REQ_SYNC;
+	else if (wbc->for_kupdate || wbc->for_background)
+		return REQ_BACKGROUND;
+
+	return 0;
+}
+
 /*
  * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
  * and are measured against each other in.  There always is one global
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 2bb5deb0012e..7b0066814fa0 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -120,7 +120,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
 				   struct ww_class *ww_class)
 {
 	ctx->task = current;
-	ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
+	ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
 	ctx->acquired = 0;
 #ifdef CONFIG_DEBUG_MUTEXES
 	ctx->ww_class = ww_class;
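From the writeback.h hunk above: wbc_to_write_flags() centralises the translation from writeback mode to request flags, so filesystems can tag their writeback bios uniformly. A sketch of a hypothetical caller (the function and bio handling are invented for illustration):

#include <linux/bio.h>
#include <linux/writeback.h>

static void example_submit_wb_bio(struct bio *bio,
				  struct writeback_control *wbc)
{
	/* WB_SYNC_ALL -> REQ_SYNC; kupdate/background -> REQ_BACKGROUND */
	bio_set_op_attrs(bio, REQ_OP_WRITE, wbc_to_write_flags(wbc));
	submit_bio(bio);
}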