Diffstat (limited to 'include/linux')
88 files changed, 832 insertions, 391 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index d5dcebd7aad3..ca55ae00f8c9 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -669,12 +669,14 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv) return false; } -static inline const char * -acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv) +static inline struct acpi_device * +acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv) { return NULL; } +static inline void acpi_dev_put(struct acpi_device *adev) {} + static inline bool is_acpi_node(struct fwnode_handle *fwnode) { return false; diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index 38cd77b39a64..723e4dfa1c14 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -26,6 +26,14 @@ #define IORT_IRQ_MASK(irq) (irq & 0xffffffffULL) #define IORT_IRQ_TRIGGER_MASK(irq) ((irq >> 32) & 0xffffffffULL) +/* + * PMCG model identifiers for use in smmu pmu driver. Please note + * that this is purely for the use of software and has nothing to + * do with hardware or with IORT specification. + */ +#define IORT_SMMU_V3_PMCG_GENERIC 0x00000000 /* Generic SMMUv3 PMCG */ +#define IORT_SMMU_V3_PMCG_HISI_HIP08 0x00000001 /* HiSilicon HIP08 PMCG */ + int iort_register_domain_token(int trans_id, phys_addr_t base, struct fwnode_handle *fw_node); void iort_deregister_domain_token(int trans_id); diff --git a/include/linux/bio.h b/include/linux/bio.h index bb6090aa165d..e584673c1881 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -120,19 +120,23 @@ static inline bool bio_full(struct bio *bio) return bio->bi_vcnt >= bio->bi_max_vecs; } -#define mp_bvec_for_each_segment(bv, bvl, i, iter_all) \ - for (bv = bvec_init_iter_all(&iter_all); \ - (iter_all.done < (bvl)->bv_len) && \ - (mp_bvec_next_segment((bvl), &iter_all), 1); \ - iter_all.done += bv->bv_len, i += 1) +static inline bool bio_next_segment(const struct bio *bio, + struct bvec_iter_all *iter) +{ + if (iter->idx >= bio->bi_vcnt) + return false; + + bvec_advance(&bio->bi_io_vec[iter->idx], iter); + return true; +} /* * drivers should _never_ use the all version - the bio may have been split * before it got to the driver and the driver won't own all of it */ -#define bio_for_each_segment_all(bvl, bio, i, iter_all) \ - for (i = 0, iter_all.idx = 0; iter_all.idx < (bio)->bi_vcnt; iter_all.idx++) \ - mp_bvec_for_each_segment(bvl, &((bio)->bi_io_vec[iter_all.idx]), i, iter_all) +#define bio_for_each_segment_all(bvl, bio, i, iter) \ + for (i = 0, bvl = bvec_init_iter_all(&iter); \ + bio_next_segment((bio), &iter); i++) static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, unsigned bytes) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index cb2aa7ecafff..db29928de467 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -302,6 +302,7 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); bool blk_mq_complete_request(struct request *rq); +void blk_mq_complete_request_sync(struct request *rq); bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, struct bio *bio); bool blk_mq_queue_stopped(struct request_queue *q); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 5c58a3b2bf00..317ab30d2904 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -548,7 +548,6 @@ 
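The reworked bio_for_each_segment_all() above now drives the loop with bio_next_segment() and bvec_advance(), but the caller-visible contract is otherwise unchanged. A minimal caller-side sketch, not part of the diff, with flush_dcache_page() standing in for whatever per-page work a driver actually does:

#include <linux/bio.h>
#include <linux/highmem.h>

/* Illustration only: visit each single-page segment of a bio exactly once. */
static void example_touch_bio_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;
	int i;

	bio_for_each_segment_all(bvec, bio, i, iter_all)
		flush_dcache_page(bvec->bv_page);	/* one page per iteration */
}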
struct request_queue { struct rcu_head rcu_head; wait_queue_head_t mq_freeze_wq; struct percpu_ref q_usage_counter; - struct list_head all_q_node; struct blk_mq_tag_set *tag_set; struct list_head tag_set_list; diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f02367faa58d..944ccc310201 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -510,7 +510,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, } \ _out: \ rcu_read_unlock(); \ - preempt_enable_no_resched(); \ + preempt_enable(); \ _ret; \ }) diff --git a/include/linux/bvec.h b/include/linux/bvec.h index f6275c4da13a..ff13cbc1887d 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -145,26 +145,33 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv, static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all) { - iter_all->bv.bv_page = NULL; iter_all->done = 0; + iter_all->idx = 0; return &iter_all->bv; } -static inline void mp_bvec_next_segment(const struct bio_vec *bvec, - struct bvec_iter_all *iter_all) +static inline void bvec_advance(const struct bio_vec *bvec, + struct bvec_iter_all *iter_all) { struct bio_vec *bv = &iter_all->bv; - if (bv->bv_page) { + if (iter_all->done) { bv->bv_page = nth_page(bv->bv_page, 1); bv->bv_offset = 0; } else { - bv->bv_page = bvec->bv_page; - bv->bv_offset = bvec->bv_offset; + bv->bv_page = bvec_nth_page(bvec->bv_page, bvec->bv_offset / + PAGE_SIZE); + bv->bv_offset = bvec->bv_offset & ~PAGE_MASK; } bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset, bvec->bv_len - iter_all->done); + iter_all->done += bv->bv_len; + + if (iter_all->done == bvec->bv_len) { + iter_all->idx++; + iter_all->done = 0; + } } /* diff --git a/include/linux/clk.h b/include/linux/clk.h index d8bc1a856b39..f689fc58d7be 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -811,6 +811,22 @@ static inline bool clk_has_parent(struct clk *clk, struct clk *parent) return true; } +static inline int clk_set_rate_range(struct clk *clk, unsigned long min, + unsigned long max) +{ + return 0; +} + +static inline int clk_set_min_rate(struct clk *clk, unsigned long rate) +{ + return 0; +} + +static inline int clk_set_max_rate(struct clk *clk, unsigned long rate) +{ + return 0; +} + static inline int clk_set_parent(struct clk *clk, struct clk *parent) { return 0; diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 445348facea9..d58aa0db05f9 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -67,7 +67,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, .line = __LINE__, \ }; \ ______r = !!(cond); \ - ______f.miss_hit[______r]++; \ + ______r ? 
______f.miss_hit[1]++ : ______f.miss_hit[0]++;\ ______r; \ })) #endif /* CONFIG_PROFILE_ALL_BRANCHES */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 5041357d0297..732745f865b7 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -137,9 +137,26 @@ static inline int disable_nonboot_cpus(void) return freeze_secondary_cpus(0); } extern void enable_nonboot_cpus(void); + +static inline int suspend_disable_secondary_cpus(void) +{ + int cpu = 0; + + if (IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) + cpu = -1; + + return freeze_secondary_cpus(cpu); +} +static inline void suspend_enable_secondary_cpus(void) +{ + return enable_nonboot_cpus(); +} + #else /* !CONFIG_PM_SLEEP_SMP */ static inline int disable_nonboot_cpus(void) { return 0; } static inline void enable_nonboot_cpus(void) {} +static inline int suspend_disable_secondary_cpus(void) { return 0; } +static inline void suspend_enable_secondary_cpus(void) { } #endif /* !CONFIG_PM_SLEEP_SMP */ void cpu_startup_entry(enum cpuhp_state state); @@ -175,6 +192,7 @@ enum cpuhp_smt_control { CPU_SMT_DISABLED, CPU_SMT_FORCE_DISABLED, CPU_SMT_NOT_SUPPORTED, + CPU_SMT_NOT_IMPLEMENTED, }; #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT) @@ -182,9 +200,33 @@ extern enum cpuhp_smt_control cpu_smt_control; extern void cpu_smt_disable(bool force); extern void cpu_smt_check_topology(void); #else -# define cpu_smt_control (CPU_SMT_ENABLED) +# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED) static inline void cpu_smt_disable(bool force) { } static inline void cpu_smt_check_topology(void) { } #endif +/* + * These are used for a global "mitigations=" cmdline option for toggling + * optional CPU mitigations. + */ +enum cpu_mitigations { + CPU_MITIGATIONS_OFF, + CPU_MITIGATIONS_AUTO, + CPU_MITIGATIONS_AUTO_NOSMT, +}; + +extern enum cpu_mitigations cpu_mitigations; + +/* mitigations=off */ +static inline bool cpu_mitigations_off(void) +{ + return cpu_mitigations == CPU_MITIGATIONS_OFF; +} + +/* mitigations=auto,nosmt */ +static inline bool cpu_mitigations_auto_nosmt(void) +{ + return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT; +} + #endif /* _LINUX_CPU_H_ */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index b160e98076e3..684caf067003 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -178,6 +178,11 @@ static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { } #endif +static inline bool policy_is_inactive(struct cpufreq_policy *policy) +{ + return cpumask_empty(policy->cpus); +} + static inline bool policy_is_shared(struct cpufreq_policy *policy) { return cpumask_weight(policy->cpus) > 1; @@ -193,8 +198,14 @@ unsigned int cpufreq_quick_get_max(unsigned int cpu); void disable_cpufreq(void); u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); + +struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu); +void cpufreq_cpu_release(struct cpufreq_policy *policy); int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); +int cpufreq_set_policy(struct cpufreq_policy *policy, + struct cpufreq_policy *new_policy); void cpufreq_update_policy(unsigned int cpu); +void cpufreq_update_limits(unsigned int cpu); bool have_governor_per_policy(void); struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); void cpufreq_enable_fast_switch(struct cpufreq_policy *policy); @@ -322,6 +333,9 @@ struct cpufreq_driver { /* should be defined, if possible */ unsigned int (*get)(unsigned int cpu); + 
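The cpu_mitigations_off()/cpu_mitigations_auto_nosmt() helpers added above let arch code honour the new global mitigations= option. A hedged sketch, not from the diff, of how a mitigation selector might consult them; the actual enable path is only indicated by a comment:

#include <linux/cpu.h>

static void example_select_mitigation(void)
{
	if (cpu_mitigations_off())
		return;			/* mitigations=off: leave the CPU bug unmitigated */

	/* ... enable the arch's default mitigation here ... */

	if (cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);	/* mitigations=auto,nosmt */
}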
/* Called to update policy limits on firmware notifications. */ + void (*update_limits)(unsigned int cpu); + /* optional */ int (*bios_limit)(int cpu, unsigned int *limit); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index e78281d07b70..dbfdd0fadbef 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -147,6 +147,7 @@ enum cpuhp_state { CPUHP_AP_X86_VDSO_VMA_ONLINE, CPUHP_AP_IRQ_AFFINITY_ONLINE, CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS, + CPUHP_AP_X86_INTEL_EPB_ONLINE, CPUHP_AP_PERF_ONLINE, CPUHP_AP_PERF_X86_ONLINE, CPUHP_AP_PERF_X86_UNCORE_ONLINE, diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 3b39472324a3..bb9a0db89f1a 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -83,6 +83,7 @@ struct cpuidle_device { unsigned int use_deepest_state:1; unsigned int poll_time_limit:1; unsigned int cpu; + ktime_t next_hrtimer; int last_residency; struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; diff --git a/include/linux/cred.h b/include/linux/cred.h index ddd45bb74887..efb6edf32de7 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -138,7 +138,7 @@ struct cred { #ifdef CONFIG_KEYS unsigned char jit_keyring; /* default keyring to attach requested * keys to */ - struct key __rcu *session_keyring; /* keyring inherited over fork */ + struct key *session_keyring; /* keyring inherited over fork */ struct key *process_keyring; /* keyring private to this process */ struct key *thread_keyring; /* keyring private to this thread */ struct key *request_key_auth; /* assumed request_key authority */ diff --git a/include/linux/dmi.h b/include/linux/dmi.h index c46fdb36700b..8de8c4f15163 100644 --- a/include/linux/dmi.h +++ b/include/linux/dmi.h @@ -102,9 +102,7 @@ const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list); extern const char * dmi_get_system_info(int field); extern const struct dmi_device * dmi_find_device(int type, const char *name, const struct dmi_device *from); -extern void dmi_scan_machine(void); -extern void dmi_memdev_walk(void); -extern void dmi_set_dump_stack_arch_desc(void); +extern void dmi_setup(void); extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp); extern int dmi_get_bios_year(void); extern int dmi_name_in_vendors(const char *str); @@ -122,9 +120,7 @@ static inline int dmi_check_system(const struct dmi_system_id *list) { return 0; static inline const char * dmi_get_system_info(int field) { return NULL; } static inline const struct dmi_device * dmi_find_device(int type, const char *name, const struct dmi_device *from) { return NULL; } -static inline void dmi_scan_machine(void) { return; } -static inline void dmi_memdev_walk(void) { } -static inline void dmi_set_dump_stack_arch_desc(void) { } +static inline void dmi_setup(void) { } static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp) { if (yearp) diff --git a/include/linux/efi.h b/include/linux/efi.h index 54357a258b35..6ebc2098cfe1 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -1611,7 +1611,12 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg, struct screen_info *si, efi_guid_t *proto, unsigned long size); -bool efi_runtime_disabled(void); +#ifdef CONFIG_EFI +extern bool efi_runtime_disabled(void); +#else +static inline bool efi_runtime_disabled(void) { return true; } +#endif + extern void efi_call_virt_check_flags(unsigned long flags, const char *call); extern unsigned long efi_call_virt_save_flags(void); diff --git 
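The new ->update_limits() driver callback and cpufreq_update_limits() above give firmware-notification handlers a lightweight way to refresh policy limits. A hypothetical driver fragment under stated assumptions (only the relevant fields are shown; .init, .get, .target and friends are omitted):

#include <linux/cpufreq.h>

static void example_update_limits(unsigned int cpu)
{
	/* Re-read the firmware-provided min/max for this CPU and refresh the policy. */
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	.update_limits	= example_update_limits,
};

A platform notification handler would then simply call cpufreq_update_limits(cpu), which ends up in the driver's callback when one is provided.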
a/include/linux/elevator.h b/include/linux/elevator.h index 2e9e2763bf47..6e8bc53740f0 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -31,6 +31,7 @@ struct elevator_mq_ops { void (*exit_sched)(struct elevator_queue *); int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); + void (*depth_updated)(struct blk_mq_hw_ctx *); bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *); diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index e2f3b21cd72a..aa8bfd6f738c 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -449,6 +449,18 @@ static inline void eth_addr_dec(u8 *addr) } /** + * eth_addr_inc() - Increment the given MAC address. + * @addr: Pointer to a six-byte array containing Ethernet address to increment. + */ +static inline void eth_addr_inc(u8 *addr) +{ + u64 u = ether_addr_to_u64(addr); + + u++; + u64_to_ether_addr(u, addr); +} + +/** * is_etherdev_addr - Tell if given Ethernet address belongs to the device. * @dev: Pointer to a device structure * @addr: Pointer to a six-byte array containing the Ethernet address diff --git a/include/linux/filter.h b/include/linux/filter.h index 6074aa064b54..7d3abde3f183 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -20,6 +20,7 @@ #include <linux/set_memory.h> #include <linux/kallsyms.h> #include <linux/if_vlan.h> +#include <linux/vmalloc.h> #include <net/sch_generic.h> @@ -503,7 +504,6 @@ struct bpf_prog { u16 pages; /* Number of allocated pages */ u16 jited:1, /* Is our filter JIT'ed? */ jit_requested:1,/* archs need to JIT the prog */ - undo_set_mem:1, /* Passed set_memory_ro() checkpoint */ gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ dst_needed:1, /* Do we need dst entry? */ @@ -733,24 +733,15 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) static inline void bpf_prog_lock_ro(struct bpf_prog *fp) { - fp->undo_set_mem = 1; + set_vm_flush_reset_perms(fp); set_memory_ro((unsigned long)fp, fp->pages); } -static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) -{ - if (fp->undo_set_mem) - set_memory_rw((unsigned long)fp, fp->pages); -} - static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) { + set_vm_flush_reset_perms(hdr); set_memory_ro((unsigned long)hdr, hdr->pages); -} - -static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) -{ - set_memory_rw((unsigned long)hdr, hdr->pages); + set_memory_x((unsigned long)hdr, hdr->pages); } static inline struct bpf_binary_header * @@ -788,7 +779,6 @@ void __bpf_prog_free(struct bpf_prog *fp); static inline void bpf_prog_unlock_free(struct bpf_prog *fp) { - bpf_prog_unlock_ro(fp); __bpf_prog_free(fp); } diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h index 5be5dab50b13..01684d935580 100644 --- a/include/linux/firmware/intel/stratix10-smc.h +++ b/include/linux/firmware/intel/stratix10-smc.h @@ -309,4 +309,23 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_FUNCID_RSU_UPDATE 12 #define INTEL_SIP_SMC_RSU_UPDATE \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_UPDATE) + +/* + * Request INTEL_SIP_SMC_ECC_DBE + * + * Sync call used by service driver at EL1 to alert EL3 that a Double + * Bit ECC error has occurred. 
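eth_addr_inc() above is the counterpart of the existing eth_addr_dec(); both round-trip through the u64 helpers. A small sketch, not from the diff, that derives consecutive addresses from an arbitrary locally administered base:

#include <linux/etherdevice.h>
#include <linux/printk.h>

static void example_print_sequential_macs(void)
{
	u8 addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00 };	/* arbitrary base */
	int i;

	for (i = 0; i < 4; i++) {
		pr_info("MAC %d: %pM\n", i, addr);	/* base, base+1, base+2, base+3 */
		eth_addr_inc(addr);
	}
}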
+ * + * Call register usage: + * a0 INTEL_SIP_SMC_ECC_DBE + * a1 SysManager Double Bit Error value + * a2-7 not used + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + */ +#define INTEL_SIP_SMC_FUNCID_ECC_DBE 13 +#define INTEL_SIP_SMC_ECC_DBE \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECC_DBE) + #endif diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 730876187344..20899919ead8 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -241,21 +241,11 @@ static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { #ifdef CONFIG_STACK_TRACER -#define STACK_TRACE_ENTRIES 500 - -struct stack_trace; - -extern unsigned stack_trace_index[]; -extern struct stack_trace stack_trace_max; -extern unsigned long stack_trace_max_size; -extern arch_spinlock_t stack_trace_max_lock; - extern int stack_tracer_enabled; -void stack_trace_print(void); -int -stack_trace_sysctl(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); + +int stack_trace_sysctl(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); /* DO NOT MODIFY THIS VARIABLE DIRECTLY! */ DECLARE_PER_CPU(int, disable_stack_tracer); diff --git a/include/linux/hid.h b/include/linux/hid.h index f9707d1dcb58..ae9da674b749 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -382,6 +382,7 @@ struct hid_item { #define HID_GROUP_WACOM 0x0101 #define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102 #define HID_GROUP_STEAM 0x0103 +#define HID_GROUP_LOGITECH_27MHZ_DEVICE 0x0104 /* * HID protocol status @@ -417,6 +418,7 @@ struct hid_global { struct hid_local { unsigned usage[HID_MAX_USAGES]; /* usage array */ + u8 usage_size[HID_MAX_USAGES]; /* usage size array */ unsigned collection_index[HID_MAX_USAGES]; /* collection index array */ unsigned usage_index; unsigned usage_minimum; @@ -893,7 +895,7 @@ struct hid_field *hidinput_get_led_field(struct hid_device *hid); unsigned int hidinput_count_leds(struct hid_device *hid); __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code); void hid_output_report(struct hid_report *report, __u8 *data); -void __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype); +int __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype); u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags); struct hid_device *hid_allocate_device(void); struct hid_report *hid_register_report(struct hid_device *device, diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index 99e0c1b0b5fb..2b949fa501e1 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -40,6 +40,11 @@ enum hwmon_chip_attributes { hwmon_chip_register_tz, hwmon_chip_update_interval, hwmon_chip_alarms, + hwmon_chip_samples, + hwmon_chip_curr_samples, + hwmon_chip_in_samples, + hwmon_chip_power_samples, + hwmon_chip_temp_samples, }; #define HWMON_C_TEMP_RESET_HISTORY BIT(hwmon_chip_temp_reset_history) @@ -49,6 +54,11 @@ enum hwmon_chip_attributes { #define HWMON_C_REGISTER_TZ BIT(hwmon_chip_register_tz) #define HWMON_C_UPDATE_INTERVAL BIT(hwmon_chip_update_interval) #define HWMON_C_ALARMS BIT(hwmon_chip_alarms) +#define HWMON_C_SAMPLES BIT(hwmon_chip_samples) +#define HWMON_C_CURR_SAMPLES BIT(hwmon_chip_curr_samples) +#define HWMON_C_IN_SAMPLES BIT(hwmon_chip_in_samples) +#define HWMON_C_POWER_SAMPLES BIT(hwmon_chip_power_samples) +#define HWMON_C_TEMP_SAMPLES BIT(hwmon_chip_temp_samples) enum hwmon_temp_attributes { hwmon_temp_input = 0, @@ -365,6 +375,14 @@ struct 
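The register usage documented for INTEL_SIP_SMC_ECC_DBE above maps directly onto an SMCCC fast call. A hedged sketch of issuing the call with a raw arm_smccc_smc(); an actual driver may well route this through the Stratix10 service layer instead:

#include <linux/arm-smccc.h>
#include <linux/kernel.h>
#include <linux/firmware/intel/stratix10-smc.h>

static void example_report_double_bit_error(u64 dbe_value)
{
	struct arm_smccc_res res;

	/* a0 = function ID, a1 = SysManager double-bit error value, a2-a7 unused */
	arm_smccc_smc(INTEL_SIP_SMC_ECC_DBE, dbe_value, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0 != INTEL_SIP_SMC_STATUS_OK)
		pr_warn("EL3 did not accept the double-bit ECC report\n");
}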
hwmon_channel_info { const u32 *config; }; +#define HWMON_CHANNEL_INFO(stype, ...) \ + (&(struct hwmon_channel_info) { \ + .type = hwmon_##stype, \ + .config = (u32 []) { \ + __VA_ARGS__, 0 \ + } \ + }) + /** * Chip configuration * @ops: Pointer to hwmon operations. diff --git a/include/linux/ima.h b/include/linux/ima.h index dc12fbcf484c..fd9f7cf4cdf5 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -31,7 +31,7 @@ extern void ima_post_path_mknod(struct dentry *dentry); extern void ima_add_kexec_buffer(struct kimage *image); #endif -#if defined(CONFIG_X86) && defined(CONFIG_EFI) +#if (defined(CONFIG_X86) && defined(CONFIG_EFI)) || defined(CONFIG_S390) extern bool arch_ima_get_secureboot(void); extern const char * const *arch_get_ima_policy(void); #else diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h new file mode 100644 index 000000000000..16255c2ca2f4 --- /dev/null +++ b/include/linux/intel-ish-client-if.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Intel ISH client Interface definitions + * + * Copyright (c) 2019, Intel Corporation. + */ + +#ifndef _INTEL_ISH_CLIENT_IF_H_ +#define _INTEL_ISH_CLIENT_IF_H_ + +struct ishtp_cl_device; +struct ishtp_device; +struct ishtp_cl; +struct ishtp_fw_client; + +/* Client state */ +enum cl_state { + ISHTP_CL_INITIALIZING = 0, + ISHTP_CL_CONNECTING, + ISHTP_CL_CONNECTED, + ISHTP_CL_DISCONNECTING, + ISHTP_CL_DISCONNECTED +}; + +/** + * struct ishtp_cl_device - ISHTP device handle + * @driver: driver instance on a bus + * @name: Name of the device for probe + * @probe: driver callback for device probe + * @remove: driver callback on device removal + * + * Client drivers defines to get probed/removed for ISHTP client device. + */ +struct ishtp_cl_driver { + struct device_driver driver; + const char *name; + const guid_t *guid; + int (*probe)(struct ishtp_cl_device *dev); + int (*remove)(struct ishtp_cl_device *dev); + int (*reset)(struct ishtp_cl_device *dev); + const struct dev_pm_ops *pm; +}; + +/** + * struct ishtp_msg_data - ISHTP message data struct + * @size: Size of data in the *data + * @data: Pointer to data + */ +struct ishtp_msg_data { + uint32_t size; + unsigned char *data; +}; + +/* + * struct ishtp_cl_rb - request block structure + * @list: Link to list members + * @cl: ISHTP client instance + * @buffer: message header + * @buf_idx: Index into buffer + * @read_time: unused at this time + */ +struct ishtp_cl_rb { + struct list_head list; + struct ishtp_cl *cl; + struct ishtp_msg_data buffer; + unsigned long buf_idx; + unsigned long read_time; +}; + +int ishtp_cl_driver_register(struct ishtp_cl_driver *driver, + struct module *owner); +void ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver); +int ishtp_register_event_cb(struct ishtp_cl_device *device, + void (*read_cb)(struct ishtp_cl_device *)); + +/* Get the device * from ishtp device instance */ +struct device *ishtp_device(struct ishtp_cl_device *cl_device); +/* Trace interface for clients */ +void *ishtp_trace_callback(struct ishtp_cl_device *cl_device); +/* Get device pointer of PCI device for DMA acces */ +struct device *ishtp_get_pci_device(struct ishtp_cl_device *cl_device); + +struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device); +void ishtp_cl_free(struct ishtp_cl *cl); +int ishtp_cl_link(struct ishtp_cl *cl); +void ishtp_cl_unlink(struct ishtp_cl *cl); +int ishtp_cl_disconnect(struct ishtp_cl *cl); +int ishtp_cl_connect(struct ishtp_cl *cl); +int ishtp_cl_send(struct ishtp_cl *cl, 
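The HWMON_CHANNEL_INFO() macro above collapses a per-channel config array and its wrapping struct hwmon_channel_info into one expression. A sketch of how a driver's info table looks with it (the channel choices here are arbitrary):

#include <linux/hwmon.h>

static const struct hwmon_channel_info *example_hwmon_info[] = {
	HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX, HWMON_T_INPUT),
	NULL
};

The resulting array plugs into struct hwmon_chip_info's .info member as before.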
uint8_t *buf, size_t length); +int ishtp_cl_flush_queues(struct ishtp_cl *cl); +int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb); +bool ishtp_cl_tx_empty(struct ishtp_cl *cl); +struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl); +void *ishtp_get_client_data(struct ishtp_cl *cl); +void ishtp_set_client_data(struct ishtp_cl *cl, void *data); +struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl); +void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size); +void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size); +void ishtp_set_connection_state(struct ishtp_cl *cl, int state); +void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id); + +void ishtp_put_device(struct ishtp_cl_device *cl_dev); +void ishtp_get_device(struct ishtp_cl_device *cl_dev); +void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data); +void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device); +int ishtp_register_event_cb(struct ishtp_cl_device *device, + void (*read_cb)(struct ishtp_cl_device *)); +struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev, + const guid_t *uuid); +int ishtp_get_fw_client_id(struct ishtp_fw_client *fw_client); +int ish_hw_reset(struct ishtp_device *dev); +#endif /* _INTEL_ISH_CLIENT_IF_H_ */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 690b238a44d5..c7eef32e7739 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -668,31 +668,6 @@ extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); extern void tasklet_init(struct tasklet_struct *t, void (*func)(unsigned long), unsigned long data); -struct tasklet_hrtimer { - struct hrtimer timer; - struct tasklet_struct tasklet; - enum hrtimer_restart (*function)(struct hrtimer *); -}; - -extern void -tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer, - enum hrtimer_restart (*function)(struct hrtimer *), - clockid_t which_clock, enum hrtimer_mode mode); - -static inline -void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time, - const enum hrtimer_mode mode) -{ - hrtimer_start(&ttimer->timer, time, mode); -} - -static inline -void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer) -{ - hrtimer_cancel(&ttimer->timer); - tasklet_kill(&ttimer->tasklet); -} - /* * Autoprobing for irqs: * diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 0f919d5fe84f..c2ffff5f9ae2 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1606,7 +1606,6 @@ static inline u32 jbd2_chksum(journal_t *journal, u32 crc, JBD_MAX_CHECKSUM_SIZE); desc.shash.tfm = journal->j_chksum_driver; - desc.shash.flags = 0; *(u32 *)desc.ctx = crc; err = crypto_shash_update(&desc.shash, address, length); diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h index a49f2b45b3f0..42710d5949ba 100644 --- a/include/linux/jump_label_ratelimit.h +++ b/include/linux/jump_label_ratelimit.h @@ -12,21 +12,79 @@ struct static_key_deferred { struct delayed_work work; }; -extern void static_key_slow_dec_deferred(struct static_key_deferred *key); -extern void static_key_deferred_flush(struct static_key_deferred *key); +struct static_key_true_deferred { + struct static_key_true key; + unsigned long timeout; + struct delayed_work work; +}; + +struct static_key_false_deferred { + struct static_key_false key; + unsigned long timeout; + struct delayed_work work; +}; + +#define static_key_slow_dec_deferred(x) \ + __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout) +#define 
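The new intel-ish-client-if.h header above is the whole client-facing surface of the ISHTP bus. A skeletal client registration under stated assumptions: the GUID is a zero placeholder (a real client uses the GUID exported by the ISH firmware), and the probe/remove bodies are reduced to comments:

#include <linux/init.h>
#include <linux/intel-ish-client-if.h>
#include <linux/module.h>
#include <linux/uuid.h>

static const guid_t example_guid = GUID_INIT(0x00000000, 0x0000, 0x0000,
					     0x00, 0x00, 0x00, 0x00,
					     0x00, 0x00, 0x00, 0x00);

static int example_probe(struct ishtp_cl_device *cl_device)
{
	/* ishtp_cl_allocate(), ishtp_cl_link(), ishtp_cl_connect() would go here */
	return 0;
}

static int example_remove(struct ishtp_cl_device *cl_device)
{
	/* ishtp_cl_disconnect(), ishtp_cl_unlink(), ishtp_cl_free() would go here */
	return 0;
}

static struct ishtp_cl_driver example_ishtp_driver = {
	.name	= "example-ishtp-client",
	.guid	= &example_guid,
	.probe	= example_probe,
	.remove	= example_remove,
};

static int __init example_init(void)
{
	return ishtp_cl_driver_register(&example_ishtp_driver, THIS_MODULE);
}
module_init(example_init);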
static_branch_slow_dec_deferred(x) \ + __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout) + +#define static_key_deferred_flush(x) \ + __static_key_deferred_flush((x), &(x)->work) + +extern void +__static_key_slow_dec_deferred(struct static_key *key, + struct delayed_work *work, + unsigned long timeout); +extern void __static_key_deferred_flush(void *key, struct delayed_work *work); extern void jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl); +extern void jump_label_update_timeout(struct work_struct *work); + +#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \ + struct static_key_true_deferred name = { \ + .key = { STATIC_KEY_INIT_TRUE }, \ + .timeout = (rl), \ + .work = __DELAYED_WORK_INITIALIZER((name).work, \ + jump_label_update_timeout, \ + 0), \ + } + +#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \ + struct static_key_false_deferred name = { \ + .key = { STATIC_KEY_INIT_FALSE }, \ + .timeout = (rl), \ + .work = __DELAYED_WORK_INITIALIZER((name).work, \ + jump_label_update_timeout, \ + 0), \ + } + +#define static_branch_deferred_inc(x) static_branch_inc(&(x)->key) + #else /* !CONFIG_JUMP_LABEL */ struct static_key_deferred { struct static_key key; }; +struct static_key_true_deferred { + struct static_key_true key; +}; +struct static_key_false_deferred { + struct static_key_false key; +}; +#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \ + struct static_key_true_deferred name = { STATIC_KEY_TRUE_INIT } +#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \ + struct static_key_false_deferred name = { STATIC_KEY_FALSE_INIT } + +#define static_branch_slow_dec_deferred(x) static_branch_dec(&(x)->key) + static inline void static_key_slow_dec_deferred(struct static_key_deferred *key) { STATIC_KEY_CHECK_USE(key); static_key_slow_dec(&key->key); } -static inline void static_key_deferred_flush(struct static_key_deferred *key) +static inline void static_key_deferred_flush(void *key) { STATIC_KEY_CHECK_USE(key); } diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 34a5036debd3..2d14e21c16c0 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -47,8 +47,8 @@ #define u64_to_user_ptr(x) ( \ { \ - typecheck(u64, x); \ - (void __user *)(uintptr_t)x; \ + typecheck(u64, (x)); \ + (void __user *)(uintptr_t)(x); \ } \ ) diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 201f0f2683f2..9a897256e481 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -173,6 +173,7 @@ struct kretprobe_instance { struct kretprobe *rp; kprobe_opcode_t *ret_addr; struct task_struct *task; + void *fp; char data[0]; }; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9d55c63db09b..640a03642766 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -28,6 +28,7 @@ #include <linux/irqbypass.h> #include <linux/swait.h> #include <linux/refcount.h> +#include <linux/nospec.h> #include <asm/signal.h> #include <linux/kvm.h> @@ -513,10 +514,10 @@ static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx) static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) { - /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case - * the caller has read kvm->online_vcpus before (as is the case - * for kvm_for_each_vcpu, for example). - */ + int num_vcpus = atomic_read(&kvm->online_vcpus); + i = array_index_nospec(i, num_vcpus); + + /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. 
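The DEFINE_STATIC_KEY_DEFERRED_*() machinery above mirrors plain static branches but rate-limits how quickly the deferred 'dec' side can flip the branch back. A hedged usage sketch assuming CONFIG_JUMP_LABEL and a one-second rate limit:

#include <linux/jiffies.h>
#include <linux/jump_label_ratelimit.h>

static DEFINE_STATIC_KEY_DEFERRED_FALSE(example_key, HZ);

static void example_feature_enable(void)
{
	static_branch_deferred_inc(&example_key);	/* takes effect immediately */
}

static void example_feature_disable(void)
{
	static_branch_slow_dec_deferred(&example_key);	/* applied after the HZ timeout */
}

static bool example_fast_path(void)
{
	return static_branch_unlikely(&example_key.key);
}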
*/ smp_rmb(); return kvm->vcpus[i]; } @@ -600,6 +601,7 @@ void kvm_put_kvm(struct kvm *kvm); static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) { + as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM); return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu, lockdep_is_held(&kvm->slots_lock) || !refcount_read(&kvm->users_count)); diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 53551f470722..a14bab1a0a3e 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -86,7 +86,6 @@ struct klp_func { struct list_head node; struct list_head stack_node; unsigned long old_size, new_size; - bool kobj_added; bool nop; bool patched; bool transition; @@ -141,7 +140,6 @@ struct klp_object { struct list_head func_list; struct list_head node; struct module *mod; - bool kobj_added; bool dynamic; bool patched; }; @@ -170,7 +168,6 @@ struct klp_patch { struct list_head list; struct kobject kobj; struct list_head obj_list; - bool kobj_added; bool enabled; bool forced; struct work_struct free_work; diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 79c3873d58ac..6e2377e6c1d6 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -66,6 +66,11 @@ struct lock_class_key { extern struct lock_class_key __lockdep_no_validate__; +struct lock_trace { + unsigned int nr_entries; + unsigned int offset; +}; + #define LOCKSTAT_POINTS 4 /* @@ -100,7 +105,7 @@ struct lock_class { * IRQ/softirq usage tracking bits: */ unsigned long usage_mask; - struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES]; + struct lock_trace usage_traces[XXX_LOCK_USAGE_STATES]; /* * Generation counter, when doing certain classes of graph walking, @@ -188,7 +193,7 @@ struct lock_list { struct list_head entry; struct lock_class *class; struct lock_class *links_to; - struct stack_trace trace; + struct lock_trace trace; int distance; /* @@ -471,7 +476,7 @@ struct pin_cookie { }; #define NIL_COOKIE (struct pin_cookie){ } -#define lockdep_pin_lock(l) ({ struct pin_cookie cookie; cookie; }) +#define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; }) #define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0) #define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0) diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index a9b8ff578b6b..a240a3fc5fc4 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -127,7 +127,6 @@ * options cleanly (a filesystem may modify the data e.g. with strsep()). * This also allows the original mount data to be stripped of security- * specific options to avoid having to make filesystems aware of them. - * @type the type of filesystem being mounted. * @orig the original mount data copied from userspace. * @copy copied data which will be passed to the security module. * Returns 0 if the copy was successful. @@ -320,10 +319,11 @@ * @new_dentry contains the dentry structure of the new link. * Return 0 if permission is granted. * @path_chmod: - * Check for permission to change DAC's permission of a file or directory. - * @dentry contains the dentry structure. - * @mnt contains the vfsmnt structure. - * @mode contains DAC's mode. + * Check for permission to change a mode of the file @path. The new + * mode is specified in @mode. + * @path contains the path structure of the file to change the mode. + * @mode contains the new DAC's permission, which is a bitmask of + * constants from <include/uapi/linux/stat.h> * Return 0 if permission is granted. 
* @path_chown: * Check for permission to change owner/group of a file or directory. @@ -502,7 +502,7 @@ * Return 0 if permission is granted. * @file_lock: * Check permission before performing file locking operations. - * Note: this hook mediates both flock and fcntl style locks. + * Note the hook mediates both flock and fcntl style locks. * @file contains the file structure. * @cmd contains the posix-translated lock operation to perform * (e.g. F_RDLCK, F_WRLCK). @@ -645,12 +645,12 @@ * @p contains the task_struct of process. * @nice contains the new nice value. * Return 0 if permission is granted. - * @task_setioprio + * @task_setioprio: * Check permission before setting the ioprio value of @p to @ioprio. * @p contains the task_struct of process. * @ioprio contains the new ioprio value * Return 0 if permission is granted. - * @task_getioprio + * @task_getioprio: * Check permission before getting the ioprio value of @p. * @p contains the task_struct of process. * Return 0 if permission is granted. @@ -672,17 +672,15 @@ * Return 0 if permission is granted. * @task_setscheduler: * Check permission before setting scheduling policy and/or parameters of - * process @p based on @policy and @lp. + * process @p. * @p contains the task_struct for process. - * @policy contains the scheduling policy. - * @lp contains the scheduling parameters. * Return 0 if permission is granted. * @task_getscheduler: * Check permission before obtaining scheduling information for process * @p. * @p contains the task_struct for process. * Return 0 if permission is granted. - * @task_movememory + * @task_movememory: * Check permission before moving memory owned by process @p. * @p contains the task_struct for process. * Return 0 if permission is granted. @@ -769,9 +767,9 @@ * socket structure, but rather, the socket security information is stored * in the associated inode. Typically, the inode alloc_security hook will * allocate and and attach security information to - * sock->inode->i_security. This hook may be used to update the - * sock->inode->i_security field with additional information that wasn't - * available when the inode was allocated. + * SOCK_INODE(sock)->i_security. This hook may be used to update the + * SOCK_INODE(sock)->i_security field with additional information that + * wasn't available when the inode was allocated. * @sock contains the newly created socket structure. * @family contains the requested protocol family. * @type contains the requested communications type. @@ -876,13 +874,13 @@ * @socket_getpeersec_dgram: * This hook allows the security module to provide peer socket security * state for udp sockets on a per-packet basis to userspace via - * getsockopt SO_GETPEERSEC. The application must first have indicated - * the IP_PASSSEC option via getsockopt. It can then retrieve the + * getsockopt SO_GETPEERSEC. The application must first have indicated + * the IP_PASSSEC option via getsockopt. It can then retrieve the * security state returned by this hook for a packet via the SCM_SECURITY * ancillary message type. - * @skb is the skbuff for the packet being queried - * @secdata is a pointer to a buffer in which to copy the security data - * @seclen is the maximum length for @secdata + * @sock contains the peer socket. May be NULL. + * @skb is the sk_buff for the packet being queried. May be NULL. + * @secid pointer to store the secid of the packet. * Return 0 on success, error on failure. 
* @sk_alloc_security: * Allocate and attach a security structure to the sk->sk_security field, @@ -906,9 +904,9 @@ * @secmark_relabel_packet: * check if the process should be allowed to relabel packets to * the given secid - * @security_secmark_refcount_inc + * @secmark_refcount_inc: * tells the LSM to increment the number of secmark labeling rules loaded - * @security_secmark_refcount_dec + * @secmark_refcount_dec: * tells the LSM to decrement the number of secmark labeling rules loaded * @req_classify_flow: * Sets the flow's sid to the openreq sid. @@ -1113,41 +1111,41 @@ * * @msg_queue_alloc_security: * Allocate and attach a security structure to the - * msq->q_perm.security field. The security field is initialized to + * @perm->security field. The security field is initialized to * NULL when the structure is first created. - * @msq contains the message queue structure to be modified. + * @perm contains the IPC permissions of the message queue. * Return 0 if operation was successful and permission is granted. * @msg_queue_free_security: - * Deallocate security structure for this message queue. - * @msq contains the message queue structure to be modified. + * Deallocate security field @perm->security for the message queue. + * @perm contains the IPC permissions of the message queue. * @msg_queue_associate: * Check permission when a message queue is requested through the - * msgget system call. This hook is only called when returning the + * msgget system call. This hook is only called when returning the * message queue identifier for an existing message queue, not when a * new message queue is created. - * @msq contains the message queue to act upon. + * @perm contains the IPC permissions of the message queue. * @msqflg contains the operation control flags. * Return 0 if permission is granted. * @msg_queue_msgctl: * Check permission when a message control operation specified by @cmd - * is to be performed on the message queue @msq. - * The @msq may be NULL, e.g. for IPC_INFO or MSG_INFO. - * @msq contains the message queue to act upon. May be NULL. + * is to be performed on the message queue with permissions @perm. + * The @perm may be NULL, e.g. for IPC_INFO or MSG_INFO. + * @perm contains the IPC permissions of the msg queue. May be NULL. * @cmd contains the operation to be performed. * Return 0 if permission is granted. * @msg_queue_msgsnd: * Check permission before a message, @msg, is enqueued on the message - * queue, @msq. - * @msq contains the message queue to send message to. + * queue with permissions @perm. + * @perm contains the IPC permissions of the message queue. * @msg contains the message to be enqueued. * @msqflg contains operational flags. * Return 0 if permission is granted. * @msg_queue_msgrcv: * Check permission before a message, @msg, is removed from the message - * queue, @msq. The @target task structure contains a pointer to the + * queue. The @target task structure contains a pointer to the * process that will be receiving the message (not equal to the current * process when inline receives are being performed). - * @msq contains the message queue to retrieve message from. + * @perm contains the IPC permissions of the message queue. * @msg contains the message destination. * @target contains the task structure for recipient process. * @type contains the type of message requested. 
@@ -1157,34 +1155,34 @@ * Security hooks for System V Shared Memory Segments * * @shm_alloc_security: - * Allocate and attach a security structure to the shp->shm_perm.security - * field. The security field is initialized to NULL when the structure is + * Allocate and attach a security structure to the @perm->security + * field. The security field is initialized to NULL when the structure is * first created. - * @shp contains the shared memory structure to be modified. + * @perm contains the IPC permissions of the shared memory structure. * Return 0 if operation was successful and permission is granted. * @shm_free_security: - * Deallocate the security struct for this memory segment. - * @shp contains the shared memory structure to be modified. + * Deallocate the security structure @perm->security for the memory segment. + * @perm contains the IPC permissions of the shared memory structure. * @shm_associate: * Check permission when a shared memory region is requested through the - * shmget system call. This hook is only called when returning the shared + * shmget system call. This hook is only called when returning the shared * memory region identifier for an existing region, not when a new shared * memory region is created. - * @shp contains the shared memory structure to be modified. + * @perm contains the IPC permissions of the shared memory structure. * @shmflg contains the operation control flags. * Return 0 if permission is granted. * @shm_shmctl: * Check permission when a shared memory control operation specified by - * @cmd is to be performed on the shared memory region @shp. - * The @shp may be NULL, e.g. for IPC_INFO or SHM_INFO. - * @shp contains shared memory structure to be modified. + * @cmd is to be performed on the shared memory region with permissions @perm. + * The @perm may be NULL, e.g. for IPC_INFO or SHM_INFO. + * @perm contains the IPC permissions of the shared memory structure. * @cmd contains the operation to be performed. * Return 0 if permission is granted. * @shm_shmat: * Check permissions prior to allowing the shmat system call to attach the - * shared memory segment @shp to the data segment of the calling process. - * The attaching address is specified by @shmaddr. - * @shp contains the shared memory structure to be modified. + * shared memory segment with permissions @perm to the data segment of the + * calling process. The attaching address is specified by @shmaddr. + * @perm contains the IPC permissions of the shared memory structure. * @shmaddr contains the address to attach memory region to. * @shmflg contains the operational flags. * Return 0 if permission is granted. @@ -1192,34 +1190,34 @@ * Security hooks for System V Semaphores * * @sem_alloc_security: - * Allocate and attach a security structure to the sma->sem_perm.security - * field. The security field is initialized to NULL when the structure is + * Allocate and attach a security structure to the @perm->security + * field. The security field is initialized to NULL when the structure is * first created. - * @sma contains the semaphore structure + * @perm contains the IPC permissions of the semaphore. * Return 0 if operation was successful and permission is granted. * @sem_free_security: - * deallocate security struct for this semaphore - * @sma contains the semaphore structure. + * Deallocate security structure @perm->security for the semaphore. + * @perm contains the IPC permissions of the semaphore. 
* @sem_associate: * Check permission when a semaphore is requested through the semget - * system call. This hook is only called when returning the semaphore + * system call. This hook is only called when returning the semaphore * identifier for an existing semaphore, not when a new one must be * created. - * @sma contains the semaphore structure. + * @perm contains the IPC permissions of the semaphore. * @semflg contains the operation control flags. * Return 0 if permission is granted. * @sem_semctl: * Check permission when a semaphore operation specified by @cmd is to be - * performed on the semaphore @sma. The @sma may be NULL, e.g. for + * performed on the semaphore. The @perm may be NULL, e.g. for * IPC_INFO or SEM_INFO. - * @sma contains the semaphore structure. May be NULL. + * @perm contains the IPC permissions of the semaphore. May be NULL. * @cmd contains the operation to be performed. * Return 0 if permission is granted. * @sem_semop: * Check permissions before performing operations on members of the - * semaphore set @sma. If the @alter flag is nonzero, the semaphore set + * semaphore set. If the @alter flag is nonzero, the semaphore set * may be modified. - * @sma contains the semaphore structure. + * @perm contains the IPC permissions of the semaphore. * @sops contains the operations to perform. * @nsops contains the number of operations to perform. * @alter contains the flag indicating whether changes are to be made. @@ -1292,13 +1290,12 @@ * Check permission before accessing the kernel message ring or changing * logging to the console. * See the syslog(2) manual page for an explanation of the @type values. - * @type contains the type of action. - * @from_file indicates the context of action (if it came from /proc). + * @type contains the SYSLOG_ACTION_* constant from <include/linux/syslog.h> * Return 0 if permission is granted. * @settime: * Check permission to change the system time. - * struct timespec64 is defined in include/linux/time64.h and timezone - * is defined in include/linux/time.h + * struct timespec64 is defined in <include/linux/time64.h> and timezone + * is defined in <include/linux/time.h> * @ts contains new time * @tz contains new timezone * Return 0 if permission is granted. @@ -1340,7 +1337,7 @@ * @audit_rule_init: * Allocate and initialize an LSM audit rule structure. * @field contains the required Audit action. - * Fields flags are defined in include/linux/audit.h + * Fields flags are defined in <include/linux/audit.h> * @op contains the operator the rule uses. * @rulestr contains the context where the rule will be applied to. * @lsmrule contains a pointer to receive the result. @@ -1348,9 +1345,9 @@ * -EINVAL in case of an invalid rule. * * @audit_rule_known: - * Specifies whether given @rule contains any fields related to + * Specifies whether given @krule contains any fields related to * current LSM. - * @rule contains the audit rule of interest. + * @krule contains the audit rule of interest. * Return 1 in case of relation found, 0 otherwise. * * @audit_rule_match: @@ -1359,13 +1356,13 @@ * @secid contains the security id in question. * @field contains the field which relates to current LSM. * @op contains the operator that will be used for matching. - * @rule points to the audit rule that will be checked against. + * @lrule points to the audit rule that will be checked against. * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure. 
* * @audit_rule_free: * Deallocate the LSM audit rule structure previously allocated by * audit_rule_init. - * @rule contains the allocated rule + * @lsmrule contains the allocated rule * * @inode_invalidate_secctx: * Notify the security module that it must revalidate the security context @@ -1378,9 +1375,7 @@ * this hook to initialize the security context in its incore inode to the * value provided by the server for the file when the server returned the * file's attributes to the client. - * * Must be called with inode->i_mutex locked. - * * @inode we wish to set the security context of. * @ctx contains the string which we wish to set in the inode. * @ctxlen contains the length of @ctx. @@ -1393,9 +1388,7 @@ * this hook to change the security context in its incore inode and on the * backing filesystem to a value provided by the client on a SETATTR * operation. - * * Must be called with inode->i_mutex locked. - * * @dentry contains the inode we wish to set the security context of. * @ctx contains the string which we wish to set in the inode. * @ctxlen contains the length of @ctx. @@ -1403,7 +1396,6 @@ * @inode_getsecctx: * On success, returns 0 and fills out @ctx and @ctxlen with the security * context for the given @inode. - * * @inode we wish to get the security context of. * @ctx is a pointer in which to place the allocated security context. * @ctxlen points to the place to put the length of @ctx. @@ -1640,28 +1632,28 @@ union security_list_options { int (*msg_msg_alloc_security)(struct msg_msg *msg); void (*msg_msg_free_security)(struct msg_msg *msg); - int (*msg_queue_alloc_security)(struct kern_ipc_perm *msq); - void (*msg_queue_free_security)(struct kern_ipc_perm *msq); - int (*msg_queue_associate)(struct kern_ipc_perm *msq, int msqflg); - int (*msg_queue_msgctl)(struct kern_ipc_perm *msq, int cmd); - int (*msg_queue_msgsnd)(struct kern_ipc_perm *msq, struct msg_msg *msg, + int (*msg_queue_alloc_security)(struct kern_ipc_perm *perm); + void (*msg_queue_free_security)(struct kern_ipc_perm *perm); + int (*msg_queue_associate)(struct kern_ipc_perm *perm, int msqflg); + int (*msg_queue_msgctl)(struct kern_ipc_perm *perm, int cmd); + int (*msg_queue_msgsnd)(struct kern_ipc_perm *perm, struct msg_msg *msg, int msqflg); - int (*msg_queue_msgrcv)(struct kern_ipc_perm *msq, struct msg_msg *msg, + int (*msg_queue_msgrcv)(struct kern_ipc_perm *perm, struct msg_msg *msg, struct task_struct *target, long type, int mode); - int (*shm_alloc_security)(struct kern_ipc_perm *shp); - void (*shm_free_security)(struct kern_ipc_perm *shp); - int (*shm_associate)(struct kern_ipc_perm *shp, int shmflg); - int (*shm_shmctl)(struct kern_ipc_perm *shp, int cmd); - int (*shm_shmat)(struct kern_ipc_perm *shp, char __user *shmaddr, + int (*shm_alloc_security)(struct kern_ipc_perm *perm); + void (*shm_free_security)(struct kern_ipc_perm *perm); + int (*shm_associate)(struct kern_ipc_perm *perm, int shmflg); + int (*shm_shmctl)(struct kern_ipc_perm *perm, int cmd); + int (*shm_shmat)(struct kern_ipc_perm *perm, char __user *shmaddr, int shmflg); - int (*sem_alloc_security)(struct kern_ipc_perm *sma); - void (*sem_free_security)(struct kern_ipc_perm *sma); - int (*sem_associate)(struct kern_ipc_perm *sma, int semflg); - int (*sem_semctl)(struct kern_ipc_perm *sma, int cmd); - int (*sem_semop)(struct kern_ipc_perm *sma, struct sembuf *sops, + int (*sem_alloc_security)(struct kern_ipc_perm *perm); + void (*sem_free_security)(struct kern_ipc_perm *perm); + int (*sem_associate)(struct kern_ipc_perm *perm, int semflg); 
+ int (*sem_semctl)(struct kern_ipc_perm *perm, int cmd); + int (*sem_semop)(struct kern_ipc_perm *perm, struct sembuf *sops, unsigned nsops, int alter); int (*netlink_send)(struct sock *sk, struct sk_buff *skb); diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index 75e5c8ff85fc..c34d5f0d34d7 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h @@ -553,7 +553,6 @@ struct palmas_pmic { struct palmas *palmas; struct device *dev; struct regulator_desc desc[PALMAS_NUM_REGS]; - struct regulator_dev *rdev[PALMAS_NUM_REGS]; struct mutex mutex; int smps123; diff --git a/include/linux/mfd/wm831x/regulator.h b/include/linux/mfd/wm831x/regulator.h index 955d30fc6a27..30c587a0624c 100644 --- a/include/linux/mfd/wm831x/regulator.h +++ b/include/linux/mfd/wm831x/regulator.h @@ -1213,6 +1213,6 @@ #define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */ #define WM831X_ISINK_MAX_ISEL 55 -extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1]; +extern const unsigned int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1]; #endif diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h index 4ee908f5b834..43d0d307e2e3 100644 --- a/include/linux/mfd/wm8400-private.h +++ b/include/linux/mfd/wm8400-private.h @@ -923,12 +923,4 @@ struct wm8400 { #define WM8400_LINE_CMP_VTHD_SHIFT 0 /* LINE_CMP_VTHD - [3:0] */ #define WM8400_LINE_CMP_VTHD_WIDTH 4 /* LINE_CMP_VTHD - [3:0] */ -int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data); - -static inline int wm8400_set_bits(struct wm8400 *wm8400, u8 reg, - u16 mask, u16 val) -{ - return regmap_update_bits(wm8400->regmap, reg, mask, val); -} - #endif diff --git a/include/linux/mm.h b/include/linux/mm.h index 76769749b5a5..083d7b4863ed 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -966,6 +966,10 @@ static inline bool is_pci_p2pdma_page(const struct page *page) } #endif /* CONFIG_DEV_PAGEMAP_OPS */ +/* 127: arbitrary random number, small enough to assemble well */ +#define page_ref_zero_or_close_to_overflow(page) \ + ((unsigned int) page_ref_count(page) + 127u <= 127u) + static inline void get_page(struct page *page) { page = compound_head(page); @@ -973,10 +977,19 @@ static inline void get_page(struct page *page) * Getting a normal page or the head of a compound page * requires to already have an elevated page->_refcount. 
*/ - VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page); + VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page); page_ref_inc(page); } +static inline __must_check bool try_get_page(struct page *page) +{ + page = compound_head(page); + if (WARN_ON_ONCE(page_ref_count(page) <= 0)) + return false; + page_ref_inc(page); + return true; +} + static inline void put_page(struct page *page) { page = compound_head(page); @@ -2597,37 +2610,31 @@ static inline void kernel_poison_pages(struct page *page, int numpages, int enable) { } #endif -#ifdef CONFIG_DEBUG_PAGEALLOC extern bool _debug_pagealloc_enabled; -extern void __kernel_map_pages(struct page *page, int numpages, int enable); static inline bool debug_pagealloc_enabled(void) { - return _debug_pagealloc_enabled; + return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && _debug_pagealloc_enabled; } +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP) +extern void __kernel_map_pages(struct page *page, int numpages, int enable); + static inline void kernel_map_pages(struct page *page, int numpages, int enable) { - if (!debug_pagealloc_enabled()) - return; - __kernel_map_pages(page, numpages, enable); } #ifdef CONFIG_HIBERNATION extern bool kernel_page_present(struct page *page); #endif /* CONFIG_HIBERNATION */ -#else /* CONFIG_DEBUG_PAGEALLOC */ +#else /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */ static inline void kernel_map_pages(struct page *page, int numpages, int enable) {} #ifdef CONFIG_HIBERNATION static inline bool kernel_page_present(struct page *page) { return true; } #endif /* CONFIG_HIBERNATION */ -static inline bool debug_pagealloc_enabled(void) -{ - return false; -} -#endif /* CONFIG_DEBUG_PAGEALLOC */ +#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */ #ifdef __HAVE_ARCH_GATE_AREA extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 26f69cf763f4..324e872c91d1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1500,6 +1500,7 @@ struct net_device_ops { * @IFF_FAILOVER: device is a failover master device * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device + * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running */ enum netdev_priv_flags { IFF_802_1Q_VLAN = 1<<0, @@ -1532,6 +1533,7 @@ enum netdev_priv_flags { IFF_FAILOVER = 1<<27, IFF_FAILOVER_SLAVE = 1<<28, IFF_L3MDEV_RX_HANDLER = 1<<29, + IFF_LIVE_RENAME_OK = 1<<30, }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1563,6 +1565,7 @@ enum netdev_priv_flags { #define IFF_FAILOVER IFF_FAILOVER #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER +#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK /** * struct net_device - The DEVICE structure. 
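try_get_page() above is the checked counterpart of get_page(): it refuses (with a WARN) to take a reference on a page whose refcount has already dropped to zero, while get_page() now also VM_BUG_ON()s when the refcount is zero or close to overflow. A small sketch of a gup-style caller, not taken from the diff:

#include <linux/mm.h>

static bool example_pin_page(struct page *page)
{
	if (!try_get_page(page))
		return false;		/* refcount was already zero (or negative) */

	/* ... safely use the page while holding the extra reference ... */

	put_page(page);
	return true;
}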
diff --git a/include/linux/nvme.h b/include/linux/nvme.h index baa49e6a23cc..c40720cb59ac 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -967,8 +967,13 @@ struct nvme_get_log_page_command { __le16 numdl; __le16 numdu; __u16 rsvd11; - __le32 lpol; - __le32 lpou; + union { + struct { + __le32 lpol; + __le32 lpou; + }; + __le64 lpo; + }; __u32 rsvd14[2]; }; diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h index d2fa9ca42e9a..7f30446348c4 100644 --- a/include/linux/oid_registry.h +++ b/include/linux/oid_registry.h @@ -93,6 +93,24 @@ enum OID { OID_authorityKeyIdentifier, /* 2.5.29.35 */ OID_extKeyUsage, /* 2.5.29.37 */ + /* EC-RDSA */ + OID_gostCPSignA, /* 1.2.643.2.2.35.1 */ + OID_gostCPSignB, /* 1.2.643.2.2.35.2 */ + OID_gostCPSignC, /* 1.2.643.2.2.35.3 */ + OID_gost2012PKey256, /* 1.2.643.7.1.1.1.1 */ + OID_gost2012PKey512, /* 1.2.643.7.1.1.1.2 */ + OID_gost2012Digest256, /* 1.2.643.7.1.1.2.2 */ + OID_gost2012Digest512, /* 1.2.643.7.1.1.2.3 */ + OID_gost2012Signature256, /* 1.2.643.7.1.1.3.2 */ + OID_gost2012Signature512, /* 1.2.643.7.1.1.3.3 */ + OID_gostTC26Sign256A, /* 1.2.643.7.1.2.1.1.1 */ + OID_gostTC26Sign256B, /* 1.2.643.7.1.2.1.1.2 */ + OID_gostTC26Sign256C, /* 1.2.643.7.1.2.1.1.3 */ + OID_gostTC26Sign256D, /* 1.2.643.7.1.2.1.1.4 */ + OID_gostTC26Sign512A, /* 1.2.643.7.1.2.1.2.1 */ + OID_gostTC26Sign512B, /* 1.2.643.7.1.2.1.2.2 */ + OID_gostTC26Sign512C, /* 1.2.643.7.1.2.1.2.3 */ + OID__NR }; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index e47ef764f613..15a82ff0aefe 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -240,7 +240,6 @@ struct perf_event; #define PERF_PMU_CAP_NO_INTERRUPT 0x01 #define PERF_PMU_CAP_NO_NMI 0x02 #define PERF_PMU_CAP_AUX_NO_SG 0x04 -#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF 0x08 #define PERF_PMU_CAP_EXCLUSIVE 0x10 #define PERF_PMU_CAP_ITRACE 0x20 #define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40 @@ -464,7 +463,7 @@ enum perf_addr_filter_action_t { /** * struct perf_addr_filter - address range filter definition * @entry: event's filter list linkage - * @inode: object file's inode for file-based filters + * @path: object file's path for file-based filters * @offset: filter range offset * @size: filter range size (size==0 means single address trigger) * @action: filter/start/stop @@ -888,6 +887,9 @@ extern void perf_sched_cb_dec(struct pmu *pmu); extern void perf_sched_cb_inc(struct pmu *pmu); extern int perf_event_task_disable(void); extern int perf_event_task_enable(void); + +extern void perf_pmu_resched(struct pmu *pmu); + extern int perf_event_refresh(struct perf_event *event, int refresh); extern void perf_event_update_userpage(struct perf_event *event); extern int perf_event_release_kernel(struct perf_event *event); @@ -1055,12 +1057,18 @@ static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned lo #endif /* - * Take a snapshot of the regs. Skip ip and frame pointer to - * the nth caller. We only need a few of the regs: + * When generating a perf sample in-line, instead of from an interrupt / + * exception, we lack a pt_regs. This is typically used from software events + * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints. 
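The nvme_get_log_page_command change above overlays the two 32-bit log page offset words (lpol/lpou) with a single 64-bit lpo field, so callers can set the offset with one store. A standalone sketch of the same anonymous-union layout, using plain integer types instead of __le32/__le64 (the struct and values here are illustrative, not the wire format):

#include <stdint.h>
#include <stdio.h>

struct log_page_offset {
	union {
		struct {
			uint32_t lpol;	/* low 32 bits of the offset */
			uint32_t lpou;	/* high 32 bits of the offset */
		};
		uint64_t lpo;		/* whole 64-bit offset */
	};
};

int main(void)
{
	struct log_page_offset cmd = { .lpo = 0 };

	cmd.lpo = 0x123456789abcdef0ULL;	/* one 64-bit store */
	/* On a little-endian host the two halves line up as shown; the
	 * kernel struct additionally fixes endianness via __le32/__le64. */
	printf("lpol=0x%08x lpou=0x%08x\n", cmd.lpol, cmd.lpou);
	return 0;
}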
+ * + * We typically don't need a full set, but (for x86) do require: * - ip for PERF_SAMPLE_IP * - cs for user_mode() tests - * - bp for callchains - * - eflags, for future purposes, just in case + * - sp for PERF_SAMPLE_CALLCHAIN + * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs()) + * + * NOTE: assumes @regs is otherwise already 0 filled; this is important for + * things like PERF_SAMPLE_REGS_INTR. */ static inline void perf_fetch_caller_regs(struct pt_regs *regs) { diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 787d224ff43e..5c626fdc10db 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -101,18 +101,20 @@ struct pipe_buf_operations { /* * Get a reference to the pipe buffer. */ - void (*get)(struct pipe_inode_info *, struct pipe_buffer *); + bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); }; /** * pipe_buf_get - get a reference to a pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to get a reference to + * + * Return: %true if the reference was successfully obtained. */ -static inline void pipe_buf_get(struct pipe_inode_info *pipe, +static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { - buf->ops->get(pipe, buf); + return buf->ops->get(pipe, buf); } /** @@ -171,9 +173,10 @@ struct pipe_inode_info *alloc_pipe_info(void); void free_pipe_info(struct pipe_inode_info *); /* Generic pipe buffer ops functions */ -void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); +bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); +int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *); void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); void pipe_buf_mark_unmergeable(struct pipe_buffer *buf); diff --git a/include/linux/platform_data/ads7828.h b/include/linux/platform_data/ads7828.h index 3245f45f9d77..a3370a007702 100644 --- a/include/linux/platform_data/ads7828.h +++ b/include/linux/platform_data/ads7828.h @@ -4,7 +4,7 @@ * Copyright (c) 2012 Savoir-faire Linux Inc. * Vivien Didelot <vivien.didelot@savoirfairelinux.com> * - * For further information, see the Documentation/hwmon/ads7828 file. + * For further information, see the Documentation/hwmon/ads7828.rst file. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/include/linux/platform_data/ds620.h b/include/linux/platform_data/ds620.h index 6ef58bb77e46..f0ce22a78bb8 100644 --- a/include/linux/platform_data/ds620.h +++ b/include/linux/platform_data/ds620.h @@ -14,7 +14,7 @@ struct ds620_platform_data { * 1 = PO_LOW * 2 = PO_HIGH * - * (see Documentation/hwmon/ds620) + * (see Documentation/hwmon/ds620.rst) */ int pomode; }; diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h index 9f0aa1b48c78..dde59fd3590f 100644 --- a/include/linux/platform_data/ina2xx.h +++ b/include/linux/platform_data/ina2xx.h @@ -7,7 +7,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * - * For further information, see the Documentation/hwmon/ina2xx file. + * For further information, see the Documentation/hwmon/ina2xx.rst file. 
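pipe_buf_get() above now returns bool and is marked __must_check, so callers have to cope with a failed reference grab instead of assuming success. A hedged sketch of the resulting calling pattern (the function, its name and the chosen error code are hypothetical, not from the patch):

#include <linux/pipe_fs_i.h>
#include <linux/errno.h>

/* Hypothetical consumer that temporarily takes an extra buffer reference. */
static int example_dup_pipe_buf(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	if (!pipe_buf_get(pipe, buf))	/* may now fail, e.g. refcount saturated */
		return -EIO;		/* illustrative error code */

	/* ... use the extra reference ... */

	pipe_buf_release(pipe, buf);
	return 0;
}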
*/ /** diff --git a/include/linux/platform_data/max197.h b/include/linux/platform_data/max197.h index 8da8f94ee15c..2bbd0919bc89 100644 --- a/include/linux/platform_data/max197.h +++ b/include/linux/platform_data/max197.h @@ -8,7 +8,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * - * For further information, see the Documentation/hwmon/max197 file. + * For further information, see the Documentation/hwmon/max197.rst file. */ #ifndef _PDATA_MAX197_H diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h index ee03d429742b..5fa115d3ea4b 100644 --- a/include/linux/platform_data/ntc_thermistor.h +++ b/include/linux/platform_data/ntc_thermistor.h @@ -42,7 +42,7 @@ struct ntc_thermistor_platform_data { * read_uV() * * How to setup pullup_ohm, pulldown_ohm, and connect is - * described at Documentation/hwmon/ntc_thermistor + * described at Documentation/hwmon/ntc_thermistor.rst * * pullup/down_ohm: 0 for infinite / not-connected * diff --git a/include/linux/platform_data/spi-ep93xx.h b/include/linux/platform_data/spi-ep93xx.h index eb16c6739ac2..b439f2a896e0 100644 --- a/include/linux/platform_data/spi-ep93xx.h +++ b/include/linux/platform_data/spi-ep93xx.h @@ -6,13 +6,9 @@ struct spi_device; /** * struct ep93xx_spi_info - EP93xx specific SPI descriptor - * @chipselect: array of gpio numbers to use as chip selects - * @num_chipselect: ARRAY_SIZE(chipselect) * @use_dma: use DMA for the transfers */ struct ep93xx_spi_info { - int *chipselect; - int num_chipselect; bool use_dma; }; diff --git a/include/linux/platform_data/x86/clk-pmc-atom.h b/include/linux/platform_data/x86/clk-pmc-atom.h index 3ab892208343..7a37ac27d0fb 100644 --- a/include/linux/platform_data/x86/clk-pmc-atom.h +++ b/include/linux/platform_data/x86/clk-pmc-atom.h @@ -35,10 +35,13 @@ struct pmc_clk { * * @base: PMC clock register base offset * @clks: pointer to set of registered clocks, typically 0..5 + * @critical: flag to indicate if firmware enabled pmc_plt_clks + * should be marked as critial or not */ struct pmc_clk_data { void __iomem *base; const struct pmc_clk *clks; + bool critical; }; #endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */ diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 1ed5874bcee0..0e8e356bed6a 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -16,6 +16,7 @@ #include <linux/of.h> #include <linux/notifier.h> #include <linux/spinlock.h> +#include <linux/cpumask.h> /* * Flags to control the behaviour of a genpd. @@ -42,11 +43,22 @@ * GENPD_FLAG_ACTIVE_WAKEUP: Instructs genpd to keep the PM domain powered * on, in case any of its attached devices is used * in the wakeup path to serve system wakeups. + * + * GENPD_FLAG_CPU_DOMAIN: Instructs genpd that it should expect to get + * devices attached, which may belong to CPUs or + * possibly have subdomains with CPUs attached. + * This flag enables the genpd backend driver to + * deploy idle power management support for CPUs + * and groups of CPUs. Note that, the backend + * driver must then comply with the so called, + * last-man-standing algorithm, for the CPUs in the + * PM domain. 
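The new critical flag in struct pmc_clk_data above lets platform code ask for firmware-enabled pmc_plt_clks to be registered as critical clocks so they are never gated. A hedged sketch of how a platform might fill the structure (the clock table symbol and leaving .base to the MFD driver are assumptions for illustration):

#include <linux/platform_data/x86/clk-pmc-atom.h>

/* Hypothetical: the clock table and ioremapped register base normally
 * come from the PMC MFD driver. */
extern const struct pmc_clk example_pmc_clks[];

static struct pmc_clk_data example_pmc_clk_data = {
	/* .base = <ioremapped PMC clock register base>, */
	.clks = example_pmc_clks,
	.critical = true,	/* firmware-enabled plt_clks stay on */
};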
*/ #define GENPD_FLAG_PM_CLK (1U << 0) #define GENPD_FLAG_IRQ_SAFE (1U << 1) #define GENPD_FLAG_ALWAYS_ON (1U << 2) #define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3) +#define GENPD_FLAG_CPU_DOMAIN (1U << 4) enum gpd_status { GPD_STATE_ACTIVE = 0, /* PM domain is active */ @@ -69,6 +81,7 @@ struct genpd_power_state { s64 residency_ns; struct fwnode_handle *fwnode; ktime_t idle_time; + void *data; }; struct genpd_lock_ops; @@ -93,6 +106,7 @@ struct generic_pm_domain { unsigned int suspended_count; /* System suspend device counter */ unsigned int prepared_count; /* Suspend counter of prepared devices */ unsigned int performance_state; /* Aggregated max performance state */ + cpumask_var_t cpus; /* A cpumask of the attached CPUs */ int (*power_off)(struct generic_pm_domain *domain); int (*power_on)(struct generic_pm_domain *domain); struct opp_table *opp_table; /* OPP table of the genpd */ @@ -104,15 +118,17 @@ struct generic_pm_domain { s64 max_off_time_ns; /* Maximum allowed "suspended" time. */ bool max_off_time_changed; bool cached_power_down_ok; + bool cached_power_down_state_idx; int (*attach_dev)(struct generic_pm_domain *domain, struct device *dev); void (*detach_dev)(struct generic_pm_domain *domain, struct device *dev); unsigned int flags; /* Bit field of configs for genpd */ struct genpd_power_state *states; + void (*free_states)(struct genpd_power_state *states, + unsigned int state_count); unsigned int state_count; /* number of states */ unsigned int state_idx; /* state that genpd will go to when off */ - void *free; /* Free the state that was allocated for default */ ktime_t on_time; ktime_t accounting_time; const struct genpd_lock_ops *lock_ops; @@ -159,6 +175,7 @@ struct generic_pm_domain_data { struct pm_domain_data base; struct gpd_timing_data td; struct notifier_block nb; + int cpu; unsigned int performance_state; void *data; }; @@ -187,6 +204,9 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state); extern struct dev_power_governor simple_qos_governor; extern struct dev_power_governor pm_domain_always_on_gov; +#ifdef CONFIG_CPU_IDLE +extern struct dev_power_governor pm_domain_cpu_gov; +#endif #else static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 24c757a32a7b..b150fe97ce5a 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -102,6 +102,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq); +struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev, + unsigned long u_volt); struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, unsigned long *freq); @@ -207,6 +209,12 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, return ERR_PTR(-ENOTSUPP); } +static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev, + unsigned long u_volt) +{ + return ERR_PTR(-ENOTSUPP); +} + static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, unsigned long *freq) { diff --git a/include/linux/printk.h b/include/linux/printk.h index d7c77ed1a4cb..84ea4d094af3 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -347,7 +347,7 @@ extern int kptr_restrict; #ifdef CONFIG_PRINTK #define printk_once(fmt, ...) 
\ ({ \ - static bool __print_once __read_mostly; \ + static bool __section(.data.once) __print_once; \ bool __ret_print_once = !__print_once; \ \ if (!__print_once) { \ @@ -358,7 +358,7 @@ extern int kptr_restrict; }) #define printk_deferred_once(fmt, ...) \ ({ \ - static bool __print_once __read_mostly; \ + static bool __section(.data.once) __print_once; \ bool __ret_print_once = !__print_once; \ \ if (!__print_once) { \ diff --git a/include/linux/property.h b/include/linux/property.h index 65d3420dd5d1..a29369c89e6e 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -13,6 +13,7 @@ #ifndef _LINUX_PROPERTY_H_ #define _LINUX_PROPERTY_H_ +#include <linux/bits.h> #include <linux/fwnode.h> #include <linux/types.h> @@ -304,6 +305,23 @@ struct fwnode_handle * fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port, u32 endpoint); +/* + * Fwnode lookup flags + * + * @FWNODE_GRAPH_ENDPOINT_NEXT: In the case of no exact match, look for the + * closest endpoint ID greater than the specified + * one. + * @FWNODE_GRAPH_DEVICE_DISABLED: That the device to which the remote + * endpoint of the given endpoint belongs to, + * may be disabled. + */ +#define FWNODE_GRAPH_ENDPOINT_NEXT BIT(0) +#define FWNODE_GRAPH_DEVICE_DISABLED BIT(1) + +struct fwnode_handle * +fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode, + u32 port, u32 endpoint, unsigned long flags); + #define fwnode_graph_for_each_endpoint(fwnode, child) \ for (child = NULL; \ (child = fwnode_graph_get_next_endpoint(fwnode, child)); ) diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 827c601841c4..6f89fc8d4b8e 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -5,8 +5,7 @@ * * Author: Brijesh Singh <brijesh.singh@amd.com> * - * SEV spec 0.14 is available at: - * http://support.amd.com/TechDocs/55766_SEV-KM API_Specification.pdf + * SEV API spec is available at https://developer.amd.com/sev * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index f6165d304b4d..48841e5dab90 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -1338,7 +1338,6 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) } /* Let SB update */ - mmiowb(); return rc; } @@ -1374,7 +1373,6 @@ static inline void qed_sb_ack(struct qed_sb_info *sb_info, /* Both segments (interrupts & acks) are written to same place address; * Need to guarantee all commands will be received (in-order) by HW. */ - mmiowb(); barrier(); } diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 6cdb1db776cf..922bb6848813 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -878,9 +878,11 @@ static inline void rcu_head_init(struct rcu_head *rhp) static inline bool rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f) { - if (READ_ONCE(rhp->func) == f) + rcu_callback_t func = READ_ONCE(rhp->func); + + if (func == f) return true; - WARN_ON_ONCE(READ_ONCE(rhp->func) != (rcu_callback_t)~0L); + WARN_ON_ONCE(func != (rcu_callback_t)~0L); return false; } diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h index 90bfa3279a01..563290fc194f 100644 --- a/include/linux/rcuwait.h +++ b/include/linux/rcuwait.h @@ -18,7 +18,7 @@ * awoken. 
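fwnode_graph_get_endpoint_by_id() and the two lookup flags above give drivers a single call for "this endpoint, or the next higher endpoint ID, even if the remote device is disabled". A hedged usage fragment (the port/endpoint numbers and the surrounding helper are assumptions):

#include <linux/property.h>

/* Hypothetical probe helper: port 0, endpoint 0 or the next ID above it,
 * tolerating a disabled remote device. The caller owns the returned
 * reference. */
static struct fwnode_handle *
example_get_sensor_endpoint(const struct fwnode_handle *fwnode)
{
	return fwnode_graph_get_endpoint_by_id(fwnode, 0, 0,
					       FWNODE_GRAPH_ENDPOINT_NEXT |
					       FWNODE_GRAPH_DEVICE_DISABLED);
}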
*/ struct rcuwait { - struct task_struct *task; + struct task_struct __rcu *task; }; #define __RCUWAIT_INITIALIZER(name) \ diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index f3f76051e8b0..aaf3cee70439 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -478,6 +478,11 @@ static inline int regulator_is_supported_voltage(struct regulator *regulator, return 0; } +static inline unsigned int regulator_get_linear_step(struct regulator *regulator) +{ + return 0; +} + static inline int regulator_set_current_limit(struct regulator *regulator, int min_uA, int max_uA) { diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h deleted file mode 100644 index e47568363e5e..000000000000 --- a/include/linux/rwsem-spinlock.h +++ /dev/null @@ -1,47 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* rwsem-spinlock.h: fallback C implementation - * - * Copyright (c) 2001 David Howells (dhowells@redhat.com). - * - Derived partially from ideas by Andrea Arcangeli <andrea@suse.de> - * - Derived also from comments by Linus - */ - -#ifndef _LINUX_RWSEM_SPINLOCK_H -#define _LINUX_RWSEM_SPINLOCK_H - -#ifndef _LINUX_RWSEM_H -#error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead" -#endif - -#ifdef __KERNEL__ -/* - * the rw-semaphore definition - * - if count is 0 then there are no active readers or writers - * - if count is +ve then that is the number of active readers - * - if count is -1 then there is one active writer - * - if wait_list is not empty, then there are processes waiting for the semaphore - */ -struct rw_semaphore { - __s32 count; - raw_spinlock_t wait_lock; - struct list_head wait_list; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; - -#define RWSEM_UNLOCKED_VALUE 0x00000000 - -extern void __down_read(struct rw_semaphore *sem); -extern int __must_check __down_read_killable(struct rw_semaphore *sem); -extern int __down_read_trylock(struct rw_semaphore *sem); -extern void __down_write(struct rw_semaphore *sem); -extern int __must_check __down_write_killable(struct rw_semaphore *sem); -extern int __down_write_trylock(struct rw_semaphore *sem); -extern void __up_read(struct rw_semaphore *sem); -extern void __up_write(struct rw_semaphore *sem); -extern void __downgrade_write(struct rw_semaphore *sem); -extern int rwsem_is_locked(struct rw_semaphore *sem); - -#endif /* __KERNEL__ */ -#endif /* _LINUX_RWSEM_SPINLOCK_H */ diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 67dbb57508b1..2ea18a3def04 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -20,25 +20,30 @@ #include <linux/osq_lock.h> #endif -struct rw_semaphore; - -#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK -#include <linux/rwsem-spinlock.h> /* use a generic implementation */ -#define __RWSEM_INIT_COUNT(name) .count = RWSEM_UNLOCKED_VALUE -#else -/* All arch specific implementations share the same struct */ +/* + * For an uncontended rwsem, count and owner are the only fields a task + * needs to touch when acquiring the rwsem. So they are put next to each + * other to increase the chance that they will share the same cacheline. + * + * In a contended rwsem, the owner is likely the most frequently accessed + * field in the structure as the optimistic waiter that holds the osq lock + * will spin on owner. 
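The rw_semaphore layout comment above (continued below) argues that an uncontended acquirer only touches count and owner, so keeping them adjacent makes it likely they share one cacheline. A standalone toy illustration of that reasoning using offsetof on a simplified struct (not the real rw_semaphore layout):

#include <stddef.h>
#include <stdio.h>

struct toy_rwsem {
	long count;		/* hot: touched on every acquire/release */
	void *owner;		/* hot: spun on by optimistic waiters */
	int wait_lock;		/* colder: only needed under contention */
	void *wait_list[2];
};

int main(void)
{
	/* With the hot fields first, both normally land inside the same
	 * 64-byte cacheline. */
	printf("count     @ %zu\n", offsetof(struct toy_rwsem, count));
	printf("owner     @ %zu\n", offsetof(struct toy_rwsem, owner));
	printf("wait_lock @ %zu\n", offsetof(struct toy_rwsem, wait_lock));
	return 0;
}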
For an embedded rwsem, other hot fields in the + * containing structure should be moved further away from the rwsem to + * reduce the chance that they will share the same cacheline causing + * cacheline bouncing problem. + */ struct rw_semaphore { atomic_long_t count; - struct list_head wait_list; - raw_spinlock_t wait_lock; #ifdef CONFIG_RWSEM_SPIN_ON_OWNER - struct optimistic_spin_queue osq; /* spinner MCS lock */ /* * Write owner. Used as a speculative check to see * if the owner is running on the cpu. */ struct task_struct *owner; + struct optimistic_spin_queue osq; /* spinner MCS lock */ #endif + raw_spinlock_t wait_lock; + struct list_head wait_list; #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif @@ -50,24 +55,14 @@ struct rw_semaphore { */ #define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-2L) -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); - -/* Include the arch specific part */ -#include <asm/rwsem.h> - /* In all implementations count != 0 means locked */ static inline int rwsem_is_locked(struct rw_semaphore *sem) { return atomic_long_read(&sem->count) != 0; } +#define RWSEM_UNLOCKED_VALUE 0L #define __RWSEM_INIT_COUNT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE) -#endif /* Common initializer macros and functions */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 1549584a1538..50606a6e73d6 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1057,7 +1057,6 @@ struct task_struct { #ifdef CONFIG_RSEQ struct rseq __user *rseq; - u32 rseq_len; u32 rseq_sig; /* * RmW on rseq_event_mask must be performed atomically @@ -1855,12 +1854,10 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) { if (clone_flags & CLONE_THREAD) { t->rseq = NULL; - t->rseq_len = 0; t->rseq_sig = 0; t->rseq_event_mask = 0; } else { t->rseq = current->rseq; - t->rseq_len = current->rseq_len; t->rseq_sig = current->rseq_sig; t->rseq_event_mask = current->rseq_event_mask; } @@ -1869,7 +1866,6 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) static inline void rseq_execve(struct task_struct *t) { t->rseq = NULL; - t->rseq_len = 0; t->rseq_sig = 0; t->rseq_event_mask = 0; } diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 0cd9f10423fb..a3fda9f024c3 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -49,6 +49,27 @@ static inline void mmdrop(struct mm_struct *mm) __mmdrop(mm); } +/* + * This has to be called after a get_task_mm()/mmget_not_zero() + * followed by taking the mmap_sem for writing before modifying the + * vmas or anything the coredump pretends not to change from under it. + * + * NOTE: find_extend_vma() called from GUP context is the only place + * that can modify the "mm" (notably the vm_start/end) under mmap_sem + * for reading and outside the context of the process, so it is also + * the only case that holds the mmap_sem for reading that must call + * this function. Generally if the mmap_sem is hold for reading + * there's no need of this check after get_task_mm()/mmget_not_zero(). 
+ * + * This function can be obsoleted and the check can be removed, after + * the coredump code will hold the mmap_sem for writing before + * invoking the ->core_dump methods. + */ +static inline bool mmget_still_valid(struct mm_struct *mm) +{ + return likely(!mm->core_state); +} + /** * mmget() - Pin the address space associated with a &struct mm_struct. * @mm: The address space to pin. diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 2e97a2227045..f1227f2c38a4 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -76,6 +76,7 @@ extern void exit_itimers(struct signal_struct *); extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long); extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); struct task_struct *fork_idle(int); +struct mm_struct *copy_init_mm(void); extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); extern long kernel_wait4(pid_t, int __user *, int, struct rusage *); diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 57c7ed3fe465..cfc0a89a7159 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -76,8 +76,8 @@ struct sched_domain_shared { struct sched_domain { /* These fields must be setup */ - struct sched_domain *parent; /* top domain must be null terminated */ - struct sched_domain *child; /* bottom domain must be null terminated */ + struct sched_domain __rcu *parent; /* top domain must be null terminated */ + struct sched_domain __rcu *child; /* bottom domain must be null terminated */ struct sched_group *groups; /* the balancing groups of the domain */ unsigned long min_interval; /* Minimum balance interval ms */ unsigned long max_interval; /* Maximum balance interval ms */ diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index c7b5f86b91a1..468d2565a9fe 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h @@ -31,6 +31,13 @@ struct user_struct { atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ #ifdef CONFIG_KEYS + /* + * These pointers can only change from NULL to a non-NULL value once. + * Writes are protected by key_user_keyring_mutex. + * Unlocked readers should use READ_ONCE() unless they know that + * install_user_keyrings() has been called successfully (which sets + * these members to non-NULL values, preventing further modifications). 
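mmget_still_valid() above is meant to be checked after grabbing a foreign mm and taking mmap_sem for writing, so an in-flight coredump does not have its VMAs modified from under it. A hedged sketch of that calling sequence (the function, its name and the error codes are hypothetical; error handling is trimmed to the essentials):

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/errno.h>

/* Hypothetical: modify another task's VMAs, backing off if the mm is
 * being core-dumped. */
static int example_touch_foreign_mm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	int ret;

	if (!mm)
		return -ESRCH;

	if (down_write_killable(&mm->mmap_sem)) {
		ret = -EINTR;
		goto out_put;
	}

	ret = -EBUSY;	/* illustrative */
	if (!mmget_still_valid(mm))
		goto out_unlock;	/* coredump in flight: leave the VMAs alone */

	/* ... safe to modify the vmas here ... */
	ret = 0;

out_unlock:
	up_write(&mm->mmap_sem);
out_put:
	mmput(mm);
	return ret;
}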
+ */ struct key *uid_keyring; /* UID specific keyring */ struct key *session_keyring; /* UID's default session keyring */ #endif diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index 2a986d282a97..b5071497b8cb 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -17,6 +17,17 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } #endif +#ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP +static inline int set_direct_map_invalid_noflush(struct page *page) +{ + return 0; +} +static inline int set_direct_map_default_noflush(struct page *page) +{ + return 0; +} +#endif + #ifndef set_mce_nospec static inline int set_mce_nospec(unsigned long pfn) { diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index f3fb1edb3526..20d815a33145 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -21,6 +21,7 @@ struct shmem_inode_info { struct list_head swaplist; /* chain of maybes on swap */ struct shared_policy policy; /* NUMA memory alloc policy */ struct simple_xattrs xattrs; /* list of xattrs */ + atomic_t stop_eviction; /* hold when working on inode */ struct inode vfs_inode; }; diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h index d0884b525001..9d1bc65d226c 100644 --- a/include/linux/smpboot.h +++ b/include/linux/smpboot.h @@ -29,7 +29,7 @@ struct smpboot_thread_data; * @thread_comm: The base name of the thread */ struct smp_hotplug_thread { - struct task_struct __percpu **store; + struct task_struct * __percpu *store; struct list_head list; int (*thread_should_run)(unsigned int cpu); void (*thread_fn)(unsigned int cpu); diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index c1c59473cef9..6005f0126631 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h @@ -25,6 +25,7 @@ struct dma_chan; struct pxa2xx_spi_controller { u16 num_chipselect; u8 enable_dma; + u8 dma_burst_size; bool is_slave; /* DMA engine specific config */ diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index 3703d0dcac2e..af9ff2f0f1b2 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -295,6 +295,10 @@ int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr, void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr, const struct spi_mem_op *op, struct sg_table *sg); + +bool spi_mem_default_supports_op(struct spi_mem *mem, + const struct spi_mem_op *op); + #else static inline int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr, @@ -310,6 +314,14 @@ spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr, struct sg_table *sg) { } + +static inline +bool spi_mem_default_supports_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + return false; +} + #endif /* CONFIG_SPI_MEM */ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op); diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 662b336aa2e4..053abd22ad31 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -143,7 +143,7 @@ struct spi_device { u32 max_speed_hz; u8 chip_select; u8 bits_per_word; - u16 mode; + u32 mode; #define SPI_CPHA 0x01 /* clock phase */ #define SPI_CPOL 0x02 /* clock polarity */ #define SPI_MODE_0 (0|0) /* (original MicroWire) */ @@ -330,6 +330,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * must fail if an unrecognized or unsupported mode is requested. 
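The new comment on user_struct's keyring pointers above spells out the access rule: writers hold key_user_keyring_mutex and the pointers only ever go from NULL to non-NULL, so lockless readers should use READ_ONCE(). A hedged reader sketch (hypothetical helper; assumes CONFIG_KEYS):

#include <linux/sched/user.h>
#include <linux/key.h>
#include <linux/compiler.h>

/* Hypothetical lockless peek at the UID keyring. */
static struct key *example_peek_uid_keyring(struct user_struct *user)
{
	/* Pairs with the NULL -> non-NULL publication done under
	 * key_user_keyring_mutex; NULL just means it is not set up yet. */
	return READ_ONCE(user->uid_keyring);
}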
* It's always safe to call this unless transfers are pending on * the device whose settings are being modified. + * @set_cs_timing: optional hook for SPI devices to request SPI master + * controller for configuring specific CS setup time, hold time and inactive + * delay interms of clock counts * @transfer: adds a message to the controller's transfer queue. * @cleanup: frees controller-specific state * @can_dma: determine whether this controller supports DMA @@ -363,6 +366,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @unprepare_transfer_hardware: there are currently no more messages on the * queue so the subsystem notifies the driver that it may relax the * hardware by issuing this call + * * @set_cs: set the logic level of the chip select line. May be called * from interrupt context. * @prepare_message: set up the controller to transfer a single message, @@ -439,13 +443,12 @@ struct spi_controller { u16 dma_alignment; /* spi_device.mode flags understood by this controller driver */ - u16 mode_bits; + u32 mode_bits; /* bitmask of supported bits_per_word for transfers */ u32 bits_per_word_mask; #define SPI_BPW_MASK(bits) BIT((bits) - 1) -#define SPI_BIT_MASK(bits) (((bits) == 32) ? ~0U : (BIT(bits) - 1)) -#define SPI_BPW_RANGE_MASK(min, max) (SPI_BIT_MASK(max) - SPI_BIT_MASK(min - 1)) +#define SPI_BPW_RANGE_MASK(min, max) GENMASK((max) - 1, (min) - 1) /* limits on transfer speed */ u32 min_speed_hz; @@ -489,6 +492,17 @@ struct spi_controller { */ int (*setup)(struct spi_device *spi); + /* + * set_cs_timing() method is for SPI controllers that supports + * configuring CS timing. + * + * This hook allows SPI client drivers to request SPI controllers + * to configure specific CS timing through spi_set_cs_timing() after + * spi_setup(). + */ + void (*set_cs_timing)(struct spi_device *spi, u8 setup_clk_cycles, + u8 hold_clk_cycles, u8 inactive_clk_cycles); + /* bidirectional bulk transfers * * + The transfer() method may not sleep; its main role is @@ -1277,7 +1291,7 @@ struct spi_board_info { /* mode becomes spi_device.mode, and is essential for chips * where the default of SPI_CS_HIGH = 0 is wrong. */ - u16 mode; + u32 mode; /* ... may need additional spi_device chip config data here. 
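SPI_BPW_RANGE_MASK() above is rewritten in terms of GENMASK(): a word size of n bits corresponds to bit n-1 of the mask, so the range min..max becomes GENMASK(max-1, min-1). A standalone check that the new form matches the old subtraction-of-masks form (both macros are reimplemented here in plain C purely for comparison):

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define GENMASK(h, l)		((~0u >> (31 - (h))) & ~(BIT(l) - 1u))

/* Old formulation from the header. */
#define SPI_BIT_MASK(bits)	(((bits) == 32) ? ~0u : (BIT(bits) - 1u))
#define OLD_RANGE(min, max)	(SPI_BIT_MASK(max) - SPI_BIT_MASK((min) - 1))
/* New formulation. */
#define NEW_RANGE(min, max)	GENMASK((max) - 1, (min) - 1)

int main(void)
{
	printf(" 8..16 old=0x%08x new=0x%08x\n", OLD_RANGE(8, 16), NEW_RANGE(8, 16));
	printf("16..24 old=0x%08x new=0x%08x\n", OLD_RANGE(16, 24), NEW_RANGE(16, 24));
	return 0;
}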
* avoid stuff protocol drivers can set; but include stuff diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h index b7e021b274dc..4444c2a992cb 100644 --- a/include/linux/spi/spi_bitbang.h +++ b/include/linux/spi/spi_bitbang.h @@ -44,6 +44,7 @@ extern int spi_bitbang_setup_transfer(struct spi_device *spi, /* start or stop queue processing */ extern int spi_bitbang_start(struct spi_bitbang *spi); +extern int spi_bitbang_init(struct spi_bitbang *spi); extern void spi_bitbang_stop(struct spi_bitbang *spi); #endif /* __SPI_BITBANG_H */ diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index e089157dcf97..ed7c4d6b8235 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -57,6 +57,7 @@ #include <linux/stringify.h> #include <linux/bottom_half.h> #include <asm/barrier.h> +#include <asm/mmiowb.h> /* @@ -178,6 +179,7 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock) { __acquire(lock); arch_spin_lock(&lock->raw_lock); + mmiowb_spin_lock(); } #ifndef arch_spin_lock_flags @@ -189,15 +191,22 @@ do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lo { __acquire(lock); arch_spin_lock_flags(&lock->raw_lock, *flags); + mmiowb_spin_lock(); } static inline int do_raw_spin_trylock(raw_spinlock_t *lock) { - return arch_spin_trylock(&(lock)->raw_lock); + int ret = arch_spin_trylock(&(lock)->raw_lock); + + if (ret) + mmiowb_spin_lock(); + + return ret; } static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) { + mmiowb_spin_unlock(); arch_spin_unlock(&lock->raw_lock); __release(lock); } diff --git a/include/linux/srcu.h b/include/linux/srcu.h index c495b2d51569..e432cc92c73d 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -56,45 +56,11 @@ struct srcu_struct { }; void call_srcu(struct srcu_struct *ssp, struct rcu_head *head, void (*func)(struct rcu_head *head)); -void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced); +void cleanup_srcu_struct(struct srcu_struct *ssp); int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp); void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp); void synchronize_srcu(struct srcu_struct *ssp); -/** - * cleanup_srcu_struct - deconstruct a sleep-RCU structure - * @ssp: structure to clean up. - * - * Must invoke this after you are finished using a given srcu_struct that - * was initialized via init_srcu_struct(), else you leak memory. - */ -static inline void cleanup_srcu_struct(struct srcu_struct *ssp) -{ - _cleanup_srcu_struct(ssp, false); -} - -/** - * cleanup_srcu_struct_quiesced - deconstruct a quiesced sleep-RCU structure - * @ssp: structure to clean up. - * - * Must invoke this after you are finished using a given srcu_struct that - * was initialized via init_srcu_struct(), else you leak memory. Also, - * all grace-period processing must have completed. - * - * "Completed" means that the last synchronize_srcu() and - * synchronize_srcu_expedited() calls must have returned before the call - * to cleanup_srcu_struct_quiesced(). It also means that the callback - * from the last call_srcu() must have been invoked before the call to - * cleanup_srcu_struct_quiesced(), but you can use srcu_barrier() to help - * with this last. Violating these rules will get you a WARN_ON() splat - * (with high probability, anyway), and will also cause the srcu_struct - * to be leaked. 
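The mmiowb_spin_lock()/mmiowb_spin_unlock() hooks added to the spinlock fast paths above fold the old explicit mmiowb() barrier into the lock itself, which is why the qed header earlier in this series could simply drop its mmiowb() calls. A hedged sketch of the driver-side pattern this now covers (the device structure, register layout and function are hypothetical):

#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/types.h>

struct example_dev {
	void __iomem *regs;
	spinlock_t lock;
};

/* Hypothetical doorbell write: with mmiowb tracking inside
 * spin_lock()/spin_unlock(), the MMIO write is ordered against the next
 * lock holder without an explicit mmiowb() here. */
static void example_ring_doorbell(struct example_dev *dev, u32 val)
{
	spin_lock(&dev->lock);
	writel(val, dev->regs /* + doorbell register offset */);
	spin_unlock(&dev->lock);
}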
- */ -static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *ssp) -{ - _cleanup_srcu_struct(ssp, true); -} - #ifdef CONFIG_DEBUG_LOCK_ALLOC /** diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h index 7978b3e2c1e1..0805dee1b6b8 100644 --- a/include/linux/stackdepot.h +++ b/include/linux/stackdepot.h @@ -23,10 +23,10 @@ typedef u32 depot_stack_handle_t; -struct stack_trace; +depot_stack_handle_t stack_depot_save(unsigned long *entries, + unsigned int nr_entries, gfp_t gfp_flags); -depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags); - -void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace); +unsigned int stack_depot_fetch(depot_stack_handle_t handle, + unsigned long **entries); #endif diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index ba29a0613e66..f0cfd12cb45e 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -3,11 +3,64 @@ #define __LINUX_STACKTRACE_H #include <linux/types.h> +#include <asm/errno.h> struct task_struct; struct pt_regs; #ifdef CONFIG_STACKTRACE +void stack_trace_print(unsigned long *trace, unsigned int nr_entries, + int spaces); +int stack_trace_snprint(char *buf, size_t size, unsigned long *entries, + unsigned int nr_entries, int spaces); +unsigned int stack_trace_save(unsigned long *store, unsigned int size, + unsigned int skipnr); +unsigned int stack_trace_save_tsk(struct task_struct *task, + unsigned long *store, unsigned int size, + unsigned int skipnr); +unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store, + unsigned int size, unsigned int skipnr); +unsigned int stack_trace_save_user(unsigned long *store, unsigned int size); + +/* Internal interfaces. Do not use in generic code */ +#ifdef CONFIG_ARCH_STACKWALK + +/** + * stack_trace_consume_fn - Callback for arch_stack_walk() + * @cookie: Caller supplied pointer handed back by arch_stack_walk() + * @addr: The stack entry address to consume + * @reliable: True when the stack entry is reliable. Required by + * some printk based consumers. + * + * Return: True, if the entry was consumed or skipped + * False, if there is no space left to store + */ +typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr, + bool reliable); +/** + * arch_stack_walk - Architecture specific function to walk the stack + * @consume_entry: Callback which is invoked by the architecture code for + * each entry. 
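The stack depot API above now takes a bare array of entries instead of struct stack_trace, pairing naturally with the new stack_trace_save()/stack_trace_print() helpers. A hedged sketch of saving and later re-printing a trace (buffer size, skip count and GFP flags are arbitrary choices):

#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/gfp.h>

static depot_stack_handle_t example_record_stack(void)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), /* skipnr */ 0);
	return stack_depot_save(entries, nr, GFP_NOWAIT);
}

static void example_print_stack(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr;

	nr = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr, /* spaces */ 0);
}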
+ * @cookie: Caller supplied pointer which is handed back to + * @consume_entry + * @task: Pointer to a task struct, can be NULL + * @regs: Pointer to registers, can be NULL + * + * ============ ======= ============================================ + * task regs + * ============ ======= ============================================ + * task NULL Stack trace from task (can be current) + * current regs Stack trace starting on regs->stackpointer + * ============ ======= ============================================ + */ +void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, + struct task_struct *task, struct pt_regs *regs); +int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie, + struct task_struct *task); +void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, + const struct pt_regs *regs); + +#else /* CONFIG_ARCH_STACKWALK */ struct stack_trace { unsigned int nr_entries, max_entries; unsigned long *entries; @@ -21,24 +74,20 @@ extern void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace); extern int save_stack_trace_tsk_reliable(struct task_struct *tsk, struct stack_trace *trace); - -extern void print_stack_trace(struct stack_trace *trace, int spaces); -extern int snprint_stack_trace(char *buf, size_t size, - struct stack_trace *trace, int spaces); - -#ifdef CONFIG_USER_STACKTRACE_SUPPORT extern void save_stack_trace_user(struct stack_trace *trace); +#endif /* !CONFIG_ARCH_STACKWALK */ +#endif /* CONFIG_STACKTRACE */ + +#if defined(CONFIG_STACKTRACE) && defined(CONFIG_HAVE_RELIABLE_STACKTRACE) +int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store, + unsigned int size); #else -# define save_stack_trace_user(trace) do { } while (0) +static inline int stack_trace_save_tsk_reliable(struct task_struct *tsk, + unsigned long *store, + unsigned int size) +{ + return -ENOSYS; +} #endif -#else /* !CONFIG_STACKTRACE */ -# define save_stack_trace(trace) do { } while (0) -# define save_stack_trace_tsk(tsk, trace) do { } while (0) -# define save_stack_trace_user(trace) do { } while (0) -# define print_stack_trace(trace, spaces) do { } while (0) -# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0) -# define save_stack_trace_tsk_reliable(tsk, trace) ({ -ENOSYS; }) -#endif /* CONFIG_STACKTRACE */ - #endif /* __LINUX_STACKTRACE_H */ diff --git a/include/linux/string.h b/include/linux/string.h index 6ab0a6fa512e..4deb11f7976b 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -31,6 +31,10 @@ size_t strlcpy(char *, const char *, size_t); #ifndef __HAVE_ARCH_STRSCPY ssize_t strscpy(char *, const char *, size_t); #endif + +/* Wraps calls to strscpy()/memset(), no arch specific code required */ +ssize_t strscpy_pad(char *dest, const char *src, size_t count); + #ifndef __HAVE_ARCH_STRCAT extern char * strcat(char *, const char *); #endif diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index ec861cd0cfe8..52d41d0c1ae1 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -304,12 +304,4 @@ rpc_clnt_swap_deactivate(struct rpc_clnt *clnt) } #endif /* CONFIG_SUNRPC_SWAP */ -static inline bool -rpc_task_need_resched(const struct rpc_task *task) -{ - if (RPC_IS_QUEUED(task) || task->tk_callback) - return true; - return false; -} - #endif /* _LINUX_SUNRPC_SCHED_H_ */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 3f529ad9a9d2..6b3ea9ea6a9e 100644 --- a/include/linux/suspend.h +++ 
b/include/linux/suspend.h @@ -425,6 +425,7 @@ void restore_processor_state(void); /* kernel/power/main.c */ extern int register_pm_notifier(struct notifier_block *nb); extern int unregister_pm_notifier(struct notifier_block *nb); +extern void ksys_sync_helper(void); #define pm_notifier(fn, pri) { \ static struct notifier_block fn##_nb = \ @@ -462,6 +463,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb) return 0; } +static inline void ksys_sync_helper(void) {} + #define pm_notifier(fn, pri) do { (void)(fn); } while (0) static inline bool pm_wakeup_pending(void) { return false; } diff --git a/include/linux/tick.h b/include/linux/tick.h index 55388ab45fd4..f92a10b5e112 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -68,6 +68,12 @@ extern void tick_broadcast_control(enum tick_broadcast_mode mode); static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { } #endif /* BROADCAST */ +#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_HOTPLUG_CPU) +extern void tick_offline_cpu(unsigned int cpu); +#else +static inline void tick_offline_cpu(unsigned int cpu) { } +#endif + #ifdef CONFIG_GENERIC_CLOCKEVENTS extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state); #else @@ -122,6 +128,7 @@ extern void tick_nohz_idle_enter(void); extern void tick_nohz_idle_exit(void); extern void tick_nohz_irq_exit(void); extern bool tick_nohz_idle_got_tick(void); +extern ktime_t tick_nohz_get_next_hrtimer(void); extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next); extern unsigned long tick_nohz_get_idle_calls(void); extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu); @@ -145,7 +152,11 @@ static inline void tick_nohz_idle_restart_tick(void) { } static inline void tick_nohz_idle_enter(void) { } static inline void tick_nohz_idle_exit(void) { } static inline bool tick_nohz_idle_got_tick(void) { return false; } - +static inline ktime_t tick_nohz_get_next_hrtimer(void) +{ + /* Next wake up is the tick period, assume it starts now */ + return ktime_add(ktime_get(), TICK_NSEC); +} static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next) { *delta_next = TICK_NSEC; diff --git a/include/linux/time64.h b/include/linux/time64.h index f38d382ffec1..a620ee610b9f 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -33,6 +33,17 @@ struct itimerspec64 { #define KTIME_MAX ((s64)~((u64)1 << 63)) #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) +/* + * Limits for settimeofday(): + * + * To prevent setting the time close to the wraparound point time setting + * is limited so a reasonable uptime can be accomodated. Uptime of 30 years + * should be really sufficient, which means the cutoff is 2232. At that + * point the cutoff is just a small part of the larger problem. + */ +#define TIME_UPTIME_SEC_MAX (30LL * 365 * 24 *3600) +#define TIME_SETTOD_SEC_MAX (KTIME_SEC_MAX - TIME_UPTIME_SEC_MAX) + static inline int timespec64_equal(const struct timespec64 *a, const struct timespec64 *b) { @@ -100,6 +111,16 @@ static inline bool timespec64_valid_strict(const struct timespec64 *ts) return true; } +static inline bool timespec64_valid_settod(const struct timespec64 *ts) +{ + if (!timespec64_valid(ts)) + return false; + /* Disallow values which cause overflow issues vs. 
CLOCK_REALTIME */ + if ((unsigned long long)ts->tv_sec >= TIME_SETTOD_SEC_MAX) + return false; + return true; +} + /** * timespec64_to_ns - Convert timespec64 to nanoseconds * @ts: pointer to the timespec64 variable to be converted diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 37b226e8df13..2b70130af585 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -268,6 +268,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); #define user_access_end() do { } while (0) #define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0) #define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0) +static inline unsigned long user_access_save(void) { return 0UL; } +static inline void user_access_restore(unsigned long flags) { } #endif #ifdef CONFIG_HARDENED_USERCOPY diff --git a/include/linux/uio.h b/include/linux/uio.h index f184af1999a8..2d0131ad4604 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -60,7 +60,7 @@ struct iov_iter { static inline enum iter_type iov_iter_type(const struct iov_iter *i) { - return i->type & ~(READ | WRITE); + return i->type & ~(READ | WRITE | ITER_BVEC_FLAG_NO_REF); } static inline bool iter_is_iovec(const struct iov_iter *i) diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 103a48a48872..12bf0b68ed92 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -115,6 +115,7 @@ struct uprobes_state { struct xol_area *xol_area; }; +extern void __init uprobes_init(void); extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); extern bool is_swbp_insn(uprobe_opcode_t *insn); @@ -154,6 +155,10 @@ extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, struct uprobes_state { }; +static inline void uprobes_init(void) +{ +} + #define uprobe_get_trap_addr(regs) instruction_pointer(regs) static inline int diff --git a/include/linux/usb.h b/include/linux/usb.h index 5e49e82c4368..ff010d1fd1c7 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -200,7 +200,6 @@ usb_find_last_int_out_endpoint(struct usb_host_interface *alt, * @dev: driver model's view of this device * @usb_dev: if an interface is bound to the USB major, this will point * to the sysfs representation for that device. - * @pm_usage_cnt: PM usage counter for this interface * @reset_ws: Used for scheduling resets from atomic context. * @resetting_device: USB core reset the device, so use alt setting 0 as * current; needs bandwidth alloc after reset. @@ -257,7 +256,6 @@ struct usb_interface { struct device dev; /* interface specific device info */ struct device *usb_dev; - atomic_t pm_usage_cnt; /* usage counter for autosuspend */ struct work_struct reset_ws; /* for resets in atomic context */ }; #define to_usb_interface(d) container_of(d, struct usb_interface, dev) diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index fab02133a919..3dc70adfe5f5 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h @@ -63,7 +63,7 @@ struct virtqueue; /* * Creates a virtqueue and allocates the descriptor ring. If * may_reduce_num is set, then this may allocate a smaller ring than - * expected. The caller should query virtqueue_get_ring_size to learn + * expected. The caller should query virtqueue_get_vring_size to learn * the actual size of the ring. 
*/ struct virtqueue *vring_create_virtqueue(unsigned int index, diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 398e9c95cd61..c6eebb839552 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -21,6 +21,11 @@ struct notifier_block; /* in notifier.h */ #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ #define VM_NO_GUARD 0x00000040 /* don't add guard page */ #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ +/* + * Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with + * vfree_atomic(). + */ +#define VM_FLUSH_RESET_PERMS 0x00000100 /* Reset direct map and flush TLB on unmap */ /* bits [20..32] reserved for arch specific ioremap internals */ /* @@ -142,6 +147,13 @@ extern int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages); extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); extern void unmap_kernel_range(unsigned long addr, unsigned long size); +static inline void set_vm_flush_reset_perms(void *addr) +{ + struct vm_struct *vm = find_vm_area(addr); + + if (vm) + vm->flags |= VM_FLUSH_RESET_PERMS; +} #else static inline int map_kernel_range_noflush(unsigned long start, unsigned long size, @@ -157,6 +169,9 @@ static inline void unmap_kernel_range(unsigned long addr, unsigned long size) { } +static inline void set_vm_flush_reset_perms(void *addr) +{ +} #endif /* Allocate/destroy a 'vmalloc' VM area. */ |
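set_vm_flush_reset_perms() above tags a vmalloc area so that freeing it resets the direct map and flushes TLBs, which matters for memory whose permissions were changed (module text, BPF images and similar). A hedged sketch of the intended call order (the allocator, its size handling and the choice of RO+X permissions are illustrative; real users live in the module/BPF allocators):

#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/mm.h>

/* Hypothetical allocator for executable text. */
static void *example_alloc_text(unsigned long size)
{
	int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	void *p = vmalloc(size);

	if (!p)
		return NULL;

	/* Must be set before changing permissions, so vfree() knows to
	 * restore the direct map and flush TLBs. */
	set_vm_flush_reset_perms(p);
	set_memory_ro((unsigned long)p, npages);
	set_memory_x((unsigned long)p, npages);

	return p;
}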