Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/acpi.h         |   7
-rw-r--r--  include/linux/cpu.h          |   5
-rw-r--r--  include/linux/cpufreq.h      | 388
-rw-r--r--  include/linux/cpuidle.h      |   9
-rw-r--r--  include/linux/dcache.h       |   1
-rw-r--r--  include/linux/inetdevice.h   |  34
-rw-r--r--  include/linux/ipv6.h         |   1
-rw-r--r--  include/linux/mlx5/device.h  |  22
-rw-r--r--  include/linux/mlx5/driver.h  |   7
-rw-r--r--  include/linux/mm_types.h     |   1
-rw-r--r--  include/linux/of.h           |   7
-rw-r--r--  include/linux/of_device.h    |  15
-rw-r--r--  include/linux/pci-acpi.h     |  10
-rw-r--r--  include/linux/sched.h        |   6
-rw-r--r--  include/linux/spinlock.h     |  14
-rw-r--r--  include/linux/swapops.h      |   2
-rw-r--r--  include/linux/syscalls.h     |   5
-rw-r--r--  include/linux/wait.h         |  57
18 files changed, 319 insertions, 272 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 353ba256f368..a5db4aeefa36 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -481,6 +481,13 @@ void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
acpi_status acpi_os_prepare_sleep(u8 sleep_state,
u32 pm1a_control, u32 pm1b_control);
+
+void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
+ u32 val_a, u32 val_b));
+
+acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
+ u32 val_a, u32 val_b);
+
#ifdef CONFIG_X86
void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
#else
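
For illustration, a minimal sketch of how platform code might hook the new
extended-sleep interface declared above (the handler name and body are
assumptions, not part of this patch):

    /* Hypothetical handler matching the new callback prototype. */
    static int xyz_prepare_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
    {
            /* program platform sleep registers with val_a/val_b here */
            return 0;       /* 0 = proceed with the sleep transition */
    }

    /* Registered once, e.g. from platform init code: */
    acpi_os_set_prepare_extended_sleep(xyz_prepare_extended_sleep);
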
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index ab0eade73039..801ff9e73679 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -28,6 +28,7 @@ struct cpu {
extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
+extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr);
@@ -172,6 +173,8 @@ extern struct bus_type cpu_subsys;
#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
+extern void cpu_hotplug_begin(void);
+extern void cpu_hotplug_done(void);
extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
@@ -197,6 +200,8 @@ static inline void cpu_hotplug_driver_unlock(void)
#else /* CONFIG_HOTPLUG_CPU */
+static inline void cpu_hotplug_begin(void) {}
+static inline void cpu_hotplug_done(void) {}
#define get_online_cpus() do { } while (0)
#define put_online_cpus() do { } while (0)
#define cpu_hotplug_disable() do { } while (0)
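
Roughly how the hotplug core is expected to pair the newly-declared helpers
(a sketch; the work between the two calls is illustrative):

    cpu_hotplug_begin();    /* writer side: exclude get_online_cpus() readers */
    /* ... bring the CPU up or down, run notifiers ... */
    cpu_hotplug_done();
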
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 90d5a15120d5..d568f3975eeb 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -11,71 +11,36 @@
#ifndef _LINUX_CPUFREQ_H
#define _LINUX_CPUFREQ_H
-#include <asm/cputime.h>
-#include <linux/mutex.h>
-#include <linux/notifier.h>
-#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/completion.h>
#include <linux/kobject.h>
+#include <linux/notifier.h>
#include <linux/sysfs.h>
-#include <linux/completion.h>
-#include <linux/workqueue.h>
-#include <linux/cpumask.h>
-#include <asm/div64.h>
-
-#define CPUFREQ_NAME_LEN 16
-/* Print length for names. Extra 1 space for accomodating '\n' in prints */
-#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
/*********************************************************************
- * CPUFREQ NOTIFIER INTERFACE *
+ * CPUFREQ INTERFACE *
*********************************************************************/
-
-#define CPUFREQ_TRANSITION_NOTIFIER (0)
-#define CPUFREQ_POLICY_NOTIFIER (1)
-
-#ifdef CONFIG_CPU_FREQ
-int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
-int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
-extern void disable_cpufreq(void);
-#else /* CONFIG_CPU_FREQ */
-static inline int cpufreq_register_notifier(struct notifier_block *nb,
- unsigned int list)
-{
- return 0;
-}
-static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
- unsigned int list)
-{
- return 0;
-}
-static inline void disable_cpufreq(void) { }
-#endif /* CONFIG_CPU_FREQ */
-
-/* if (cpufreq_driver->target) exists, the ->governor decides what frequency
- * within the limits is used. If (cpufreq_driver->setpolicy> exists, these
- * two generic policies are available:
- */
-
-#define CPUFREQ_POLICY_POWERSAVE (1)
-#define CPUFREQ_POLICY_PERFORMANCE (2)
-
-/* Frequency values here are CPU kHz so that hardware which doesn't run
- * with some frequencies can complain without having to guess what per
- * cent / per mille means.
+/*
+ * Frequency values here are CPU kHz
+ *
* Maximum transition latency is in nanoseconds - if it's unknown,
* CPUFREQ_ETERNAL shall be used.
*/
+#define CPUFREQ_ETERNAL (-1)
+#define CPUFREQ_NAME_LEN 16
+/* Print length for names. Extra 1 space for accommodating '\n' in prints */
+#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
+
struct cpufreq_governor;
-/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
-extern struct kobject *cpufreq_global_kobject;
-int cpufreq_get_global_kobject(void);
-void cpufreq_put_global_kobject(void);
-int cpufreq_sysfs_create_file(const struct attribute *attr);
-void cpufreq_sysfs_remove_file(const struct attribute *attr);
+struct cpufreq_freqs {
+ unsigned int cpu; /* cpu nr */
+ unsigned int old;
+ unsigned int new;
+ u8 flags; /* flags of cpufreq_driver, see below. */
+};
-#define CPUFREQ_ETERNAL (-1)
struct cpufreq_cpuinfo {
unsigned int max_freq;
unsigned int min_freq;
@@ -117,123 +82,103 @@ struct cpufreq_policy {
struct cpufreq_real_policy user_policy;
+ struct list_head policy_list;
struct kobject kobj;
struct completion kobj_unregister;
int transition_ongoing; /* Tracks transition status */
};
-#define CPUFREQ_ADJUST (0)
-#define CPUFREQ_INCOMPATIBLE (1)
-#define CPUFREQ_NOTIFY (2)
-#define CPUFREQ_START (3)
-#define CPUFREQ_UPDATE_POLICY_CPU (4)
-
/* Only for ACPI */
#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
#define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */
#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */
#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
+struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
+void cpufreq_cpu_put(struct cpufreq_policy *policy);
+
static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
return cpumask_weight(policy->cpus) > 1;
}
-/******************** cpufreq transition notifiers *******************/
-
-#define CPUFREQ_PRECHANGE (0)
-#define CPUFREQ_POSTCHANGE (1)
-#define CPUFREQ_RESUMECHANGE (8)
-#define CPUFREQ_SUSPENDCHANGE (9)
+/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
+extern struct kobject *cpufreq_global_kobject;
+int cpufreq_get_global_kobject(void);
+void cpufreq_put_global_kobject(void);
+int cpufreq_sysfs_create_file(const struct attribute *attr);
+void cpufreq_sysfs_remove_file(const struct attribute *attr);
-struct cpufreq_freqs {
- unsigned int cpu; /* cpu nr */
- unsigned int old;
- unsigned int new;
- u8 flags; /* flags of cpufreq_driver, see below. */
-};
+#ifdef CONFIG_CPU_FREQ
+unsigned int cpufreq_get(unsigned int cpu);
+unsigned int cpufreq_quick_get(unsigned int cpu);
+unsigned int cpufreq_quick_get_max(unsigned int cpu);
+void disable_cpufreq(void);
-/**
- * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
- * safe)
- * @old: old value
- * @div: divisor
- * @mult: multiplier
- *
- *
- * new = old * mult / div
- */
-static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
- u_int mult)
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
+int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
+int cpufreq_update_policy(unsigned int cpu);
+bool have_governor_per_policy(void);
+struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
+#else
+static inline unsigned int cpufreq_get(unsigned int cpu)
{
-#if BITS_PER_LONG == 32
-
- u64 result = ((u64) old) * ((u64) mult);
- do_div(result, div);
- return (unsigned long) result;
-
-#elif BITS_PER_LONG == 64
-
- unsigned long result = old * ((u64) mult);
- result /= div;
- return result;
-
+ return 0;
+}
+static inline unsigned int cpufreq_quick_get(unsigned int cpu)
+{
+ return 0;
+}
+static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
+{
+ return 0;
+}
+static inline void disable_cpufreq(void) { }
#endif
-};
/*********************************************************************
- * CPUFREQ GOVERNORS *
+ * CPUFREQ DRIVER INTERFACE *
*********************************************************************/
-#define CPUFREQ_GOV_START 1
-#define CPUFREQ_GOV_STOP 2
-#define CPUFREQ_GOV_LIMITS 3
-#define CPUFREQ_GOV_POLICY_INIT 4
-#define CPUFREQ_GOV_POLICY_EXIT 5
+#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
+#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
-struct cpufreq_governor {
- char name[CPUFREQ_NAME_LEN];
- int initialized;
- int (*governor) (struct cpufreq_policy *policy,
- unsigned int event);
- ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
- char *buf);
- int (*store_setspeed) (struct cpufreq_policy *policy,
- unsigned int freq);
- unsigned int max_transition_latency; /* HW must be able to switch to
- next freq faster than this value in nano secs or we
- will fallback to performance governor */
- struct list_head governor_list;
- struct module *owner;
+struct freq_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct cpufreq_policy *, char *);
+ ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
};
-/*
- * Pass a target to the cpufreq driver.
- */
-extern int cpufreq_driver_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation);
-extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation);
+#define cpufreq_freq_attr_ro(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
-extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy,
- unsigned int cpu);
+#define cpufreq_freq_attr_ro_perm(_name, _perm) \
+static struct freq_attr _name = \
+__ATTR(_name, _perm, show_##_name, NULL)
-int cpufreq_register_governor(struct cpufreq_governor *governor);
-void cpufreq_unregister_governor(struct cpufreq_governor *governor);
+#define cpufreq_freq_attr_rw(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
-/*********************************************************************
- * CPUFREQ DRIVER INTERFACE *
- *********************************************************************/
+struct global_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj,
+ struct attribute *attr, char *buf);
+ ssize_t (*store)(struct kobject *a, struct attribute *b,
+ const char *c, size_t count);
+};
-#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
-#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
+#define define_one_global_ro(_name) \
+static struct global_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define define_one_global_rw(_name) \
+static struct global_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
-struct freq_attr;
struct cpufreq_driver {
- struct module *owner;
char name[CPUFREQ_NAME_LEN];
u8 flags;
/*
@@ -258,8 +203,6 @@ struct cpufreq_driver {
unsigned int (*get) (unsigned int cpu);
/* optional */
- unsigned int (*getavg) (struct cpufreq_policy *policy,
- unsigned int cpu);
int (*bios_limit) (int cpu, unsigned int *limit);
int (*exit) (struct cpufreq_policy *policy);
@@ -269,7 +212,6 @@ struct cpufreq_driver {
};
/* flags */
-
#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if
* all ->init() calls failed */
#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel
@@ -281,8 +223,7 @@ struct cpufreq_driver {
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
-void cpufreq_notify_transition(struct cpufreq_policy *policy,
- struct cpufreq_freqs *freqs, unsigned int state);
+const char *cpufreq_get_current_driver(void);
static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
unsigned int min, unsigned int max)
@@ -300,87 +241,118 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
return;
}
-struct freq_attr {
- struct attribute attr;
- ssize_t (*show)(struct cpufreq_policy *, char *);
- ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
-};
-
-#define cpufreq_freq_attr_ro(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
-
-#define cpufreq_freq_attr_ro_perm(_name, _perm) \
-static struct freq_attr _name = \
-__ATTR(_name, _perm, show_##_name, NULL)
-
-#define cpufreq_freq_attr_rw(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
+/*********************************************************************
+ * CPUFREQ NOTIFIER INTERFACE *
+ *********************************************************************/
-struct global_attr {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj,
- struct attribute *attr, char *buf);
- ssize_t (*store)(struct kobject *a, struct attribute *b,
- const char *c, size_t count);
-};
+#define CPUFREQ_TRANSITION_NOTIFIER (0)
+#define CPUFREQ_POLICY_NOTIFIER (1)
-#define define_one_global_ro(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
+/* Transition notifiers */
+#define CPUFREQ_PRECHANGE (0)
+#define CPUFREQ_POSTCHANGE (1)
+#define CPUFREQ_RESUMECHANGE (8)
+#define CPUFREQ_SUSPENDCHANGE (9)
-#define define_one_global_rw(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
+/* Policy Notifiers */
+#define CPUFREQ_ADJUST (0)
+#define CPUFREQ_INCOMPATIBLE (1)
+#define CPUFREQ_NOTIFY (2)
+#define CPUFREQ_START (3)
+#define CPUFREQ_UPDATE_POLICY_CPU (4)
-struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
-void cpufreq_cpu_put(struct cpufreq_policy *data);
-const char *cpufreq_get_current_driver(void);
+#ifdef CONFIG_CPU_FREQ
+int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
+int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
-/*********************************************************************
- * CPUFREQ 2.6. INTERFACE *
- *********************************************************************/
-u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
-int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
-int cpufreq_update_policy(unsigned int cpu);
-bool have_governor_per_policy(void);
-struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
+void cpufreq_notify_transition(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, unsigned int state);
-#ifdef CONFIG_CPU_FREQ
-/*
- * query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it
- */
-unsigned int cpufreq_get(unsigned int cpu);
-#else
-static inline unsigned int cpufreq_get(unsigned int cpu)
+#else /* CONFIG_CPU_FREQ */
+static inline int cpufreq_register_notifier(struct notifier_block *nb,
+ unsigned int list)
{
return 0;
}
-#endif
-
-/*
- * query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it
- */
-#ifdef CONFIG_CPU_FREQ
-unsigned int cpufreq_quick_get(unsigned int cpu);
-unsigned int cpufreq_quick_get_max(unsigned int cpu);
-#else
-static inline unsigned int cpufreq_quick_get(unsigned int cpu)
+static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
+ unsigned int list)
{
return 0;
}
-static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
+#endif /* !CONFIG_CPU_FREQ */
+
+/**
+ * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
+ * safe)
+ * @old: old value
+ * @div: divisor
+ * @mult: multiplier
+ *
+ *
+ * new = old * mult / div
+ */
+static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
+ u_int mult)
{
- return 0;
-}
+#if BITS_PER_LONG == 32
+ u64 result = ((u64) old) * ((u64) mult);
+ do_div(result, div);
+ return (unsigned long) result;
+
+#elif BITS_PER_LONG == 64
+ unsigned long result = old * ((u64) mult);
+ result /= div;
+ return result;
#endif
+}
/*********************************************************************
- * CPUFREQ DEFAULT GOVERNOR *
+ * CPUFREQ GOVERNORS *
*********************************************************************/
/*
+ * If (cpufreq_driver->target) exists, the ->governor decides what frequency
+ * within the limits is used. If (cpufreq_driver->setpolicy) exists, these
+ * two generic policies are available:
+ */
+#define CPUFREQ_POLICY_POWERSAVE (1)
+#define CPUFREQ_POLICY_PERFORMANCE (2)
+
+/* Governor Events */
+#define CPUFREQ_GOV_START 1
+#define CPUFREQ_GOV_STOP 2
+#define CPUFREQ_GOV_LIMITS 3
+#define CPUFREQ_GOV_POLICY_INIT 4
+#define CPUFREQ_GOV_POLICY_EXIT 5
+
+struct cpufreq_governor {
+ char name[CPUFREQ_NAME_LEN];
+ int initialized;
+ int (*governor) (struct cpufreq_policy *policy,
+ unsigned int event);
+ ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
+ char *buf);
+ int (*store_setspeed) (struct cpufreq_policy *policy,
+ unsigned int freq);
+ unsigned int max_transition_latency; /* HW must be able to switch to
+ next freq faster than this value in nano secs or we
+ will fall back to performance governor */
+ struct list_head governor_list;
+ struct module *owner;
+};
+
+/* Pass a target to the cpufreq driver */
+int cpufreq_driver_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation);
+int __cpufreq_driver_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation);
+int cpufreq_register_governor(struct cpufreq_governor *governor);
+void cpufreq_unregister_governor(struct cpufreq_governor *governor);
+
+/* CPUFREQ DEFAULT GOVERNOR */
+/*
* Performance governor is the fallback governor if any other gov failed to
* auto load due to latency restrictions
*/
@@ -428,18 +400,16 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
unsigned int relation,
unsigned int *index);
-/* the following 3 funtions are for cpufreq core use only */
+void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
+ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
+
+/* the following function is for cpufreq core use only */
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
-
void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
unsigned int cpu);
-void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
-
void cpufreq_frequency_table_put_attr(unsigned int cpu);
-ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
-
#endif /* _LINUX_CPUFREQ_H */
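
The reshuffled notifier interface is used as before; a hedged sketch of a
transition notifier (the handler and its logging are illustrative, not part
of this patch):

    /* Hypothetical module logging completed frequency transitions. */
    static int xyz_cpufreq_notifier(struct notifier_block *nb,
                                    unsigned long state, void *data)
    {
            struct cpufreq_freqs *freqs = data;

            if (state == CPUFREQ_POSTCHANGE)
                    pr_info("cpu%u: %u kHz -> %u kHz\n",
                            freqs->cpu, freqs->old, freqs->new);
            return NOTIFY_OK;
    }

    static struct notifier_block xyz_nb = {
            .notifier_call = xyz_cpufreq_notifier,
    };

    /* register/unregister from the module's init/exit paths: */
    cpufreq_register_notifier(&xyz_nb, CPUFREQ_TRANSITION_NOTIFIER);
    cpufreq_unregister_notifier(&xyz_nb, CPUFREQ_TRANSITION_NOTIFIER);
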
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 0bc4b74668e9..781addc66f03 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -13,8 +13,6 @@
#include <linux/percpu.h>
#include <linux/list.h>
-#include <linux/kobject.h>
-#include <linux/completion.h>
#include <linux/hrtimer.h>
#define CPUIDLE_STATE_MAX 10
@@ -61,6 +59,10 @@ struct cpuidle_state {
#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
+struct cpuidle_device_kobj;
+struct cpuidle_state_kobj;
+struct cpuidle_driver_kobj;
+
struct cpuidle_device {
unsigned int registered:1;
unsigned int enabled:1;
@@ -71,9 +73,8 @@ struct cpuidle_device {
struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
struct cpuidle_driver_kobj *kobj_driver;
+ struct cpuidle_device_kobj *kobj_dev;
struct list_head device_list;
- struct kobject kobj;
- struct completion kobj_unregister;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
int safe_state_index;
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index b90337c9d468..4a12532da8c4 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -336,6 +336,7 @@ extern int d_validate(struct dentry *, struct dentry *);
* helper function for dentry_operations.d_dname() members
*/
extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+extern char *simple_dname(struct dentry *, char *, int);
extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index b99cd23f3474..79640e015a86 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -5,45 +5,13 @@
#include <linux/bitmap.h>
#include <linux/if.h>
+#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/timer.h>
#include <linux/sysctl.h>
#include <linux/rtnetlink.h>
-enum
-{
- IPV4_DEVCONF_FORWARDING=1,
- IPV4_DEVCONF_MC_FORWARDING,
- IPV4_DEVCONF_PROXY_ARP,
- IPV4_DEVCONF_ACCEPT_REDIRECTS,
- IPV4_DEVCONF_SECURE_REDIRECTS,
- IPV4_DEVCONF_SEND_REDIRECTS,
- IPV4_DEVCONF_SHARED_MEDIA,
- IPV4_DEVCONF_RP_FILTER,
- IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
- IPV4_DEVCONF_BOOTP_RELAY,
- IPV4_DEVCONF_LOG_MARTIANS,
- IPV4_DEVCONF_TAG,
- IPV4_DEVCONF_ARPFILTER,
- IPV4_DEVCONF_MEDIUM_ID,
- IPV4_DEVCONF_NOXFRM,
- IPV4_DEVCONF_NOPOLICY,
- IPV4_DEVCONF_FORCE_IGMP_VERSION,
- IPV4_DEVCONF_ARP_ANNOUNCE,
- IPV4_DEVCONF_ARP_IGNORE,
- IPV4_DEVCONF_PROMOTE_SECONDARIES,
- IPV4_DEVCONF_ARP_ACCEPT,
- IPV4_DEVCONF_ARP_NOTIFY,
- IPV4_DEVCONF_ACCEPT_LOCAL,
- IPV4_DEVCONF_SRC_VMARK,
- IPV4_DEVCONF_PROXY_ARP_PVLAN,
- IPV4_DEVCONF_ROUTE_LOCALNET,
- __IPV4_DEVCONF_MAX
-};
-
-#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
-
struct ipv4_devconf {
void *sysctl;
int data[IPV4_DEVCONF_MAX];
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 850e95bc766c..b8b7dc755752 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -101,6 +101,7 @@ struct inet6_skb_parm {
#define IP6SKB_FORWARDED 2
#define IP6SKB_REROUTED 4
#define IP6SKB_ROUTERALERT 8
+#define IP6SKB_FRAGMENTED 16
};
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
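
A sketch of how the new flag is meant to flow (the placement of producer and
consumer is illustrative, not taken from this patch):

    /* Producer, e.g. after reassembly completes: */
    IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;

    /* Consumer, e.g. a later input path: */
    if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
            return 0;       /* already seen as fragments; don't redo work */
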
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 737685e9e852..68029b30c3dc 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -309,21 +309,20 @@ struct mlx5_hca_cap {
__be16 max_desc_sz_rq;
u8 rsvd21[2];
__be16 max_desc_sz_sq_dc;
- u8 rsvd22[4];
- __be16 max_qp_mcg;
- u8 rsvd23;
+ __be32 max_qp_mcg;
+ u8 rsvd22[3];
u8 log_max_mcg;
- u8 rsvd24;
+ u8 rsvd23;
u8 log_max_pd;
- u8 rsvd25;
+ u8 rsvd24;
u8 log_max_xrcd;
- u8 rsvd26[42];
+ u8 rsvd25[42];
__be16 log_uar_page_sz;
- u8 rsvd27[28];
+ u8 rsvd26[28];
u8 log_msx_atomic_size_qp;
- u8 rsvd28[2];
+ u8 rsvd27[2];
u8 log_msx_atomic_size_dc;
- u8 rsvd29[76];
+ u8 rsvd28[76];
};
@@ -472,9 +471,8 @@ struct mlx5_eqe_cmd {
struct mlx5_eqe_page_req {
u8 rsvd0[2];
__be16 func_id;
- u8 rsvd1[2];
- __be16 num_pages;
- __be32 rsvd2[5];
+ __be32 num_pages;
+ __be32 rsvd1[5];
};
union ev_data {
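
With num_pages widened from __be16 to __be32, consumers read it with
be32_to_cpu(); a sketch (the eqe handler context, and the convention that a
negative count means a reclaim request, are assumptions here):

    struct mlx5_eqe_page_req *req = &eqe->data.req_pages;
    s32 npages = be32_to_cpu(req->num_pages);   /* negative = reclaim pages */
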
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 2aa258b0ced1..8888381fc150 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -358,7 +358,7 @@ struct mlx5_caps {
u32 reserved_lkey;
u8 local_ca_ack_delay;
u8 log_max_mcg;
- u16 max_qp_mcg;
+ u32 max_qp_mcg;
int min_page_sz;
};
@@ -691,7 +691,7 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
- s16 npages);
+ s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
@@ -731,9 +731,6 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
-typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size);
-int mlx5_register_health_report_handler(health_handler_t handler);
-void mlx5_unregister_health_report_handler(void);
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index fb425aa16c01..faf4b7c1ad12 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -332,6 +332,7 @@ struct mm_struct {
unsigned long pgoff, unsigned long flags);
#endif
unsigned long mmap_base; /* base of mmap area */
+ unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
unsigned long task_size; /* size of task vm space */
unsigned long highest_vm_end; /* highest vma end address */
pgd_t * pgd;
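
A sketch of how per-arch layout code might fill the new field alongside
mmap_base (mmap_is_legacy(), mmap_rnd() and mmap_base() are arch-internal
helpers assumed here, not defined by this patch):

    mm->mmap_legacy_base = TASK_UNMAPPED_BASE + mmap_rnd();
    if (mmap_is_legacy()) {
            mm->mmap_base = mm->mmap_legacy_base;
            mm->get_unmapped_area = arch_get_unmapped_area;
    } else {
            mm->mmap_base = mmap_base();
            mm->get_unmapped_area = arch_get_unmapped_area_topdown;
    }
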
diff --git a/include/linux/of.h b/include/linux/of.h
index 1fd08ca23106..c0bb2f188048 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -266,6 +266,7 @@ extern int of_device_is_available(const struct device_node *device);
extern const void *of_get_property(const struct device_node *node,
const char *name,
int *lenp);
+extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
#define for_each_property_of_node(dn, pp) \
for (pp = dn->properties; pp != NULL; pp = pp->next)
@@ -459,6 +460,12 @@ static inline const void *of_get_property(const struct device_node *node,
return NULL;
}
+static inline struct device_node *of_get_cpu_node(int cpu,
+ unsigned int *thread)
+{
+ return NULL;
+}
+
static inline int of_property_read_u64(const struct device_node *np,
const char *propname, u64 *out_value)
{
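
A sketch of a caller of the new of_get_cpu_node() (the "clock-frequency"
property is just an example):

    unsigned int thread;
    struct device_node *np = of_get_cpu_node(cpu, &thread);

    if (np) {
            const __be32 *freq = of_get_property(np, "clock-frequency", NULL);
            /* ... use the property ... */
            of_node_put(np);    /* drop the reference taken by the lookup */
    }
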
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 9d27475feec1..82ce324fdce7 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_OF_DEVICE_H
#define _LINUX_OF_DEVICE_H
+#include <linux/cpu.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h> /* temporary until merge */
@@ -43,6 +44,15 @@ static inline void of_device_node_put(struct device *dev)
of_node_put(dev->of_node);
}
+static inline struct device_node *of_cpu_device_node_get(int cpu)
+{
+ struct device *cpu_dev;
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return NULL;
+ return of_node_get(cpu_dev->of_node);
+}
+
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -67,6 +77,11 @@ static inline const struct of_device_id *of_match_device(
{
return NULL;
}
+
+static inline struct device_node *of_cpu_device_node_get(int cpu)
+{
+ return NULL;
+}
#endif /* CONFIG_OF */
#endif /* _LINUX_OF_DEVICE_H */
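
Callers of the new helper would follow the usual refcounting pattern; a
sketch (the error handling is illustrative):

    struct device_node *np = of_cpu_device_node_get(cpu);

    if (!np)
            return -ENODEV;     /* CPU device not registered, or no DT node */
    /* ... read properties from np ... */
    of_node_put(np);            /* balances the of_node_get() in the helper */
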
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 170447977278..d006f0ca60f4 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -47,24 +47,22 @@ void acpi_pci_remove_bus(struct pci_bus *bus);
#ifdef CONFIG_ACPI_PCI_SLOT
void acpi_pci_slot_init(void);
-void acpi_pci_slot_enumerate(struct pci_bus *bus, acpi_handle handle);
+void acpi_pci_slot_enumerate(struct pci_bus *bus);
void acpi_pci_slot_remove(struct pci_bus *bus);
#else
static inline void acpi_pci_slot_init(void) { }
-static inline void acpi_pci_slot_enumerate(struct pci_bus *bus,
- acpi_handle handle) { }
+static inline void acpi_pci_slot_enumerate(struct pci_bus *bus) { }
static inline void acpi_pci_slot_remove(struct pci_bus *bus) { }
#endif
#ifdef CONFIG_HOTPLUG_PCI_ACPI
void acpiphp_init(void);
-void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle);
+void acpiphp_enumerate_slots(struct pci_bus *bus);
void acpiphp_remove_slots(struct pci_bus *bus);
void acpiphp_check_host_bridge(acpi_handle handle);
#else
static inline void acpiphp_init(void) { }
-static inline void acpiphp_enumerate_slots(struct pci_bus *bus,
- acpi_handle handle) { }
+static inline void acpiphp_enumerate_slots(struct pci_bus *bus) { }
static inline void acpiphp_remove_slots(struct pci_bus *bus) { }
static inline void acpiphp_check_host_bridge(acpi_handle handle) { }
#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d722490da030..078066daffd4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1532,6 +1532,8 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
* Test if a process is not yet dead (at most zombie state)
* If pid_alive fails, then pointers within the task structure
* can be stale and must not be dereferenced.
+ *
+ * Return: 1 if the process is alive. 0 otherwise.
*/
static inline int pid_alive(struct task_struct *p)
{
@@ -1543,6 +1545,8 @@ static inline int pid_alive(struct task_struct *p)
* @tsk: Task structure to be checked.
*
* Check if a task structure is the first user space task the kernel created.
+ *
+ * Return: 1 if the task structure is init. 0 otherwise.
*/
static inline int is_global_init(struct task_struct *tsk)
{
@@ -1894,6 +1898,8 @@ extern struct task_struct *idle_task(int cpu);
/**
* is_idle_task - is the specified task an idle task?
* @p: the task in question.
+ *
+ * Return: 1 if @p is an idle task. 0 otherwise.
*/
static inline bool is_idle_task(const struct task_struct *p)
{
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7d537ced949a..75f34949d9ab 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -117,9 +117,17 @@ do { \
#endif /*arch_spin_is_contended*/
#endif
-/* The lock does not imply full memory barrier. */
-#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
-static inline void smp_mb__after_lock(void) { smp_mb(); }
+/*
+ * Despite its name it doesn't necessarily have to be a full barrier.
+ * It should only guarantee that a STORE before the critical section
+ * can not be reordered with a LOAD inside this section.
+ * spin_lock() is the one-way barrier, this LOAD can not escape out
+ * of the region. So the default implementation simply ensures that
+ * a STORE can not move into the critical section, smp_wmb() should
+ * serialize it with another STORE done by spin_lock().
+ */
+#ifndef smp_mb__before_spinlock
+#define smp_mb__before_spinlock() smp_wmb()
#endif
/**
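
The pattern the new comment describes, sketched (names are illustrative; the
canonical user is the scheduler's wakeup path):

    x = 1;                          /* STORE before the critical section */
    smp_mb__before_spinlock();      /* keep it ordered against the LOAD below */
    spin_lock(&lock);
    r = y;                          /* LOAD inside the critical section */
    spin_unlock(&lock);
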
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index c5fd30d2a415..8d4fa82bfb91 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -67,6 +67,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
swp_entry_t arch_entry;
BUG_ON(pte_file(pte));
+ if (pte_swp_soft_dirty(pte))
+ pte = pte_swp_clear_soft_dirty(pte);
arch_entry = __pte_to_swp_entry(pte);
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 4147d700a293..84662ecc7b51 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -802,9 +802,14 @@ asmlinkage long sys_vfork(void);
asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int,
int __user *);
#else
+#ifdef CONFIG_CLONE_BACKWARDS3
+asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *,
+ int __user *, int);
+#else
asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
int __user *, int);
#endif
+#endif
asmlinkage long sys_execve(const char __user *filename,
const char __user *const __user *argv,
diff --git a/include/linux/wait.h b/include/linux/wait.h
index f487a4750b7f..a67fc1635592 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -811,6 +811,63 @@ do { \
__ret; \
})
+#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
+ lock, ret) \
+do { \
+ DEFINE_WAIT(__wait); \
+ \
+ for (;;) { \
+ prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (signal_pending(current)) { \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
+ spin_unlock_irq(&lock); \
+ ret = schedule_timeout(ret); \
+ spin_lock_irq(&lock); \
+ if (!ret) \
+ break; \
+ } \
+ finish_wait(&wq, &__wait); \
+} while (0)
+
+/**
+ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
+ * The condition is checked under the lock. This is expected
+ * to be called with the lock taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before schedule()
+ * and reacquired afterwards.
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or signal is received. The @condition is
+ * checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before going to sleep and is reacquired afterwards.
+ *
+ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
+ * was interrupted by a signal, and the remaining jiffies if the
+ * condition evaluated to true before the timeout elapsed.
+ */
+#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
+ timeout) \
+({ \
+ int __ret = timeout; \
+ \
+ if (!(condition)) \
+ __wait_event_interruptible_lock_irq_timeout( \
+ wq, condition, lock, __ret); \
+ __ret; \
+})
+
/*
* These are the old interfaces to sleep waiting for an event.
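
A hedged usage sketch for the new macro (the device structure and its fields
are hypothetical):

    spin_lock_irq(&dev->lock);
    ret = wait_event_interruptible_lock_irq_timeout(dev->wait_q,
                                                    dev->data_ready,
                                                    dev->lock,
                                                    msecs_to_jiffies(500));
    /* ret == 0: timeout; ret == -ERESTARTSYS: signal; ret > 0: jiffies left */
    if (ret > 0)
            consume_data(dev);      /* condition true, dev->lock still held */
    spin_unlock_irq(&dev->lock);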