Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpi_pad.c | 16
-rw-r--r--  drivers/acpi/acpi_processor.c | 2
-rw-r--r--  drivers/acpi/battery.c | 77
-rw-r--r--  drivers/acpi/bus.c | 53
-rw-r--r--  drivers/acpi/processor_driver.c | 7
-rw-r--r--  drivers/acpi/scan.c | 16
-rw-r--r--  drivers/acpi/thermal.c | 2
-rw-r--r--  drivers/acpi/utils.c | 64
-rw-r--r--  drivers/base/power/wakeup.c | 6
-rw-r--r--  drivers/block/virtio_blk.c | 4
-rw-r--r--  drivers/clk/clk-divider.c | 2
-rw-r--r--  drivers/clk/st/clkgen-pll.c | 4
-rw-r--r--  drivers/clk/tegra/clk-pll.c | 64
-rw-r--r--  drivers/clocksource/tcb_clksrc.c | 8
-rw-r--r--  drivers/clocksource/timer-marco.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq-cpu0.c | 16
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 6
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 52
-rw-r--r--  drivers/cpuidle/Kconfig.arm | 6
-rw-r--r--  drivers/cpuidle/Makefile | 1
-rw-r--r--  drivers/cpuidle/cpuidle-clps711x.c | 64
-rw-r--r--  drivers/dma/dw/core.c | 11
-rw-r--r--  drivers/dma/sa11x0-dma.c | 4
-rw-r--r--  drivers/firewire/core.h | 4
-rw-r--r--  drivers/firewire/ohci.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 30
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 365
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 130
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 6
-rw-r--r--  drivers/input/keyboard/Kconfig | 2
-rw-r--r--  drivers/input/keyboard/pxa27x_keypad.c | 7
-rw-r--r--  drivers/input/mouse/Kconfig | 2
-rw-r--r--  drivers/input/mouse/synaptics.c | 166
-rw-r--r--  drivers/input/serio/ambakmi.c | 3
-rw-r--r--  drivers/input/touchscreen/Kconfig | 2
-rw-r--r--  drivers/md/dm-cache-target.c | 2
-rw-r--r--  drivers/md/dm-mpath.c | 14
-rw-r--r--  drivers/md/dm-thin.c | 12
-rw-r--r--  drivers/pinctrl/vt8500/pinctrl-wmt.c | 23
-rw-r--r--  drivers/power/power_supply_core.c | 15
48 files changed, 776 insertions(+), 567 deletions(-)
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 37d73024b82e..f148a0580e04 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -156,12 +156,13 @@ static int power_saving_thread(void *data)
while (!kthread_should_stop()) {
int cpu;
- u64 expire_time;
+ unsigned long expire_time;
try_to_freeze();
/* round robin to cpus */
- if (last_jiffies + round_robin_time * HZ < jiffies) {
+ expire_time = last_jiffies + round_robin_time * HZ;
+ if (time_before(expire_time, jiffies)) {
last_jiffies = jiffies;
round_robin_cpu(tsk_index);
}
@@ -200,7 +201,7 @@ static int power_saving_thread(void *data)
CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
local_irq_enable();
- if (jiffies > expire_time) {
+ if (time_before(expire_time, jiffies)) {
do_sleep = 1;
break;
}
@@ -215,8 +216,15 @@ static int power_saving_thread(void *data)
* borrow CPU time from this CPU and cause RT task use > 95%
* CPU time. To make 'avoid starvation' work, takes a nap here.
*/
- if (do_sleep)
+ if (unlikely(do_sleep))
schedule_timeout_killable(HZ * idle_pct / 100);
+
+ /* If an external event has set the need_resched flag, then
+ * we need to deal with it, or this loop will continue to
+ * spin without calling __mwait().
+ */
+ if (unlikely(need_resched()))
+ schedule();
}
exit_round_robin(tsk_index);
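Note on the acpi_pad change above: jiffies is an unsigned counter that wraps, so the old "jiffies > expire_time" test misfires around the wrap point; time_before()/time_after() compare via a signed difference and stay correct across the wrap. A minimal userspace sketch of that trick (an illustration of the idea, not the kernel's jiffies.h macros):

    #include <stdio.h>

    /* Wraparound-safe "a happens before b", in the spirit of time_before():
     * the unsigned difference is reinterpreted as signed, so a counter that
     * has wrapped still compares correctly. */
    static int ticks_before(unsigned long a, unsigned long b)
    {
            return (long)(a - b) < 0;
    }

    int main(void)
    {
            unsigned long now = (unsigned long)-10;  /* 10 ticks before the wrap */
            unsigned long expire = now + 20;         /* wraps to a small value */

            /* The naive comparison claims the deadline already passed. */
            printf("naive expired: %d\n", expire < now);              /* 1 (wrong) */
            printf("safe  expired: %d\n", ticks_before(expire, now)); /* 0 (right) */
            return 0;
    }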
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 52c81c49cc7d..1c085742644f 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -268,7 +268,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
pr->apic_id = apic_id;
cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
- if (!cpu0_initialized) {
+ if (!cpu0_initialized && !acpi_lapic) {
cpu0_initialized = 1;
/* Handle UP system running SMP kernel, with no LAPIC in MADT */
if ((cpu_index == -1) && (num_online_cpus() == 1))
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 6e7b2a12860d..e48fc98e71c4 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -56,6 +56,10 @@
/* Battery power unit: 0 means mW, 1 means mA */
#define ACPI_BATTERY_POWER_UNIT_MA 1
+#define ACPI_BATTERY_STATE_DISCHARGING 0x1
+#define ACPI_BATTERY_STATE_CHARGING 0x2
+#define ACPI_BATTERY_STATE_CRITICAL 0x4
+
#define _COMPONENT ACPI_BATTERY_COMPONENT
ACPI_MODULE_NAME("battery");
@@ -169,7 +173,7 @@ static int acpi_battery_get_state(struct acpi_battery *battery);
static int acpi_battery_is_charged(struct acpi_battery *battery)
{
- /* either charging or discharging */
+ /* charging, discharging or critical low */
if (battery->state != 0)
return 0;
@@ -204,9 +208,9 @@ static int acpi_battery_get_property(struct power_supply *psy,
return -ENODEV;
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
- if (battery->state & 0x01)
+ if (battery->state & ACPI_BATTERY_STATE_DISCHARGING)
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
- else if (battery->state & 0x02)
+ else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (acpi_battery_is_charged(battery))
val->intval = POWER_SUPPLY_STATUS_FULL;
@@ -269,6 +273,17 @@ static int acpi_battery_get_property(struct power_supply *psy,
else
val->intval = 0;
break;
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ if (battery->state & ACPI_BATTERY_STATE_CRITICAL)
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ else if (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
+ (battery->capacity_now <= battery->alarm))
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ else if (acpi_battery_is_charged(battery))
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+ else
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ break;
case POWER_SUPPLY_PROP_MODEL_NAME:
val->strval = battery->model_number;
break;
@@ -296,6 +311,7 @@ static enum power_supply_property charge_battery_props[] = {
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
@@ -313,6 +329,7 @@ static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_ENERGY_FULL,
POWER_SUPPLY_PROP_ENERGY_NOW,
POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
@@ -605,7 +622,8 @@ static int sysfs_add_battery(struct acpi_battery *battery)
battery->bat.type = POWER_SUPPLY_TYPE_BATTERY;
battery->bat.get_property = acpi_battery_get_property;
- result = power_supply_register(&battery->device->dev, &battery->bat);
+ result = power_supply_register_no_ws(&battery->device->dev, &battery->bat);
+
if (result)
return result;
return device_create_file(battery->bat.dev, &alarm_attr);
@@ -696,7 +714,7 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
}
}
-static int acpi_battery_update(struct acpi_battery *battery)
+static int acpi_battery_update(struct acpi_battery *battery, bool resume)
{
int result, old_present = acpi_battery_present(battery);
result = acpi_battery_get_status(battery);
@@ -707,6 +725,10 @@ static int acpi_battery_update(struct acpi_battery *battery)
battery->update_time = 0;
return 0;
}
+
+ if (resume)
+ return 0;
+
if (!battery->update_time ||
old_present != acpi_battery_present(battery)) {
result = acpi_battery_get_info(battery);
@@ -720,7 +742,19 @@ static int acpi_battery_update(struct acpi_battery *battery)
return result;
}
result = acpi_battery_get_state(battery);
+ if (result)
+ return result;
acpi_battery_quirks(battery);
+
+ /*
+ * Wakeup the system if battery is critical low
+ * or lower than the alarm level
+ */
+ if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) ||
+ (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
+ (battery->capacity_now <= battery->alarm)))
+ pm_wakeup_event(&battery->device->dev, 0);
+
return result;
}
@@ -915,7 +949,7 @@ static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
static int acpi_battery_read(int fid, struct seq_file *seq)
{
struct acpi_battery *battery = seq->private;
- int result = acpi_battery_update(battery);
+ int result = acpi_battery_update(battery, false);
return acpi_print_funcs[fid](seq, result);
}
@@ -1030,7 +1064,7 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
old = battery->bat.dev;
if (event == ACPI_BATTERY_NOTIFY_INFO)
acpi_battery_refresh(battery);
- acpi_battery_update(battery);
+ acpi_battery_update(battery, false);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event,
acpi_battery_present(battery));
@@ -1045,13 +1079,27 @@ static int battery_notify(struct notifier_block *nb,
{
struct acpi_battery *battery = container_of(nb, struct acpi_battery,
pm_nb);
+ int result;
+
switch (mode) {
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
- if (battery->bat.dev) {
- sysfs_remove_battery(battery);
- sysfs_add_battery(battery);
- }
+ if (!acpi_battery_present(battery))
+ return 0;
+
+ if (!battery->bat.dev) {
+ result = acpi_battery_get_info(battery);
+ if (result)
+ return result;
+
+ result = sysfs_add_battery(battery);
+ if (result)
+ return result;
+ } else
+ acpi_battery_refresh(battery);
+
+ acpi_battery_init_alarm(battery);
+ acpi_battery_get_state(battery);
break;
}
@@ -1087,7 +1135,7 @@ static int acpi_battery_add(struct acpi_device *device)
mutex_init(&battery->sysfs_lock);
if (acpi_has_method(battery->device->handle, "_BIX"))
set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
- result = acpi_battery_update(battery);
+ result = acpi_battery_update(battery, false);
if (result)
goto fail;
#ifdef CONFIG_ACPI_PROCFS_POWER
@@ -1107,6 +1155,8 @@ static int acpi_battery_add(struct acpi_device *device)
battery->pm_nb.notifier_call = battery_notify;
register_pm_notifier(&battery->pm_nb);
+ device_init_wakeup(&device->dev, 1);
+
return result;
fail:
@@ -1123,6 +1173,7 @@ static int acpi_battery_remove(struct acpi_device *device)
if (!device || !acpi_driver_data(device))
return -EINVAL;
+ device_init_wakeup(&device->dev, 0);
battery = acpi_driver_data(device);
unregister_pm_notifier(&battery->pm_nb);
#ifdef CONFIG_ACPI_PROCFS_POWER
@@ -1149,7 +1200,7 @@ static int acpi_battery_resume(struct device *dev)
return -EINVAL;
battery->update_time = 0;
- acpi_battery_update(battery);
+ acpi_battery_update(battery, true);
return 0;
}
#else
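Note on the battery.c changes above: the _BST state word is a bitmask (the new ACPI_BATTERY_STATE_* defines replace the bare 0x1/0x2 magic numbers), and "charged" is defined as none of the bits being set. A standalone sketch of the capacity-level mapping the new POWER_SUPPLY_PROP_CAPACITY_LEVEL case implements, simplified to skip the alarm-threshold test and to return strings instead of the power-supply enum values:

    #include <stdio.h>

    #define ACPI_BATTERY_STATE_DISCHARGING 0x1
    #define ACPI_BATTERY_STATE_CHARGING    0x2
    #define ACPI_BATTERY_STATE_CRITICAL    0x4

    /* "Charged" means no charging, discharging or critical bit is set. */
    static int battery_is_charged(unsigned int state)
    {
            return state == 0;
    }

    static const char *capacity_level(unsigned int state)
    {
            if (state & ACPI_BATTERY_STATE_CRITICAL)
                    return "critical";
            if (battery_is_charged(state))
                    return "full";
            return "normal";
    }

    int main(void)
    {
            printf("%s\n", capacity_level(0));                              /* full */
            printf("%s\n", capacity_level(ACPI_BATTERY_STATE_DISCHARGING)); /* normal */
            printf("%s\n", capacity_level(ACPI_BATTERY_STATE_DISCHARGING |
                                          ACPI_BATTERY_STATE_CRITICAL));    /* critical */
            return 0;
    }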
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index cf925c4f36b7..4ce0ea1e5a4e 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -52,6 +52,12 @@ struct proc_dir_entry *acpi_root_dir;
EXPORT_SYMBOL(acpi_root_dir);
#ifdef CONFIG_X86
+#ifdef CONFIG_ACPI_CUSTOM_DSDT
+static inline int set_copy_dsdt(const struct dmi_system_id *id)
+{
+ return 0;
+}
+#else
static int set_copy_dsdt(const struct dmi_system_id *id)
{
printk(KERN_NOTICE "%s detected - "
@@ -59,6 +65,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
acpi_gbl_copy_dsdt_locally = 1;
return 0;
}
+#endif
static struct dmi_system_id dsdt_dmi_table[] __initdata = {
/*
@@ -132,6 +139,21 @@ void acpi_bus_private_data_handler(acpi_handle handle,
}
EXPORT_SYMBOL(acpi_bus_private_data_handler);
+int acpi_bus_attach_private_data(acpi_handle handle, void *data)
+{
+ acpi_status status;
+
+ status = acpi_attach_data(handle,
+ acpi_bus_private_data_handler, data);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_debug(handle, "Error attaching device data\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_attach_private_data);
+
int acpi_bus_get_private_data(acpi_handle handle, void **data)
{
acpi_status status;
@@ -140,15 +162,20 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
return -EINVAL;
status = acpi_get_data(handle, acpi_bus_private_data_handler, data);
- if (ACPI_FAILURE(status) || !*data) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
- handle));
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_debug(handle, "No context for object\n");
return -ENODEV;
}
return 0;
}
-EXPORT_SYMBOL(acpi_bus_get_private_data);
+EXPORT_SYMBOL_GPL(acpi_bus_get_private_data);
+
+void acpi_bus_detach_private_data(acpi_handle handle)
+{
+ acpi_detach_data(handle, acpi_bus_private_data_handler);
+}
+EXPORT_SYMBOL_GPL(acpi_bus_detach_private_data);
void acpi_bus_no_hotplug(acpi_handle handle)
{
@@ -340,16 +367,18 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
{
struct acpi_device *adev;
struct acpi_driver *driver;
- acpi_status status;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
+ bool hotplug_event = false;
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
+ hotplug_event = true;
break;
case ACPI_NOTIFY_DEVICE_CHECK:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
+ hotplug_event = true;
break;
case ACPI_NOTIFY_DEVICE_WAKE:
@@ -358,6 +387,7 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
case ACPI_NOTIFY_EJECT_REQUEST:
acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
+ hotplug_event = true;
break;
case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
@@ -393,16 +423,9 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
(driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
driver->ops.notify(adev, type);
- switch (type) {
- case ACPI_NOTIFY_BUS_CHECK:
- case ACPI_NOTIFY_DEVICE_CHECK:
- case ACPI_NOTIFY_EJECT_REQUEST:
- status = acpi_hotplug_schedule(adev, type);
- if (ACPI_SUCCESS(status))
- return;
- default:
- break;
- }
+ if (hotplug_event && ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
+ return;
+
acpi_bus_put_acpi_device(adev);
return;
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 7f70f3182d50..4fcbd670415c 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -121,6 +121,13 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
struct acpi_processor *pr = per_cpu(processors, cpu);
struct acpi_device *device;
+ /*
+ * CPU_STARTING and CPU_DYING must not sleep. Return here since
+ * acpi_bus_get_device() may sleep.
+ */
+ if (action == CPU_STARTING || action == CPU_DYING)
+ return NOTIFY_DONE;
+
if (!pr || acpi_bus_get_device(pr->handle, &device))
return NOTIFY_DONE;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index df49fc8276b9..f775fa0d850f 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2313,12 +2313,16 @@ int __init acpi_scan_init(void)
if (result)
goto out;
- result = acpi_bus_scan_fixed();
- if (result) {
- acpi_detach_data(acpi_root->handle, acpi_scan_drop_device);
- acpi_device_del(acpi_root);
- put_device(&acpi_root->dev);
- goto out;
+ /* Fixed feature devices do not exist on HW-reduced platform */
+ if (!acpi_gbl_reduced_hardware) {
+ result = acpi_bus_scan_fixed();
+ if (result) {
+ acpi_detach_data(acpi_root->handle,
+ acpi_scan_drop_device);
+ acpi_device_del(acpi_root);
+ put_device(&acpi_root->dev);
+ goto out;
+ }
}
acpi_update_all_gpes();
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index c1e31a41f949..25bbc55dca89 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -1278,8 +1278,8 @@ static int __init acpi_thermal_init(void)
static void __exit acpi_thermal_exit(void)
{
- destroy_workqueue(acpi_thermal_pm_queue);
acpi_bus_unregister_driver(&acpi_thermal_driver);
+ destroy_workqueue(acpi_thermal_pm_queue);
return;
}
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index bba526148583..07c8c5a5ee95 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/acpi.h>
+#include <linux/dynamic_debug.h>
#include "internal.h"
@@ -457,6 +458,24 @@ acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
EXPORT_SYMBOL(acpi_evaluate_ost);
/**
+ * acpi_handle_path: Return the object path of handle
+ *
+ * Caller must free the returned buffer
+ */
+static char *acpi_handle_path(acpi_handle handle)
+{
+ struct acpi_buffer buffer = {
+ .length = ACPI_ALLOCATE_BUFFER,
+ .pointer = NULL
+ };
+
+ if (in_interrupt() ||
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer) != AE_OK)
+ return NULL;
+ return buffer.pointer;
+}
+
+/**
* acpi_handle_printk: Print message with ACPI prefix and object path
*
* This function is called through acpi_handle_<level> macros and prints
@@ -469,29 +488,50 @@ acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- struct acpi_buffer buffer = {
- .length = ACPI_ALLOCATE_BUFFER,
- .pointer = NULL
- };
const char *path;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- if (in_interrupt() ||
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer) != AE_OK)
- path = "<n/a>";
- else
- path = buffer.pointer;
-
- printk("%sACPI: %s: %pV", level, path, &vaf);
+ path = acpi_handle_path(handle);
+ printk("%sACPI: %s: %pV", level, path ? path : "<n/a>" , &vaf);
va_end(args);
- kfree(buffer.pointer);
+ kfree(path);
}
EXPORT_SYMBOL(acpi_handle_printk);
+#if defined(CONFIG_DYNAMIC_DEBUG)
+/**
+ * __acpi_handle_debug: pr_debug with ACPI prefix and object path
+ *
+ * This function is called through acpi_handle_debug macro and debug
+ * prints a message with ACPI prefix and object path. This function
+ * acquires the global namespace mutex to obtain an object path. In
+ * interrupt context, it shows the object path as <n/a>.
+ */
+void
+__acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ const char *path;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ path = acpi_handle_path(handle);
+ __dynamic_pr_debug(descriptor, "ACPI: %s: %pV", path ? path : "<n/a>", &vaf);
+
+ va_end(args);
+ kfree(path);
+}
+EXPORT_SYMBOL(__acpi_handle_debug);
+#endif
+
/**
* acpi_has_method: Check whether @handle has a method named @name
* @handle: ACPI device handle
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 2d56f4113ae7..eb1bd2ecad8b 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -318,10 +318,16 @@ int device_init_wakeup(struct device *dev, bool enable)
{
int ret = 0;
+ if (!dev)
+ return -EINVAL;
+
if (enable) {
device_set_wakeup_capable(dev, true);
ret = device_wakeup_enable(dev);
} else {
+ if (dev->power.can_wakeup)
+ device_wakeup_disable(dev);
+
device_set_wakeup_capable(dev, false);
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6d8a87f252de..cb9b1f8326c3 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -144,11 +144,11 @@ static void virtblk_done(struct virtqueue *vq)
if (unlikely(virtqueue_is_broken(vq)))
break;
} while (!virtqueue_enable_cb(vq));
- spin_unlock_irqrestore(&vblk->vq_lock, flags);
/* In case queue is stopped waiting for more buffers. */
if (req_done)
blk_mq_start_stopped_hw_queues(vblk->disk->queue);
+ spin_unlock_irqrestore(&vblk->vq_lock, flags);
}
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
@@ -202,8 +202,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
if (err) {
virtqueue_kick(vblk->vq);
- spin_unlock_irqrestore(&vblk->vq_lock, flags);
blk_mq_stop_hw_queue(hctx);
+ spin_unlock_irqrestore(&vblk->vq_lock, flags);
/* Out of mem doesn't actually happen, since we fall back
* to direct descriptors */
if (err == -ENOMEM || err == -ENOSPC)
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 4637697c139f..3fbee4540228 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -147,7 +147,7 @@ static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
static int _round_up_table(const struct clk_div_table *table, int div)
{
const struct clk_div_table *clkt;
- int up = _get_table_maxdiv(table);
+ int up = INT_MAX;
for (clkt = table; clkt->div; clkt++) {
if (clkt->div == div)
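Note on the clk-divider change above: _round_up_table() picks the smallest table divider that is at least the requested value, and starting the running minimum at INT_MAX keeps the "no entry is large enough" case from silently collapsing onto the table maximum. A self-contained sketch of that round-up-over-a-table logic, using a zero-terminated table as in the driver (names here are illustrative, not the clk framework API):

    #include <limits.h>
    #include <stdio.h>

    struct div_entry {
            unsigned int div;       /* a div of 0 terminates the table */
    };

    /* Smallest table divider >= div, or INT_MAX if every entry is smaller. */
    static int round_up_table(const struct div_entry *table, int div)
    {
            const struct div_entry *e;
            int up = INT_MAX;

            for (e = table; e->div; e++) {
                    if ((int)e->div == div)
                            return div;
                    if ((int)e->div > div && (int)e->div < up)
                            up = (int)e->div;
            }
            return up;
    }

    int main(void)
    {
            static const struct div_entry table[] = { {1}, {2}, {4}, {8}, {0} };

            printf("%d\n", round_up_table(table, 3));  /* 4 */
            printf("%d\n", round_up_table(table, 8));  /* 8 */
            printf("%d\n", round_up_table(table, 9));  /* INT_MAX: nothing fits */
            return 0;
    }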
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index bca0a0badbfa..a886702f7c8b 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -521,8 +521,10 @@ static struct clk * __init clkgen_odf_register(const char *parent_name,
gate->lock = odf_lock;
div = kzalloc(sizeof(*div), GFP_KERNEL);
- if (!div)
+ if (!div) {
+ kfree(gate);
return ERR_PTR(-ENOMEM);
+ }
div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
div->reg = reg + pll_data->odf[odf].offset;
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index e1769addf435..6aad8abc69a2 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -58,9 +58,9 @@
#define PLLDU_LFCON_SET_DIVN 600
#define PLLE_BASE_DIVCML_SHIFT 24
-#define PLLE_BASE_DIVCML_WIDTH 4
+#define PLLE_BASE_DIVCML_MASK 0xf
#define PLLE_BASE_DIVP_SHIFT 16
-#define PLLE_BASE_DIVP_WIDTH 7
+#define PLLE_BASE_DIVP_WIDTH 6
#define PLLE_BASE_DIVN_SHIFT 8
#define PLLE_BASE_DIVN_WIDTH 8
#define PLLE_BASE_DIVM_SHIFT 0
@@ -183,6 +183,14 @@
#define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\
mask(p->params->div_nmp->divp_width))
+#define divm_shift(p) (p)->params->div_nmp->divm_shift
+#define divn_shift(p) (p)->params->div_nmp->divn_shift
+#define divp_shift(p) (p)->params->div_nmp->divp_shift
+
+#define divm_mask_shifted(p) (divm_mask(p) << divm_shift(p))
+#define divn_mask_shifted(p) (divn_mask(p) << divn_shift(p))
+#define divp_mask_shifted(p) (divp_mask(p) << divp_shift(p))
+
#define divm_max(p) (divm_mask(p))
#define divn_max(p) (divn_mask(p))
#define divp_max(p) (1 << (divp_mask(p)))
@@ -476,13 +484,12 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
} else {
val = pll_readl_base(pll);
- val &= ~((divm_mask(pll) << div_nmp->divm_shift) |
- (divn_mask(pll) << div_nmp->divn_shift) |
- (divp_mask(pll) << div_nmp->divp_shift));
+ val &= ~(divm_mask_shifted(pll) | divn_mask_shifted(pll) |
+ divp_mask_shifted(pll));
- val |= ((cfg->m << div_nmp->divm_shift) |
- (cfg->n << div_nmp->divn_shift) |
- (cfg->p << div_nmp->divp_shift));
+ val |= (cfg->m << divm_shift(pll)) |
+ (cfg->n << divn_shift(pll)) |
+ (cfg->p << divp_shift(pll));
pll_writel_base(val, pll);
}
@@ -730,11 +737,12 @@ static int clk_plle_enable(struct clk_hw *hw)
if (pll->params->flags & TEGRA_PLLE_CONFIGURE) {
/* configure dividers */
val = pll_readl_base(pll);
- val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
- val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
- val |= sel.m << pll->params->div_nmp->divm_shift;
- val |= sel.n << pll->params->div_nmp->divn_shift;
- val |= sel.p << pll->params->div_nmp->divp_shift;
+ val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
+ divm_mask_shifted(pll));
+ val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
+ val |= sel.m << divm_shift(pll);
+ val |= sel.n << divn_shift(pll);
+ val |= sel.p << divp_shift(pll);
val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
pll_writel_base(val, pll);
}
@@ -745,10 +753,11 @@ static int clk_plle_enable(struct clk_hw *hw)
pll_writel_misc(val, pll);
val = readl(pll->clk_base + PLLE_SS_CTRL);
+ val &= ~PLLE_SS_COEFFICIENTS_MASK;
val |= PLLE_SS_DISABLE;
writel(val, pll->clk_base + PLLE_SS_CTRL);
- val |= pll_readl_base(pll);
+ val = pll_readl_base(pll);
val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE);
pll_writel_base(val, pll);
@@ -1292,10 +1301,11 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
pll_writel(val, PLLE_SS_CTRL, pll);
val = pll_readl_base(pll);
- val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
- val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
- val |= sel.m << pll->params->div_nmp->divm_shift;
- val |= sel.n << pll->params->div_nmp->divn_shift;
+ val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
+ divm_mask_shifted(pll));
+ val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
+ val |= sel.m << divm_shift(pll);
+ val |= sel.n << divn_shift(pll);
val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
pll_writel_base(val, pll);
udelay(1);
@@ -1410,6 +1420,15 @@ struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
return clk;
}
+static struct div_nmp pll_e_nmp = {
+ .divn_shift = PLLE_BASE_DIVN_SHIFT,
+ .divn_width = PLLE_BASE_DIVN_WIDTH,
+ .divm_shift = PLLE_BASE_DIVM_SHIFT,
+ .divm_width = PLLE_BASE_DIVM_WIDTH,
+ .divp_shift = PLLE_BASE_DIVP_SHIFT,
+ .divp_width = PLLE_BASE_DIVP_WIDTH,
+};
+
struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
unsigned long flags, struct tegra_clk_pll_params *pll_params,
@@ -1420,6 +1439,10 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
pll_params->flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS;
pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+
+ if (!pll_params->div_nmp)
+ pll_params->div_nmp = &pll_e_nmp;
+
pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
@@ -1557,9 +1580,8 @@ struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
int m;
m = _pll_fixed_mdiv(pll_params, parent_rate);
- val = m << PLL_BASE_DIVM_SHIFT;
- val |= (pll_params->vco_min / parent_rate)
- << PLL_BASE_DIVN_SHIFT;
+ val = m << divm_shift(pll);
+ val |= (pll_params->vco_min / parent_rate) << divn_shift(pll);
pll_writel_base(val, pll);
}
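Note on the Tegra PLL changes above: the new divm/divn/divp_mask_shifted() helpers are the standard read-modify-write pattern for packed register fields: clear the field at its shifted position, then OR in the new value. A generic userspace sketch of that pattern (the field layout and helper name are made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* Replace one field of a packed register value: clear mask << shift,
     * then insert the new value at the same position. */
    static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift,
                              uint32_t val)
    {
            reg &= ~(mask << shift);
            reg |= (val & mask) << shift;
            return reg;
    }

    int main(void)
    {
            uint32_t base = 0xffffffff;

            /* e.g. an 8-bit "N" divider field that lives at bit 8 */
            base = set_field(base, 0xff, 8, 0x2a);
            printf("0x%08x\n", base);   /* 0xffff2aff */
            return 0;
    }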
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 00fdd1170284..a8d7ea14f183 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -100,7 +100,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
|| tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
- clk_disable_unprepare(tcd->clk);
+ clk_disable(tcd->clk);
}
switch (m) {
@@ -109,7 +109,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
* of oneshot, we get lower overhead and improved accuracy.
*/
case CLOCK_EVT_MODE_PERIODIC:
- clk_prepare_enable(tcd->clk);
+ clk_enable(tcd->clk);
/* slow clock, count up to RC, then irq and restart */
__raw_writel(timer_clock
@@ -126,7 +126,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
break;
case CLOCK_EVT_MODE_ONESHOT:
- clk_prepare_enable(tcd->clk);
+ clk_enable(tcd->clk);
/* slow clock, count up to RC, then irq and stop */
__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
@@ -194,7 +194,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
ret = clk_prepare_enable(t2_clk);
if (ret)
return ret;
- clk_disable_unprepare(t2_clk);
+ clk_disable(t2_clk);
clkevt.regs = tc->regs;
clkevt.clk = t2_clk;
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c
index b52e1c078b99..7f5374dbefd9 100644
--- a/drivers/clocksource/timer-marco.c
+++ b/drivers/clocksource/timer-marco.c
@@ -199,7 +199,7 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
action->dev_id = ce;
BUG_ON(setup_irq(ce->irq, action));
- irq_set_affinity(action->irq, cpumask_of(cpu));
+ irq_force_affinity(action->irq, cpumask_of(cpu));
clockevents_register_device(ce);
return 0;
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 1bf6bbac3e03..09b9129c7bd3 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -130,7 +130,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
return -ENOENT;
}
- cpu_reg = devm_regulator_get_optional(cpu_dev, "cpu0");
+ cpu_reg = regulator_get_optional(cpu_dev, "cpu0");
if (IS_ERR(cpu_reg)) {
/*
* If cpu0 regulator supply node is present, but regulator is
@@ -145,23 +145,23 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
PTR_ERR(cpu_reg));
}
- cpu_clk = devm_clk_get(cpu_dev, NULL);
+ cpu_clk = clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
ret = PTR_ERR(cpu_clk);
pr_err("failed to get cpu0 clock: %d\n", ret);
- goto out_put_node;
+ goto out_put_reg;
}
ret = of_init_opp_table(cpu_dev);
if (ret) {
pr_err("failed to init OPP table: %d\n", ret);
- goto out_put_node;
+ goto out_put_clk;
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
pr_err("failed to init cpufreq table: %d\n", ret);
- goto out_put_node;
+ goto out_put_clk;
}
of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
@@ -216,6 +216,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
out_free_table:
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_put_clk:
+ if (!IS_ERR(cpu_clk))
+ clk_put(cpu_clk);
+out_put_reg:
+ if (!IS_ERR(cpu_reg))
+ regulator_put(cpu_reg);
out_put_node:
of_node_put(np);
return ret;
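Note on the cpufreq-cpu0 change above: once the probe path holds plain (non-devm) regulator and clk references, every error exit has to drop them in reverse order of acquisition, hence the new out_put_clk/out_put_reg labels. A condensed sketch of that goto-unwind idiom, with placeholder resources standing in for the regulator, clock and frequency table (the acquire/release helpers are purely illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    static void *acquire(const char *name, int fail)
    {
            printf("get %s%s\n", name, fail ? ": failed" : "");
            return fail ? NULL : malloc(1);
    }

    static void release(void *res, const char *name)
    {
            printf("put %s\n", name);
            free(res);
    }

    static int probe(int fail_step)
    {
            void *reg, *clk, *table;
            int ret = -1;

            reg = acquire("regulator", fail_step == 1);
            if (!reg)
                    return ret;

            clk = acquire("clock", fail_step == 2);
            if (!clk)
                    goto out_put_reg;

            table = acquire("freq table", fail_step == 3);
            if (!table)
                    goto out_put_clk;

            /* Success: the real driver keeps reg/clk for runtime use; the
             * demo simply tears everything down again. */
            release(table, "freq table");
            release(clk, "clock");
            release(reg, "regulator");
            return 0;

    out_put_clk:
            release(clk, "clock");
    out_put_reg:
            release(reg, "regulator");
            return ret;
    }

    int main(void)
    {
            probe(3);          /* exercise the unwind path at step 3 */
            return 0;
    }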
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index ba43991ba98a..e1c6433b16e0 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -366,6 +366,11 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
break;
case CPUFREQ_GOV_LIMITS:
+ mutex_lock(&dbs_data->mutex);
+ if (!cpu_cdbs->cur_policy) {
+ mutex_unlock(&dbs_data->mutex);
+ break;
+ }
mutex_lock(&cpu_cdbs->timer_mutex);
if (policy->max < cpu_cdbs->cur_policy->cur)
__cpufreq_driver_target(cpu_cdbs->cur_policy,
@@ -375,6 +380,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
policy->min, CPUFREQ_RELATION_L);
dbs_check_cpu(dbs_data, cpu);
mutex_unlock(&cpu_cdbs->timer_mutex);
+ mutex_unlock(&dbs_data->mutex);
break;
}
return 0;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index eab8ccfe6beb..db2e45b4808e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -40,10 +40,10 @@
#define BYT_TURBO_VIDS 0x66d
-#define FRAC_BITS 6
+#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
-#define FP_ROUNDUP(X) ((X) += 1 << FRAC_BITS)
+
static inline int32_t mul_fp(int32_t x, int32_t y)
{
@@ -59,8 +59,8 @@ struct sample {
int32_t core_pct_busy;
u64 aperf;
u64 mperf;
- unsigned long long tsc;
int freq;
+ ktime_t time;
};
struct pstate_data {
@@ -98,9 +98,9 @@ struct cpudata {
struct vid_data vid;
struct _pid pid;
+ ktime_t last_sample_time;
u64 prev_aperf;
u64 prev_mperf;
- unsigned long long prev_tsc;
struct sample sample;
};
@@ -200,7 +200,10 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
pid->last_err = fp_error;
result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
-
+ if (result >= 0)
+ result = result + (1 << (FRAC_BITS-1));
+ else
+ result = result - (1 << (FRAC_BITS-1));
return (signed int)fp_toint(result);
}
@@ -560,47 +563,42 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
static inline void intel_pstate_calc_busy(struct cpudata *cpu,
struct sample *sample)
{
- int32_t core_pct;
- int32_t c0_pct;
+ int64_t core_pct;
+ int32_t rem;
- core_pct = div_fp(int_tofp((sample->aperf)),
- int_tofp((sample->mperf)));
- core_pct = mul_fp(core_pct, int_tofp(100));
- FP_ROUNDUP(core_pct);
+ core_pct = int_tofp(sample->aperf) * int_tofp(100);
+ core_pct = div_u64_rem(core_pct, int_tofp(sample->mperf), &rem);
- c0_pct = div_fp(int_tofp(sample->mperf), int_tofp(sample->tsc));
+ if ((rem << 1) >= int_tofp(sample->mperf))
+ core_pct += 1;
sample->freq = fp_toint(
mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));
- sample->core_pct_busy = mul_fp(core_pct, c0_pct);
+ sample->core_pct_busy = (int32_t)core_pct;
}
static inline void intel_pstate_sample(struct cpudata *cpu)
{
u64 aperf, mperf;
- unsigned long long tsc;
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
- tsc = native_read_tsc();
aperf = aperf >> FRAC_BITS;
mperf = mperf >> FRAC_BITS;
- tsc = tsc >> FRAC_BITS;
+ cpu->last_sample_time = cpu->sample.time;
+ cpu->sample.time = ktime_get();
cpu->sample.aperf = aperf;
cpu->sample.mperf = mperf;
- cpu->sample.tsc = tsc;
cpu->sample.aperf -= cpu->prev_aperf;
cpu->sample.mperf -= cpu->prev_mperf;
- cpu->sample.tsc -= cpu->prev_tsc;
intel_pstate_calc_busy(cpu, &cpu->sample);
cpu->prev_aperf = aperf;
cpu->prev_mperf = mperf;
- cpu->prev_tsc = tsc;
}
static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
@@ -614,13 +612,25 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
- int32_t core_busy, max_pstate, current_pstate;
+ int32_t core_busy, max_pstate, current_pstate, sample_ratio;
+ u32 duration_us;
+ u32 sample_time;
core_busy = cpu->sample.core_pct_busy;
max_pstate = int_tofp(cpu->pstate.max_pstate);
current_pstate = int_tofp(cpu->pstate.current_pstate);
core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
- return FP_ROUNDUP(core_busy);
+
+ sample_time = (pid_params.sample_rate_ms * USEC_PER_MSEC);
+ duration_us = (u32) ktime_us_delta(cpu->sample.time,
+ cpu->last_sample_time);
+ if (duration_us > sample_time * 3) {
+ sample_ratio = div_fp(int_tofp(sample_time),
+ int_tofp(duration_us));
+ core_busy = mul_fp(core_busy, sample_ratio);
+ }
+
+ return core_busy;
}
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
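Note on the intel_pstate changes above: the driver's arithmetic is plain fixed point, now with FRAC_BITS = 8 (24.8 format), and the blunt FP_ROUNDUP() macro is replaced by adding plus or minus half a unit (1 << (FRAC_BITS - 1)) before truncating, i.e. round-to-nearest. A minimal userspace sketch mirroring the driver's int_tofp/fp_toint/mul_fp/div_fp helpers, with the rounding step pulled out into its own function:

    #include <stdint.h>
    #include <stdio.h>

    #define FRAC_BITS 8
    #define int_tofp(x)  ((int64_t)(x) << FRAC_BITS)
    #define fp_toint(x)  ((x) >> FRAC_BITS)

    static int32_t mul_fp(int32_t x, int32_t y)
    {
            return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
    }

    static int32_t div_fp(int32_t x, int32_t y)
    {
            return (int32_t)(((int64_t)x << FRAC_BITS) / y);
    }

    /* Round-to-nearest before dropping the fraction, the same idea as the
     * "result +/- (1 << (FRAC_BITS - 1))" now done in pid_calc(). */
    static int fp_round(int32_t x)
    {
            if (x >= 0)
                    x += 1 << (FRAC_BITS - 1);
            else
                    x -= 1 << (FRAC_BITS - 1);
            return (int)fp_toint(x);
    }

    int main(void)
    {
            int32_t v = div_fp((int32_t)int_tofp(7), (int32_t)int_tofp(2)); /* 3.5 */

            printf("truncated: %d\n", (int)fp_toint(v));                 /* 3  */
            printf("rounded:   %d\n", fp_round(v));                      /* 4  */
            printf("x10:       %d\n",
                   (int)fp_toint(mul_fp(v, (int32_t)int_tofp(10))));     /* 35 */
            return 0;
    }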
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 97ccc31dbdd8..371e75d2348d 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -13,6 +13,12 @@ config ARM_BIG_LITTLE_CPUIDLE
define different C-states for little and big cores through the
multiple CPU idle drivers infrastructure.
+config ARM_CLPS711X_CPUIDLE
+ bool "CPU Idle Driver for CLPS711X processors"
+ depends on ARCH_CLPS711X || COMPILE_TEST
+ help
+ Select this to enable cpuidle on Cirrus Logic CLPS711X SOCs.
+
config ARM_HIGHBANK_CPUIDLE
bool "CPU Idle Driver for Calxeda processors"
depends on ARM_PSCI
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index f71ae1b373c5..534fff575823 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
##################################################################################
# ARM SoC drivers
obj-$(CONFIG_ARM_BIG_LITTLE_CPUIDLE) += cpuidle-big_little.o
+obj-$(CONFIG_ARM_CLPS711X_CPUIDLE) += cpuidle-clps711x.o
obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE) += cpuidle-calxeda.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o
obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o
diff --git a/drivers/cpuidle/cpuidle-clps711x.c b/drivers/cpuidle/cpuidle-clps711x.c
new file mode 100644
index 000000000000..5243811daa6e
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-clps711x.c
@@ -0,0 +1,64 @@
+/*
+ * CLPS711X CPU idle driver
+ *
+ * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/cpuidle.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define CLPS711X_CPUIDLE_NAME "clps711x-cpuidle"
+
+static void __iomem *clps711x_halt;
+
+static int clps711x_cpuidle_halt(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ writel(0xaa, clps711x_halt);
+
+ return index;
+}
+
+static struct cpuidle_driver clps711x_idle_driver = {
+ .name = CLPS711X_CPUIDLE_NAME,
+ .owner = THIS_MODULE,
+ .states[0] = {
+ .name = "HALT",
+ .desc = "CLPS711X HALT",
+ .enter = clps711x_cpuidle_halt,
+ .exit_latency = 1,
+ },
+ .state_count = 1,
+};
+
+static int __init clps711x_cpuidle_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ clps711x_halt = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(clps711x_halt))
+ return PTR_ERR(clps711x_halt);
+
+ return cpuidle_register(&clps711x_idle_driver, NULL);
+}
+
+static struct platform_driver clps711x_cpuidle_driver = {
+ .driver = {
+ .name = CLPS711X_CPUIDLE_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver_probe(clps711x_cpuidle_driver, clps711x_cpuidle_probe);
+
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("CLPS711X CPU idle driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index cfdbb92aae1d..7a740769c2fa 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1548,11 +1548,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* Disable BLOCK interrupts as well */
channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
- err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
- IRQF_SHARED, "dw_dmac", dw);
- if (err)
- return err;
-
/* Create a pool of consistent memory blocks for hardware descriptors */
dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
sizeof(struct dw_desc), 4, 0);
@@ -1563,6 +1558,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
+ err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
+ "dw_dmac", dw);
+ if (err)
+ return err;
+
INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < nr_channels; i++) {
struct dw_dma_chan *dwc = &dw->chan[i];
@@ -1667,6 +1667,7 @@ int dw_dma_remove(struct dw_dma_chip *chip)
dw_dma_off(dw);
dma_async_device_unregister(&dw->dma);
+ free_irq(chip->irq, dw);
tasklet_kill(&dw->tasklet);
list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index ab26d46bbe15..5ebdfbc1051e 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -113,11 +113,9 @@ struct sa11x0_dma_phy {
struct sa11x0_dma_desc *txd_load;
unsigned sg_done;
struct sa11x0_dma_desc *txd_done;
-#ifdef CONFIG_PM_SLEEP
u32 dbs[2];
u32 dbt[2];
u32 dcsr;
-#endif
};
struct sa11x0_dma_dev {
@@ -984,7 +982,6 @@ static int sa11x0_dma_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int sa11x0_dma_suspend(struct device *dev)
{
struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
@@ -1054,7 +1051,6 @@ static int sa11x0_dma_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops sa11x0_dma_pm_ops = {
.suspend_noirq = sa11x0_dma_suspend,
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index c98764aeeec6..f477308b6e9c 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -237,8 +237,8 @@ static inline bool is_next_generation(int new_generation, int old_generation)
#define LOCAL_BUS 0xffc0
-/* arbitrarily chosen maximum range for physical DMA: 128 TB */
-#define FW_MAX_PHYSICAL_RANGE (128ULL << 40)
+/* OHCI-1394's default upper bound for physical DMA: 4 GB */
+#define FW_MAX_PHYSICAL_RANGE (1ULL << 32)
void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 8db663219560..586f2f7f6993 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -3716,7 +3716,7 @@ static int pci_probe(struct pci_dev *dev,
version >> 16, version & 0xff, ohci->card.index,
ohci->n_ir, ohci->n_it, ohci->quirks,
reg_read(ohci, OHCI1394_PhyUpperBound) ?
- ", >4 GB phys DMA" : "");
+ ", physUB" : "");
return 0;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 96177eec0a0e..eedb023af27d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1833,7 +1833,6 @@ int i915_driver_unload(struct drm_device *dev)
flush_workqueue(dev_priv->wq);
mutex_lock(&dev->struct_mutex);
- i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
WARN_ON(dev_priv->mm.aliasing_ppgtt);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 108e1ec2fa4b..388c028e223c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -242,18 +242,6 @@ struct intel_ddi_plls {
#define WATCH_LISTS 0
#define WATCH_GTT 0
-#define I915_GEM_PHYS_CURSOR_0 1
-#define I915_GEM_PHYS_CURSOR_1 2
-#define I915_GEM_PHYS_OVERLAY_REGS 3
-#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
-
-struct drm_i915_gem_phys_object {
- int id;
- struct page **page_list;
- drm_dma_handle_t *handle;
- struct drm_i915_gem_object *cur_obj;
-};
-
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
@@ -1187,9 +1175,6 @@ struct i915_gem_mm {
/** Bit 6 swizzling required for Y tiling */
uint32_t bit_6_swizzle_y;
- /* storage for physical objects */
- struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
-
/* accounting, useful for userland debugging */
spinlock_t object_stat_lock;
size_t object_memory;
@@ -1769,7 +1754,7 @@ struct drm_i915_gem_object {
struct drm_file *pin_filp;
/** for phy allocated objects */
- struct drm_i915_gem_phys_object *phys_obj;
+ drm_dma_handle_t *phys_handle;
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@@ -2204,10 +2189,12 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
#define PIN_MAPPABLE 0x1
#define PIN_NONBLOCK 0x2
#define PIN_GLOBAL 0x4
+#define PIN_OFFSET_BIAS 0x8
+#define PIN_OFFSET_MASK (~4095)
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
- unsigned flags);
+ uint64_t flags);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
@@ -2334,13 +2321,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_ring_buffer *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
-int i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- int id,
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align);
-void i915_gem_detach_phys_object(struct drm_device *dev,
- struct drm_i915_gem_object *obj);
-void i915_gem_free_all_phys_object(struct drm_device *dev);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
@@ -2465,6 +2447,8 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
int min_size,
unsigned alignment,
unsigned cache_level,
+ unsigned long start,
+ unsigned long end,
unsigned flags);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);
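Note on the i915_drv.h change above: PIN_OFFSET_BIAS packs a page-aligned minimum GTT offset into the upper bits of the (now 64-bit) pin flags, and PIN_OFFSET_MASK (~4095) separates that offset from the low flag bits. A small standalone sketch of packing and unpacking such a combined flags-plus-offset word, reusing the constant names from the header above (the sketch itself is illustrative, not driver code):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PIN_MAPPABLE    0x1
    #define PIN_NONBLOCK    0x2
    #define PIN_GLOBAL      0x4
    #define PIN_OFFSET_BIAS 0x8
    #define PIN_OFFSET_MASK (~(uint64_t)4095)

    int main(void)
    {
            /* Request a mappable pin that must not land below 256 KiB; the
             * bias is page aligned, so it cannot collide with the flag bits. */
            uint64_t flags = PIN_MAPPABLE | PIN_OFFSET_BIAS | (256 * 1024);

            if (flags & PIN_OFFSET_BIAS) {
                    uint64_t start = flags & PIN_OFFSET_MASK;

                    printf("bind no lower than 0x%" PRIx64 "\n", start); /* 0x40000 */
            }
            printf("mappable: %d\n", !!(flags & PIN_MAPPABLE));          /* 1 */
            return 0;
    }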
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2871ce75f438..3326770c9ed2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,10 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly);
-static int i915_gem_phys_pwrite(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file);
static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj);
@@ -209,6 +205,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
+static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+{
+ drm_dma_handle_t *phys = obj->phys_handle;
+
+ if (!phys)
+ return;
+
+ if (obj->madv == I915_MADV_WILLNEED) {
+ struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+ char *vaddr = phys->vaddr;
+ int i;
+
+ for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+ struct page *page = shmem_read_mapping_page(mapping, i);
+ if (!IS_ERR(page)) {
+ char *dst = kmap_atomic(page);
+ memcpy(dst, vaddr, PAGE_SIZE);
+ drm_clflush_virt_range(dst, PAGE_SIZE);
+ kunmap_atomic(dst);
+
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ page_cache_release(page);
+ }
+ vaddr += PAGE_SIZE;
+ }
+ i915_gem_chipset_flush(obj->base.dev);
+ }
+
+#ifdef CONFIG_X86
+ set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+ drm_pci_free(obj->base.dev, phys);
+ obj->phys_handle = NULL;
+}
+
+int
+i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+ int align)
+{
+ drm_dma_handle_t *phys;
+ struct address_space *mapping;
+ char *vaddr;
+ int i;
+
+ if (obj->phys_handle) {
+ if ((unsigned long)obj->phys_handle->vaddr & (align -1))
+ return -EBUSY;
+
+ return 0;
+ }
+
+ if (obj->madv != I915_MADV_WILLNEED)
+ return -EFAULT;
+
+ if (obj->base.filp == NULL)
+ return -EINVAL;
+
+ /* create a new object */
+ phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
+ if (!phys)
+ return -ENOMEM;
+
+ vaddr = phys->vaddr;
+#ifdef CONFIG_X86
+ set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
+#endif
+ mapping = file_inode(obj->base.filp)->i_mapping;
+ for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+ struct page *page;
+ char *src;
+
+ page = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(page)) {
+#ifdef CONFIG_X86
+ set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+ drm_pci_free(obj->base.dev, phys);
+ return PTR_ERR(page);
+ }
+
+ src = kmap_atomic(page);
+ memcpy(vaddr, src, PAGE_SIZE);
+ kunmap_atomic(src);
+
+ mark_page_accessed(page);
+ page_cache_release(page);
+
+ vaddr += PAGE_SIZE;
+ }
+
+ obj->phys_handle = phys;
+ return 0;
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
+{
+ struct drm_device *dev = obj->base.dev;
+ void *vaddr = obj->phys_handle->vaddr + args->offset;
+ char __user *user_data = to_user_ptr(args->data_ptr);
+
+ if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+ unsigned long unwritten;
+
+ /* The physical object once assigned is fixed for the lifetime
+ * of the obj, so we can safely drop the lock and continue
+ * to access vaddr.
+ */
+ mutex_unlock(&dev->struct_mutex);
+ unwritten = copy_from_user(vaddr, user_data, args->size);
+ mutex_lock(&dev->struct_mutex);
+ if (unwritten)
+ return -EFAULT;
+ }
+
+ i915_gem_chipset_flush(dev);
+ return 0;
+}
+
void *i915_gem_object_alloc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -921,8 +1039,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
- if (obj->phys_obj) {
- ret = i915_gem_phys_pwrite(dev, obj, args, file);
+ if (obj->phys_handle) {
+ ret = i915_gem_phys_pwrite(obj, args, file);
goto out;
}
@@ -3208,12 +3326,14 @@ static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
unsigned alignment,
- unsigned flags)
+ uint64_t flags)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 size, fence_size, fence_alignment, unfenced_alignment;
- size_t gtt_max =
+ unsigned long start =
+ flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+ unsigned long end =
flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
struct i915_vma *vma;
int ret;
@@ -3242,11 +3362,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
/* If the object is bigger than the entire aperture, reject it early
* before evicting everything in a vain attempt to find space.
*/
- if (obj->base.size > gtt_max) {
- DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+ if (obj->base.size > end) {
+ DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
obj->base.size,
flags & PIN_MAPPABLE ? "mappable" : "total",
- gtt_max);
+ end);
return ERR_PTR(-E2BIG);
}
@@ -3263,12 +3383,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
search_free:
ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
size, alignment,
- obj->cache_level, 0, gtt_max,
+ obj->cache_level,
+ start, end,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
if (ret) {
ret = i915_gem_evict_something(dev, vm, size, alignment,
- obj->cache_level, flags);
+ obj->cache_level,
+ start, end,
+ flags);
if (ret == 0)
goto search_free;
@@ -3828,11 +3951,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
return ret;
}
+static bool
+i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ if (alignment &&
+ vma->node.start & (alignment - 1))
+ return true;
+
+ if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+ return true;
+
+ if (flags & PIN_OFFSET_BIAS &&
+ vma->node.start < (flags & PIN_OFFSET_MASK))
+ return true;
+
+ return false;
+}
+
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
- unsigned flags)
+ uint64_t flags)
{
struct i915_vma *vma;
int ret;
@@ -3845,15 +3987,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
- if ((alignment &&
- vma->node.start & (alignment - 1)) ||
- (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
+ if (i915_vma_misplaced(vma, alignment, flags)) {
WARN(vma->pin_count,
"bo is already pinned with incorrect alignment:"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
i915_gem_obj_offset(obj, vm), alignment,
- flags & PIN_MAPPABLE,
+ !!(flags & PIN_MAPPABLE),
obj->map_and_fenceable);
ret = i915_vma_unbind(vma);
if (ret)
@@ -4163,9 +4303,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
trace_i915_gem_object_destroy(obj);
- if (obj->phys_obj)
- i915_gem_detach_phys_object(dev, obj);
-
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
int ret;
@@ -4183,6 +4320,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
}
}
+ i915_gem_object_detach_phys(obj);
+
/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
* before progressing. */
if (obj->stolen)
@@ -4646,190 +4785,6 @@ i915_gem_load(struct drm_device *dev)
register_shrinker(&dev_priv->mm.inactive_shrinker);
}
-/*
- * Create a physically contiguous memory object for this object
- * e.g. for cursor + overlay regs
- */
-static int i915_gem_init_phys_object(struct drm_device *dev,
- int id, int size, int align)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_phys_object *phys_obj;
- int ret;
-
- if (dev_priv->mm.phys_objs[id - 1] || !size)
- return 0;
-
- phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
- if (!phys_obj)
- return -ENOMEM;
-
- phys_obj->id = id;
-
- phys_obj->handle = drm_pci_alloc(dev, size, align);
- if (!phys_obj->handle) {
- ret = -ENOMEM;
- goto kfree_obj;
- }
-#ifdef CONFIG_X86
- set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-
- dev_priv->mm.phys_objs[id - 1] = phys_obj;
-
- return 0;
-kfree_obj:
- kfree(phys_obj);
- return ret;
-}
-
-static void i915_gem_free_phys_object(struct drm_device *dev, int id)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_phys_object *phys_obj;
-
- if (!dev_priv->mm.phys_objs[id - 1])
- return;
-
- phys_obj = dev_priv->mm.phys_objs[id - 1];
- if (phys_obj->cur_obj) {
- i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
- }
-
-#ifdef CONFIG_X86
- set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
- drm_pci_free(dev, phys_obj->handle);
- kfree(phys_obj);
- dev_priv->mm.phys_objs[id - 1] = NULL;
-}
-
-void i915_gem_free_all_phys_object(struct drm_device *dev)
-{
- int i;
-
- for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
- i915_gem_free_phys_object(dev, i);
-}
-
-void i915_gem_detach_phys_object(struct drm_device *dev,
- struct drm_i915_gem_object *obj)
-{
- struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
- char *vaddr;
- int i;
- int page_count;
-
- if (!obj->phys_obj)
- return;
- vaddr = obj->phys_obj->handle->vaddr;
-
- page_count = obj->base.size / PAGE_SIZE;
- for (i = 0; i < page_count; i++) {
- struct page *page = shmem_read_mapping_page(mapping, i);
- if (!IS_ERR(page)) {
- char *dst = kmap_atomic(page);
- memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
- kunmap_atomic(dst);
-
- drm_clflush_pages(&page, 1);
-
- set_page_dirty(page);
- mark_page_accessed(page);
- page_cache_release(page);
- }
- }
- i915_gem_chipset_flush(dev);
-
- obj->phys_obj->cur_obj = NULL;
- obj->phys_obj = NULL;
-}
-
-int
-i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- int id,
- int align)
-{
- struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = 0;
- int page_count;
- int i;
-
- if (id > I915_MAX_PHYS_OBJECT)
- return -EINVAL;
-
- if (obj->phys_obj) {
- if (obj->phys_obj->id == id)
- return 0;
- i915_gem_detach_phys_object(dev, obj);
- }
-
- /* create a new object */
- if (!dev_priv->mm.phys_objs[id - 1]) {
- ret = i915_gem_init_phys_object(dev, id,
- obj->base.size, align);
- if (ret) {
- DRM_ERROR("failed to init phys object %d size: %zu\n",
- id, obj->base.size);
- return ret;
- }
- }
-
- /* bind to the object */
- obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
- obj->phys_obj->cur_obj = obj;
-
- page_count = obj->base.size / PAGE_SIZE;
-
- for (i = 0; i < page_count; i++) {
- struct page *page;
- char *dst, *src;
-
- page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page))
- return PTR_ERR(page);
-
- src = kmap_atomic(page);
- dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
- memcpy(dst, src, PAGE_SIZE);
- kunmap_atomic(src);
-
- mark_page_accessed(page);
- page_cache_release(page);
- }
-
- return 0;
-}
-
-static int
-i915_gem_phys_pwrite(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
-{
- void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
- char __user *user_data = to_user_ptr(args->data_ptr);
-
- if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
- unsigned long unwritten;
-
- /* The physical object once assigned is fixed for the lifetime
- * of the obj, so we can safely drop the lock and continue
- * to access vaddr.
- */
- mutex_unlock(&dev->struct_mutex);
- unwritten = copy_from_user(vaddr, user_data, args->size);
- mutex_lock(&dev->struct_mutex);
- if (unwritten)
- return -EFAULT;
- }
-
- i915_gem_chipset_flush(dev);
- return 0;
-}
-
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 75fca63dc8c1..bbf4b12d842e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -68,9 +68,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
int min_size, unsigned alignment, unsigned cache_level,
+ unsigned long start, unsigned long end,
unsigned flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
struct i915_vma *vma;
int ret = 0;
@@ -102,11 +102,10 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
*/
INIT_LIST_HEAD(&unwind_list);
- if (flags & PIN_MAPPABLE) {
- BUG_ON(!i915_is_ggtt(vm));
+ if (start != 0 || end != vm->total) {
drm_mm_init_scan_with_range(&vm->mm, min_size,
- alignment, cache_level, 0,
- dev_priv->gtt.mappable_end);
+ alignment, cache_level,
+ start, end);
} else
drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2c9d9cbaf653..20fef6c50267 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -35,6 +35,9 @@
#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+
+#define BATCH_OFFSET_BIAS (256*1024)
struct eb_vmas {
struct list_head vmas;
@@ -545,7 +548,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence;
- unsigned flags;
+ uint64_t flags;
int ret;
flags = 0;
@@ -559,6 +562,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
flags |= PIN_GLOBAL;
+ if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
+ flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
if (ret)
@@ -592,6 +597,36 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
return 0;
}
+static bool
+eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
+{
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+ struct drm_i915_gem_object *obj = vma->obj;
+ bool need_fence, need_mappable;
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+ need_mappable = need_fence || need_reloc_mappable(vma);
+
+ WARN_ON((need_mappable || need_fence) &&
+ !i915_is_ggtt(vma->vm));
+
+ if (entry->alignment &&
+ vma->node.start & (entry->alignment - 1))
+ return true;
+
+ if (need_mappable && !obj->map_and_fenceable)
+ return true;
+
+ if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
+ vma->node.start < BATCH_OFFSET_BIAS)
+ return true;
+
+ return false;
+}
+
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct list_head *vmas,
@@ -653,26 +688,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
/* Unbind any ill-fitting objects or pin. */
list_for_each_entry(vma, vmas, exec_list) {
- struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
- bool need_fence, need_mappable;
-
- obj = vma->obj;
-
if (!drm_mm_node_allocated(&vma->node))
continue;
- need_fence =
- has_fenced_gpu_access &&
- entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj->tiling_mode != I915_TILING_NONE;
- need_mappable = need_fence || need_reloc_mappable(vma);
-
- WARN_ON((need_mappable || need_fence) &&
- !i915_is_ggtt(vma->vm));
-
- if ((entry->alignment &&
- vma->node.start & (entry->alignment - 1)) ||
- (need_mappable && !obj->map_and_fenceable))
+ if (eb_vma_misplaced(vma, has_fenced_gpu_access))
ret = i915_vma_unbind(vma);
else
ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -773,9 +792,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
* relocations were valid.
*/
for (j = 0; j < exec[i].relocation_count; j++) {
- if (copy_to_user(&user_relocs[j].presumed_offset,
- &invalid_offset,
- sizeof(invalid_offset))) {
+ if (__copy_to_user(&user_relocs[j].presumed_offset,
+ &invalid_offset,
+ sizeof(invalid_offset))) {
ret = -EFAULT;
mutex_lock(&dev->struct_mutex);
goto err;
@@ -999,6 +1018,25 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return 0;
}
+static struct drm_i915_gem_object *
+eb_get_batch(struct eb_vmas *eb)
+{
+ struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+
+ /*
+ * SNA is doing fancy tricks with compressing batch buffers, which leads
+ * to negative relocation deltas. Usually that works out ok since the
+ * relocate address is still positive, except when the batch is placed
+ * very low in the GTT. Ensure this doesn't happen.
+ *
+ * Note that actual hangs have only been observed on gen7, but for
+ * paranoia do it everywhere.
+ */
+ vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+ return vma->obj;
+}
+
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
@@ -1153,7 +1191,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
/* take note of the batch buffer before we might reorder the lists */
- batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
+ batch_obj = eb_get_batch(eb);
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@@ -1355,18 +1393,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) {
+ struct drm_i915_gem_exec_object __user *user_exec_list =
+ to_user_ptr(args->buffers_ptr);
+
/* Copy the new buffer offsets back to the user's exec list. */
- for (i = 0; i < args->buffer_count; i++)
- exec_list[i].offset = exec2_list[i].offset;
- /* ... and back out to userspace */
- ret = copy_to_user(to_user_ptr(args->buffers_ptr),
- exec_list,
- sizeof(*exec_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_DEBUG("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
+ for (i = 0; i < args->buffer_count; i++) {
+ ret = __copy_to_user(&user_exec_list[i].offset,
+ &exec2_list[i].offset,
+ sizeof(user_exec_list[i].offset));
+ if (ret) {
+ ret = -EFAULT;
+ DRM_DEBUG("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ break;
+ }
}
}
@@ -1412,14 +1453,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
- ret = copy_to_user(to_user_ptr(args->buffers_ptr),
- exec2_list,
- sizeof(*exec2_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_DEBUG("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
+ struct drm_i915_gem_exec_object2 *user_exec_list =
+ to_user_ptr(args->buffers_ptr);
+ int i;
+
+ for (i = 0; i < args->buffer_count; i++) {
+ ret = __copy_to_user(&user_exec_list[i].offset,
+ &exec2_list[i].offset,
+ sizeof(user_exec_list[i].offset));
+ if (ret) {
+ ret = -EFAULT;
+ DRM_DEBUG("failed to copy %d exec entries "
+ "back to user\n",
+ args->buffer_count);
+ break;
+ }
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 154b0f8bb88d..5deb22864c52 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1089,7 +1089,9 @@ alloc:
if (ret == -ENOSPC && !retried) {
ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
GEN6_PD_SIZE, GEN6_PD_ALIGN,
- I915_CACHE_NONE, 0);
+ I915_CACHE_NONE,
+ 0, dev_priv->gtt.base.total,
+ 0);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 48aa516a1ac0..5b60e25baa32 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7825,14 +7825,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
addr = i915_gem_obj_ggtt_offset(obj);
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
- ret = i915_gem_attach_phys_object(dev, obj,
- (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
- align);
+ ret = i915_gem_object_attach_phys(obj, align);
if (ret) {
DRM_DEBUG_KMS("failed to attach phys object\n");
goto fail_locked;
}
- addr = obj->phys_obj->handle->busaddr;
+ addr = obj->phys_handle->busaddr;
}
if (IS_GEN2(dev))
@@ -7840,10 +7838,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
finish:
if (intel_crtc->cursor_bo) {
- if (INTEL_INFO(dev)->cursor_needs_physical) {
- if (intel_crtc->cursor_bo != obj)
- i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
- } else
+ if (!INTEL_INFO(dev)->cursor_needs_physical)
i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d8adc9104dca..129db0c7d835 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -193,7 +193,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
struct overlay_registers __iomem *regs;
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
+ regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
else
regs = io_mapping_map_wc(dev_priv->gtt.mappable,
i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1340,14 +1340,12 @@ void intel_setup_overlay(struct drm_device *dev)
overlay->reg_bo = reg_bo;
if (OVERLAY_NEEDS_PHYSICAL(dev)) {
- ret = i915_gem_attach_phys_object(dev, reg_bo,
- I915_GEM_PHYS_OVERLAY_REGS,
- PAGE_SIZE);
+ ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
if (ret) {
DRM_ERROR("failed to attach phys overlay regs\n");
goto out_free_bo;
}
- overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
+ overlay->flip_addr = reg_bo->phys_handle->busaddr;
} else {
ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
if (ret) {
@@ -1428,7 +1426,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
/* Cast to make sparse happy, but it's wc memory anyway, so
* equivalent to the wc io mapping on X86. */
regs = (struct overlay_registers __iomem *)
- overlay->reg_bo->phys_obj->handle->vaddr;
+ overlay->reg_bo->phys_handle->vaddr;
else
regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1462,7 +1460,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
error->dovsta = I915_READ(DOVSTA);
error->isr = I915_READ(ISR);
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
+ error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
else
error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 2b6e0ebcc13a..41ecf8a60611 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -152,6 +152,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
uint32_t domain = r->write_domain ?
r->write_domain : r->read_domains;
+ if (domain & RADEON_GEM_DOMAIN_CPU) {
+ DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
+ "for command submission\n");
+ return -EINVAL;
+ }
+
p->relocs[i].domain = domain;
if (domain == RADEON_GEM_DOMAIN_VRAM)
domain |= RADEON_GEM_DOMAIN_GTT;
@@ -342,10 +348,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EINVAL;
/* we only support VM on some SI+ rings */
- if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) &&
- ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
- DRM_ERROR("Ring %d requires VM!\n", p->ring);
- return -EINVAL;
+ if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
+ if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
+ DRM_ERROR("Ring %d requires VM!\n", p->ring);
+ return -EINVAL;
+ }
+ } else {
+ if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
+ DRM_ERROR("VM not supported on ring %d!\n",
+ p->ring);
+ return -EINVAL;
+ }
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0e770bbf7e29..14671406212f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1533,11 +1533,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
radeon_restore_bios_scratch_regs(rdev);
- if (fbcon) {
- radeon_fbdev_set_suspend(rdev, 0);
- console_unlock();
- }
-
/* init dig PHYs, disp eng pll */
if (rdev->is_atom_bios) {
radeon_atom_encoder_init(rdev);
@@ -1562,6 +1557,12 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
}
drm_kms_helper_poll_enable(dev);
+
+ if (fbcon) {
+ radeon_fbdev_set_suspend(rdev, 0);
+ console_unlock();
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index f00dbbf4d806..356b733caafe 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -862,7 +862,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
unsigned *fb_div, unsigned *ref_div)
{
/* limit reference * post divider to a maximum */
- ref_div_max = min(128 / post_div, ref_div_max);
+ ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
/* get matching reference and feedback divider */
*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index d9ab99f47612..1f426696de36 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -130,10 +130,10 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
struct list_head *head)
{
struct radeon_cs_reloc *list;
- unsigned i, idx, size;
+ unsigned i, idx;
- size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc);
- list = kmalloc(size, GFP_KERNEL);
+ list = kmalloc_array(vm->max_pde_used + 1,
+ sizeof(struct radeon_cs_reloc), GFP_KERNEL);
if (!list)
return NULL;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 76842d7dc2e3..ffc7ad3a2c88 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -71,7 +71,7 @@ config KEYBOARD_ATKBD
default y
select SERIO
select SERIO_LIBPS2
- select SERIO_I8042 if X86
+ select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
select SERIO_GSCPS2 if GSC
help
Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index d8241ba0afa0..a15063bea700 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -111,6 +111,8 @@ struct pxa27x_keypad {
unsigned short keycodes[MAX_KEYPAD_KEYS];
int rotary_rel_code[2];
+ unsigned int row_shift;
+
/* state row bits of each column scan */
uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS];
uint32_t direct_key_state;
@@ -467,7 +469,8 @@ scan:
if ((bits_changed & (1 << row)) == 0)
continue;
- code = MATRIX_SCAN_CODE(row, col, MATRIX_ROW_SHIFT);
+ code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
+
input_event(input_dev, EV_MSC, MSC_SCAN, code);
input_report_key(input_dev, keypad->keycodes[code],
new_state[col] & (1 << row));
@@ -802,6 +805,8 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
goto failed_put_clk;
}
+ keypad->row_shift = get_count_order(pdata->matrix_key_cols);
+
if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) ||
(pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) {
input_dev->evbit[0] |= BIT_MASK(EV_REL);
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index effa9c5f2c5c..6b8441f7bc32 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -17,7 +17,7 @@ config MOUSE_PS2
default y
select SERIO
select SERIO_LIBPS2
- select SERIO_I8042 if X86
+ select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
select SERIO_GSCPS2 if GSC
help
Say Y here if you have a PS/2 mouse connected to your system. This
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index d68d33fb5ac2..c5ec703c727e 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -117,6 +117,31 @@ void synaptics_reset(struct psmouse *psmouse)
}
#ifdef CONFIG_MOUSE_PS2_SYNAPTICS
+struct min_max_quirk {
+ const char * const *pnp_ids;
+ int x_min, x_max, y_min, y_max;
+};
+
+static const struct min_max_quirk min_max_pnpid_table[] = {
+ {
+ (const char * const []){"LEN0033", NULL},
+ 1024, 5052, 2258, 4832
+ },
+ {
+ (const char * const []){"LEN0035", "LEN0042", NULL},
+ 1232, 5710, 1156, 4696
+ },
+ {
+ (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
+ 1024, 5112, 2024, 4832
+ },
+ {
+ (const char * const []){"LEN2001", NULL},
+ 1024, 5022, 2508, 4832
+ },
+ { }
+};
+
/* This list has been kindly provided by Synaptics. */
static const char * const topbuttonpad_pnp_ids[] = {
"LEN0017",
@@ -129,7 +154,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN002D",
"LEN002E",
"LEN0033", /* Helix */
- "LEN0034", /* T431s, T540, X1 Carbon 2nd */
+ "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
"LEN0035", /* X240 */
"LEN0036", /* T440 */
"LEN0037",
@@ -142,7 +167,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN0048",
"LEN0049",
"LEN2000",
- "LEN2001",
+ "LEN2001", /* Edge E431 */
"LEN2002",
"LEN2003",
"LEN2004", /* L440 */
@@ -156,6 +181,18 @@ static const char * const topbuttonpad_pnp_ids[] = {
NULL
};
+static bool matches_pnp_id(struct psmouse *psmouse, const char * const ids[])
+{
+ int i;
+
+ if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4))
+ for (i = 0; ids[i]; i++)
+ if (strstr(psmouse->ps2dev.serio->firmware_id, ids[i]))
+ return true;
+
+ return false;
+}
+
/*****************************************************************************
* Synaptics communications functions
****************************************************************************/
@@ -304,20 +341,20 @@ static int synaptics_identify(struct psmouse *psmouse)
* Resolution is left zero if touchpad does not support the query
*/
-static const int *quirk_min_max;
-
static int synaptics_resolution(struct psmouse *psmouse)
{
struct synaptics_data *priv = psmouse->private;
unsigned char resp[3];
+ int i;
- if (quirk_min_max) {
- priv->x_min = quirk_min_max[0];
- priv->x_max = quirk_min_max[1];
- priv->y_min = quirk_min_max[2];
- priv->y_max = quirk_min_max[3];
- return 0;
- }
+ for (i = 0; min_max_pnpid_table[i].pnp_ids; i++)
+ if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) {
+ priv->x_min = min_max_pnpid_table[i].x_min;
+ priv->x_max = min_max_pnpid_table[i].x_max;
+ priv->y_min = min_max_pnpid_table[i].y_min;
+ priv->y_max = min_max_pnpid_table[i].y_max;
+ return 0;
+ }
if (SYN_ID_MAJOR(priv->identity) < 4)
return 0;
@@ -1365,17 +1402,8 @@ static void set_input_params(struct psmouse *psmouse,
if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
__set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
- /* See if this buttonpad has a top button area */
- if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4)) {
- for (i = 0; topbuttonpad_pnp_ids[i]; i++) {
- if (strstr(psmouse->ps2dev.serio->firmware_id,
- topbuttonpad_pnp_ids[i])) {
- __set_bit(INPUT_PROP_TOPBUTTONPAD,
- dev->propbit);
- break;
- }
- }
- }
+ if (matches_pnp_id(psmouse, topbuttonpad_pnp_ids))
+ __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit);
/* Clickpads report only left button */
__clear_bit(BTN_RIGHT, dev->keybit);
__clear_bit(BTN_MIDDLE, dev->keybit);
@@ -1547,104 +1575,10 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
{ }
};
-static const struct dmi_system_id min_max_dmi_table[] __initconst = {
-#if defined(CONFIG_DMI)
- {
- /* Lenovo ThinkPad Helix */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
- },
- .driver_data = (int []){1024, 5052, 2258, 4832},
- },
- {
- /* Lenovo ThinkPad X240 */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
- },
- .driver_data = (int []){1232, 5710, 1156, 4696},
- },
- {
- /* Lenovo ThinkPad Edge E431 */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"),
- },
- .driver_data = (int []){1024, 5022, 2508, 4832},
- },
- {
- /* Lenovo ThinkPad T431s */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"),
- },
- .driver_data = (int []){1024, 5112, 2024, 4832},
- },
- {
- /* Lenovo ThinkPad T440s */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
- },
- .driver_data = (int []){1024, 5112, 2024, 4832},
- },
- {
- /* Lenovo ThinkPad L440 */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"),
- },
- .driver_data = (int []){1024, 5112, 2024, 4832},
- },
- {
- /* Lenovo ThinkPad T540p */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
- },
- .driver_data = (int []){1024, 5056, 2058, 4832},
- },
- {
- /* Lenovo ThinkPad L540 */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"),
- },
- .driver_data = (int []){1024, 5112, 2024, 4832},
- },
- {
- /* Lenovo Yoga S1 */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
- "ThinkPad S1 Yoga"),
- },
- .driver_data = (int []){1232, 5710, 1156, 4696},
- },
- {
- /* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION,
- "ThinkPad X1 Carbon 2nd"),
- },
- .driver_data = (int []){1024, 5112, 2024, 4832},
- },
-#endif
- { }
-};
-
void __init synaptics_module_init(void)
{
- const struct dmi_system_id *min_max_dmi;
-
impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
broken_olpc_ec = dmi_check_system(olpc_dmi_table);
-
- min_max_dmi = dmi_first_match(min_max_dmi_table);
- if (min_max_dmi)
- quirk_min_max = min_max_dmi->driver_data;
}
static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 762b08432de0..8b748d99b934 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -79,7 +79,8 @@ static int amba_kmi_open(struct serio *io)
writeb(divisor, KMICLKDIV);
writeb(KMICR_EN, KMICR);
- ret = request_irq(kmi->irq, amba_kmi_int, 0, "kmi-pl050", kmi);
+ ret = request_irq(kmi->irq, amba_kmi_int, IRQF_SHARED, "kmi-pl050",
+ kmi);
if (ret) {
printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq);
writeb(0, KMICR);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 68edc9db2c64..b845e9370871 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -640,7 +640,7 @@ config TOUCHSCREEN_WM9713
config TOUCHSCREEN_WM97XX_ATMEL
tristate "WM97xx Atmel accelerated touch"
- depends on TOUCHSCREEN_WM97XX && (AVR32 || ARCH_AT91)
+ depends on TOUCHSCREEN_WM97XX && AVR32
help
Say Y here for support for streaming mode with WM97xx touchscreens
on Atmel AT91 or AVR32 systems with an AC97C module.
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9380be7b1895..5f054c44b485 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2178,6 +2178,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
ti->num_discard_bios = 1;
ti->discards_supported = true;
ti->discard_zeroes_data_unsupported = true;
+ /* Discard bios must be split on a block boundary */
+ ti->split_discard_bios = true;
cache->features = ca->features;
ti->per_bio_data_size = get_per_bio_data_size(cache);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index fa0f6cbd6a41..ebfa411d1a7d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -445,11 +445,11 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
else
m->saved_queue_if_no_path = queue_if_no_path;
m->queue_if_no_path = queue_if_no_path;
- if (!m->queue_if_no_path)
- dm_table_run_md_queue_async(m->ti->table);
-
spin_unlock_irqrestore(&m->lock, flags);
+ if (!queue_if_no_path)
+ dm_table_run_md_queue_async(m->ti->table);
+
return 0;
}
@@ -954,7 +954,7 @@ out:
*/
static int reinstate_path(struct pgpath *pgpath)
{
- int r = 0;
+ int r = 0, run_queue = 0;
unsigned long flags;
struct multipath *m = pgpath->pg->m;
@@ -978,7 +978,7 @@ static int reinstate_path(struct pgpath *pgpath)
if (!m->nr_valid_paths++) {
m->current_pgpath = NULL;
- dm_table_run_md_queue_async(m->ti->table);
+ run_queue = 1;
} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
m->pg_init_in_progress++;
@@ -991,6 +991,8 @@ static int reinstate_path(struct pgpath *pgpath)
out:
spin_unlock_irqrestore(&m->lock, flags);
+ if (run_queue)
+ dm_table_run_md_queue_async(m->ti->table);
return r;
}
@@ -1566,8 +1568,8 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
}
if (m->pg_init_required)
__pg_init_all_paths(m);
- dm_table_run_md_queue_async(m->ti->table);
spin_unlock_irqrestore(&m->lock, flags);
+ dm_table_run_md_queue_async(m->ti->table);
}
return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2e71de8e0048..242ac2ea5f29 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -27,7 +27,9 @@
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ
-#define NO_SPACE_TIMEOUT (HZ * 60)
+#define NO_SPACE_TIMEOUT_SECS 60
+
+static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
"A percentage of time allocated for copy on write");
@@ -1670,6 +1672,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
struct pool_c *pt = pool->ti->private;
bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
enum pool_mode old_mode = get_pool_mode(pool);
+ unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
/*
* Never allow the pool to transition to PM_WRITE mode if user
@@ -1732,8 +1735,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
pool->process_prepared_mapping = process_prepared_mapping;
pool->process_prepared_discard = process_prepared_discard_passdown;
- if (!pool->pf.error_if_no_space)
- queue_delayed_work(pool->wq, &pool->no_space_timeout, NO_SPACE_TIMEOUT);
+ if (!pool->pf.error_if_no_space && no_space_timeout)
+ queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
break;
case PM_WRITE:
@@ -3508,6 +3511,9 @@ static void dm_thin_exit(void)
module_init(dm_thin_init);
module_exit(dm_thin_exit);
+module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
+
MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 9802b67040cc..2c61281bebd7 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -523,17 +523,6 @@ static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
return GPIOF_DIR_IN;
}
-static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
-static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- return pinctrl_gpio_direction_output(chip->base + offset);
-}
-
static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset)
{
struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev);
@@ -568,6 +557,18 @@ static void wmt_gpio_set_value(struct gpio_chip *chip, unsigned offset,
wmt_clearbits(data, reg_data_out, BIT(bit));
}
+static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ wmt_gpio_set_value(chip, offset, value);
+ return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
static struct gpio_chip wmt_gpio_chip = {
.label = "gpio-wmt",
.owner = THIS_MODULE,
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 26606641fe44..5a5a24e7d43c 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -537,7 +537,7 @@ static void psy_unregister_cooler(struct power_supply *psy)
}
#endif
-int power_supply_register(struct device *parent, struct power_supply *psy)
+int __power_supply_register(struct device *parent, struct power_supply *psy, bool ws)
{
struct device *dev;
int rc;
@@ -568,7 +568,7 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
}
spin_lock_init(&psy->changed_lock);
- rc = device_init_wakeup(dev, true);
+ rc = device_init_wakeup(dev, ws);
if (rc)
goto wakeup_init_failed;
@@ -606,8 +606,19 @@ dev_set_name_failed:
success:
return rc;
}
+
+int power_supply_register(struct device *parent, struct power_supply *psy)
+{
+ return __power_supply_register(parent, psy, true);
+}
EXPORT_SYMBOL_GPL(power_supply_register);
+int power_supply_register_no_ws(struct device *parent, struct power_supply *psy)
+{
+ return __power_supply_register(parent, psy, false);
+}
+EXPORT_SYMBOL_GPL(power_supply_register_no_ws);
+
void power_supply_unregister(struct power_supply *psy)
{
cancel_work_sync(&psy->changed_work);