Diffstat (limited to 'drivers')
834 files changed, 28193 insertions, 11944 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index c95df0b8c880..5d9248526d78 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -235,17 +235,6 @@ config ACPI_INITRD_TABLE_OVERRIDE initrd, therefore it's safe to say Y. See Documentation/acpi/initrd_table_override.txt for details -config ACPI_BLACKLIST_YEAR - int "Disable ACPI for systems before Jan 1st this year" if X86_32 - default 0 - help - Enter a 4-digit year, e.g., 2001, to disable ACPI by default - on platforms with DMI BIOS date before January 1st that year. - "acpi=force" can be used to override this mechanism. - - Enter 0 to disable this mechanism and allow ACPI to - run by default no matter what the year. (default) - config ACPI_DEBUG bool "Debug Statements" default n diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index b9f0d5f4bba5..8711e3797165 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -56,7 +56,6 @@ static int ac_sleep_before_get_state_ms; struct acpi_ac { struct power_supply charger; - struct acpi_device *adev; struct platform_device *pdev; unsigned long long state; }; @@ -70,8 +69,9 @@ struct acpi_ac { static int acpi_ac_get_state(struct acpi_ac *ac) { acpi_status status; + acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev); - status = acpi_evaluate_integer(ac->adev->handle, "_PSR", NULL, + status = acpi_evaluate_integer(handle, "_PSR", NULL, &ac->state); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, @@ -119,6 +119,7 @@ static enum power_supply_property ac_props[] = { static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data) { struct acpi_ac *ac = data; + struct acpi_device *adev; if (!ac) return; @@ -141,10 +142,11 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data) msleep(ac_sleep_before_get_state_ms); acpi_ac_get_state(ac); - acpi_bus_generate_netlink_event(ac->adev->pnp.device_class, + adev = ACPI_COMPANION(&ac->pdev->dev); + acpi_bus_generate_netlink_event(adev->pnp.device_class, dev_name(&ac->pdev->dev), event, (u32) ac->state); - acpi_notifier_call_chain(ac->adev, event, (u32) ac->state); + acpi_notifier_call_chain(adev, event, (u32) ac->state); kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); } @@ -178,8 +180,8 @@ static int acpi_ac_probe(struct platform_device *pdev) if (!pdev) return -EINVAL; - result = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev); - if (result) + adev = ACPI_COMPANION(&pdev->dev); + if (!adev) return -ENODEV; ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL); @@ -188,7 +190,6 @@ static int acpi_ac_probe(struct platform_device *pdev) strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME); strcpy(acpi_device_class(adev), ACPI_AC_CLASS); - ac->adev = adev; ac->pdev = pdev; platform_set_drvdata(pdev, ac); diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index d3961014aad7..6745fe137b9e 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -163,6 +163,15 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { { "80860F41", (unsigned long)&byt_i2c_dev_desc }, { "INT33B2", }, + { "INT3430", (unsigned long)&lpt_dev_desc }, + { "INT3431", (unsigned long)&lpt_dev_desc }, + { "INT3432", (unsigned long)&lpt_dev_desc }, + { "INT3433", (unsigned long)&lpt_dev_desc }, + { "INT3434", (unsigned long)&lpt_uart_dev_desc }, + { "INT3435", (unsigned long)&lpt_uart_dev_desc }, + { "INT3436", (unsigned long)&lpt_sdio_dev_desc }, + { "INT3437", }, + { } }; diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index 8a4cfc7e71f0..dbfe49e5fd63 
100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -111,7 +111,7 @@ int acpi_create_platform_device(struct acpi_device *adev, pdevinfo.id = -1; pdevinfo.res = resources; pdevinfo.num_res = count; - pdevinfo.acpi_node.handle = adev->handle; + pdevinfo.acpi_node.companion = adev; pdev = platform_device_register_full(&pdevinfo); if (IS_ERR(pdev)) { dev_err(&adev->dev, "platform device creation failed: %ld\n", diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c index 13e045025c33..517af700399d 100644 --- a/drivers/acpi/acpica/utobject.c +++ b/drivers/acpi/acpica/utobject.c @@ -356,7 +356,7 @@ u8 acpi_ut_valid_internal_object(void *object) default: ACPI_DEBUG_PRINT((ACPI_DB_EXEC, - "%p is not not an ACPI operand obj [%s]\n", + "%p is not an ACPI operand obj [%s]\n", object, acpi_ut_get_descriptor_name(object))); break; } diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index fb848378d582..078c4f7fe2dd 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c @@ -75,39 +75,6 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = { {""} }; -#if CONFIG_ACPI_BLACKLIST_YEAR - -static int __init blacklist_by_year(void) -{ - int year; - - /* Doesn't exist? Likely an old system */ - if (!dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL)) { - printk(KERN_ERR PREFIX "no DMI BIOS year, " - "acpi=force is required to enable ACPI\n" ); - return 1; - } - /* 0? Likely a buggy new BIOS */ - if (year == 0) { - printk(KERN_ERR PREFIX "DMI BIOS year==0, " - "assuming ACPI-capable machine\n" ); - return 0; - } - if (year < CONFIG_ACPI_BLACKLIST_YEAR) { - printk(KERN_ERR PREFIX "BIOS age (%d) fails cutoff (%d), " - "acpi=force is required to enable ACPI\n", - year, CONFIG_ACPI_BLACKLIST_YEAR); - return 1; - } - return 0; -} -#else -static inline int blacklist_by_year(void) -{ - return 0; -} -#endif - int __init acpi_blacklisted(void) { int i = 0; @@ -166,8 +133,6 @@ int __init acpi_blacklisted(void) } } - blacklisted += blacklist_by_year(); - dmi_check_system(acpi_osi_dmi_table); return blacklisted; diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index d42b2fb5a7e9..b3480cf7db1a 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -22,16 +22,12 @@ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#include <linux/device.h> +#include <linux/acpi.h> #include <linux/export.h> #include <linux/mutex.h> #include <linux/pm_qos.h> #include <linux/pm_runtime.h> -#include <acpi/acpi.h> -#include <acpi/acpi_bus.h> -#include <acpi/acpi_drivers.h> - #include "internal.h" #define _COMPONENT ACPI_POWER_COMPONENT @@ -548,7 +544,7 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev, */ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in) { - acpi_handle handle = DEVICE_ACPI_HANDLE(dev); + acpi_handle handle = ACPI_HANDLE(dev); struct acpi_device *adev; int ret, d_min, d_max; @@ -656,7 +652,7 @@ int acpi_pm_device_run_wake(struct device *phys_dev, bool enable) if (!device_run_wake(phys_dev)) return -EINVAL; - handle = DEVICE_ACPI_HANDLE(phys_dev); + handle = ACPI_HANDLE(phys_dev); if (!handle || acpi_bus_get_device(handle, &adev)) { dev_dbg(phys_dev, "ACPI handle without context in %s!\n", __func__); @@ -700,7 +696,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable) if (!device_can_wakeup(dev)) return -EINVAL; - handle = DEVICE_ACPI_HANDLE(dev); + handle = ACPI_HANDLE(dev); if (!handle || 
acpi_bus_get_device(handle, &adev)) { dev_dbg(dev, "ACPI handle without context in %s!\n", __func__); return -ENODEV; @@ -722,7 +718,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable) */ struct acpi_device *acpi_dev_pm_get_node(struct device *dev) { - acpi_handle handle = DEVICE_ACPI_HANDLE(dev); + acpi_handle handle = ACPI_HANDLE(dev); struct acpi_device *adev; return handle && !acpi_bus_get_device(handle, &adev) ? adev : NULL; diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index d5309fd49458..ba5b56db9d27 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -173,9 +173,10 @@ static void start_transaction(struct acpi_ec *ec) static void advance_transaction(struct acpi_ec *ec, u8 status) { unsigned long flags; - struct transaction *t = ec->curr; + struct transaction *t; spin_lock_irqsave(&ec->lock, flags); + t = ec->curr; if (!t) goto unlock; if (t->wlen > t->wi) { diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c index fdef416c0ff6..cae3b387b867 100644 --- a/drivers/acpi/event.c +++ b/drivers/acpi/event.c @@ -78,15 +78,17 @@ enum { #define ACPI_GENL_VERSION 0x01 #define ACPI_GENL_MCAST_GROUP_NAME "acpi_mc_group" +static const struct genl_multicast_group acpi_event_mcgrps[] = { + { .name = ACPI_GENL_MCAST_GROUP_NAME, }, +}; + static struct genl_family acpi_event_genl_family = { .id = GENL_ID_GENERATE, .name = ACPI_GENL_FAMILY_NAME, .version = ACPI_GENL_VERSION, .maxattr = ACPI_GENL_ATTR_MAX, -}; - -static struct genl_multicast_group acpi_event_mcgrp = { - .name = ACPI_GENL_MCAST_GROUP_NAME, + .mcgrps = acpi_event_mcgrps, + .n_mcgrps = ARRAY_SIZE(acpi_event_mcgrps), }; int acpi_bus_generate_netlink_event(const char *device_class, @@ -141,7 +143,7 @@ int acpi_bus_generate_netlink_event(const char *device_class, return result; } - genlmsg_multicast(skb, 0, acpi_event_mcgrp.id, GFP_ATOMIC); + genlmsg_multicast(&acpi_event_genl_family, skb, 0, 0, GFP_ATOMIC); return 0; } @@ -149,18 +151,7 @@ EXPORT_SYMBOL(acpi_bus_generate_netlink_event); static int acpi_event_genetlink_init(void) { - int result; - - result = genl_register_family(&acpi_event_genl_family); - if (result) - return result; - - result = genl_register_mc_group(&acpi_event_genl_family, - &acpi_event_mcgrp); - if (result) - genl_unregister_family(&acpi_event_genl_family); - - return result; + return genl_register_family(&acpi_event_genl_family); } #else diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 10f0f40587bb..a22a295edb69 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -197,30 +197,28 @@ static void acpi_physnode_link_name(char *buf, unsigned int node_id) int acpi_bind_one(struct device *dev, acpi_handle handle) { - struct acpi_device *acpi_dev; - acpi_status status; + struct acpi_device *acpi_dev = NULL; struct acpi_device_physical_node *physical_node, *pn; char physical_node_name[PHYSICAL_NODE_NAME_SIZE]; struct list_head *physnode_list; unsigned int node_id; int retval = -EINVAL; - if (ACPI_HANDLE(dev)) { + if (ACPI_COMPANION(dev)) { if (handle) { - dev_warn(dev, "ACPI handle is already set\n"); + dev_warn(dev, "ACPI companion already set\n"); return -EINVAL; } else { - handle = ACPI_HANDLE(dev); + acpi_dev = ACPI_COMPANION(dev); } + } else { + acpi_bus_get_device(handle, &acpi_dev); } - if (!handle) + if (!acpi_dev) return -EINVAL; + get_device(&acpi_dev->dev); get_device(dev); - status = acpi_bus_get_device(handle, &acpi_dev); - if (ACPI_FAILURE(status)) - goto err; - physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL); if (!physical_node) { retval = -ENOMEM; 
@@ -242,10 +240,11 @@ int acpi_bind_one(struct device *dev, acpi_handle handle) dev_warn(dev, "Already associated with ACPI node\n"); kfree(physical_node); - if (ACPI_HANDLE(dev) != handle) + if (ACPI_COMPANION(dev) != acpi_dev) goto err; put_device(dev); + put_device(&acpi_dev->dev); return 0; } if (pn->node_id == node_id) { @@ -259,8 +258,8 @@ int acpi_bind_one(struct device *dev, acpi_handle handle) list_add(&physical_node->node, physnode_list); acpi_dev->physical_node_count++; - if (!ACPI_HANDLE(dev)) - ACPI_HANDLE_SET(dev, acpi_dev->handle); + if (!ACPI_COMPANION(dev)) + ACPI_COMPANION_SET(dev, acpi_dev); acpi_physnode_link_name(physical_node_name, node_id); retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, @@ -283,27 +282,21 @@ int acpi_bind_one(struct device *dev, acpi_handle handle) return 0; err: - ACPI_HANDLE_SET(dev, NULL); + ACPI_COMPANION_SET(dev, NULL); put_device(dev); + put_device(&acpi_dev->dev); return retval; } EXPORT_SYMBOL_GPL(acpi_bind_one); int acpi_unbind_one(struct device *dev) { + struct acpi_device *acpi_dev = ACPI_COMPANION(dev); struct acpi_device_physical_node *entry; - struct acpi_device *acpi_dev; - acpi_status status; - if (!ACPI_HANDLE(dev)) + if (!acpi_dev) return 0; - status = acpi_bus_get_device(ACPI_HANDLE(dev), &acpi_dev); - if (ACPI_FAILURE(status)) { - dev_err(dev, "Oops, ACPI handle corrupt in %s()\n", __func__); - return -EINVAL; - } - mutex_lock(&acpi_dev->physical_node_lock); list_for_each_entry(entry, &acpi_dev->physical_node_list, node) @@ -316,9 +309,10 @@ int acpi_unbind_one(struct device *dev) acpi_physnode_link_name(physnode_name, entry->node_id); sysfs_remove_link(&acpi_dev->dev.kobj, physnode_name); sysfs_remove_link(&dev->kobj, "firmware_node"); - ACPI_HANDLE_SET(dev, NULL); - /* acpi_bind_one() increase refcnt by one. */ + ACPI_COMPANION_SET(dev, NULL); + /* Drop references taken by acpi_bind_one(). 
*/ put_device(dev); + put_device(&acpi_dev->dev); kfree(entry); break; } @@ -328,6 +322,15 @@ int acpi_unbind_one(struct device *dev) } EXPORT_SYMBOL_GPL(acpi_unbind_one); +void acpi_preset_companion(struct device *dev, acpi_handle parent, u64 addr) +{ + struct acpi_device *adev; + + if (!acpi_bus_get_device(acpi_get_child(parent, addr), &adev)) + ACPI_COMPANION_SET(dev, adev); +} +EXPORT_SYMBOL_GPL(acpi_preset_companion); + static int acpi_platform_notify(struct device *dev) { struct acpi_bus_type *type = acpi_get_bus_type(dev); diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 56f05869b08d..0703bff5e60e 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -575,6 +575,7 @@ static int acpi_pci_root_add(struct acpi_device *device, dev_err(&device->dev, "Bus %04x:%02x not present in PCI namespace\n", root->segment, (unsigned int)root->secondary.start); + device->driver_data = NULL; result = -ENODEV; goto end; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 55f9dedbbf9f..15daa21fcd05 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -289,24 +289,17 @@ void acpi_bus_device_eject(void *data, u32 ost_src) { struct acpi_device *device = data; acpi_handle handle = device->handle; - struct acpi_scan_handler *handler; u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; int error; lock_device_hotplug(); mutex_lock(&acpi_scan_lock); - handler = device->handler; - if (!handler || !handler->hotplug.enabled) { - put_device(&device->dev); - goto err_support; - } - if (ost_src == ACPI_NOTIFY_EJECT_REQUEST) acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST, ACPI_OST_SC_EJECT_IN_PROGRESS, NULL); - if (handler->hotplug.mode == AHM_CONTAINER) + if (device->handler && device->handler->hotplug.mode == AHM_CONTAINER) kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE); error = acpi_scan_hot_remove(device); @@ -411,8 +404,7 @@ static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data) break; case ACPI_NOTIFY_EJECT_REQUEST: acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n"); - status = acpi_bus_get_device(handle, &adev); - if (ACPI_FAILURE(status)) + if (acpi_bus_get_device(handle, &adev)) goto err_out; get_device(&adev->dev); @@ -1997,6 +1989,7 @@ static int acpi_bus_scan_fixed(void) if (result) return result; + device->flags.match_driver = true; result = device_attach(&device->dev); if (result < 0) return result; @@ -2013,6 +2006,7 @@ static int acpi_bus_scan_fixed(void) if (result) return result; + device->flags.match_driver = true; result = device_attach(&device->dev); } diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 18dbdff4656e..995e91bcb97b 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -82,13 +82,6 @@ static bool allow_duplicates; module_param(allow_duplicates, bool, 0644); /* - * Some BIOSes claim they use minimum backlight at boot, - * and this may bring dimming screen after boot - */ -static bool use_bios_initial_backlight = 1; -module_param(use_bios_initial_backlight, bool, 0644); - -/* * For Windows 8 systems: if set ture and the GPU driver has * registered a backlight interface, skip registering ACPI video's. 
*/ @@ -406,12 +399,6 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d) return 0; } -static int video_ignore_initial_backlight(const struct dmi_system_id *d) -{ - use_bios_initial_backlight = 0; - return 0; -} - static struct dmi_system_id video_dmi_table[] __initdata = { /* * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 @@ -456,54 +443,6 @@ static struct dmi_system_id video_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), }, }, - { - .callback = video_ignore_initial_backlight, - .ident = "HP Folio 13-2000", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"), - }, - }, - { - .callback = video_ignore_initial_backlight, - .ident = "Fujitsu E753", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E753"), - }, - }, - { - .callback = video_ignore_initial_backlight, - .ident = "HP Pavilion dm4", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"), - }, - }, - { - .callback = video_ignore_initial_backlight, - .ident = "HP Pavilion g6 Notebook PC", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"), - }, - }, - { - .callback = video_ignore_initial_backlight, - .ident = "HP 1000 Notebook PC", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"), - }, - }, - { - .callback = video_ignore_initial_backlight, - .ident = "HP Pavilion m4", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"), - }, - }, {} }; @@ -839,20 +778,18 @@ acpi_video_init_brightness(struct acpi_video_device *device) if (!device->cap._BQC) goto set_level; - if (use_bios_initial_backlight) { - level = acpi_video_bqc_value_to_level(device, level_old); - /* - * On some buggy laptops, _BQC returns an uninitialized - * value when invoked for the first time, i.e. - * level_old is invalid (no matter whether it's a level - * or an index). Set the backlight to max_level in this case. - */ - for (i = 2; i < br->count; i++) - if (level == br->levels[i]) - break; - if (i == br->count || !level) - level = max_level; - } + level = acpi_video_bqc_value_to_level(device, level_old); + /* + * On some buggy laptops, _BQC returns an uninitialized + * value when invoked for the first time, i.e. + * level_old is invalid (no matter whether it's a level + * or an index). Set the backlight to max_level in this case. 
+ */ + for (i = 2; i < br->count; i++) + if (level == br->levels[i]) + break; + if (i == br->count || !level) + level = max_level; set_level: result = acpi_video_device_lcd_set_level(device, level); diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index ab714d2ad978..4372cfa883c9 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -185,7 +185,7 @@ void ata_acpi_bind_port(struct ata_port *ap) if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_handle) return; - ACPI_HANDLE_SET(&ap->tdev, acpi_get_child(host_handle, ap->port_no)); + acpi_preset_companion(&ap->tdev, host_handle, ap->port_no); if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0) ap->pflags |= ATA_PFLAG_INIT_GTM_VALID; @@ -222,7 +222,7 @@ void ata_acpi_bind_dev(struct ata_device *dev) parent_handle = port_handle; } - ACPI_HANDLE_SET(&dev->tdev, acpi_get_child(parent_handle, adr)); + acpi_preset_companion(&dev->tdev, parent_handle, adr); register_hotplug_dock_device(ata_dev_acpi_handle(dev), &ata_acpi_dev_dock_ops, dev, NULL, NULL); diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index 853f610af28f..e88690ebfd82 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c @@ -396,8 +396,7 @@ dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len) struct dma_async_tx_descriptor *tx; struct dma_chan *chan = acdev->dma_chan; dma_cookie_t cookie; - unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP; + unsigned long flags = DMA_PREP_INTERRUPT; int ret = 0; tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags); diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 272f00927761..1bdf104e90bb 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -3511,7 +3511,7 @@ static int init_card(struct atm_dev *dev) tmp = dev_get_by_name(&init_net, tname); /* jhs: was "tmp = dev_get(tname);" */ if (tmp) { memcpy(card->atmdev->esi, tmp->dev_addr, 6); - + dev_put(tmp); printk("%s: ESI %pM\n", card->name, card->atmdev->esi); } /* diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c index 99802d6f3c60..165c2c299e57 100644 --- a/drivers/base/dma-contiguous.c +++ b/drivers/base/dma-contiguous.c @@ -49,7 +49,7 @@ struct cma *dma_contiguous_default_area; /* * Default global CMA area size can be defined in kernel's .config. - * This is usefull mainly for distro maintainers to create a kernel + * This is useful mainly for distro maintainers to create a kernel * that works correctly for most supported systems. * The size can be set in bytes or as a percentage of the total memory * in the system. 
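Several hunks above and below replace raw ACPI handle plumbing (ACPI_HANDLE_SET / DEVICE_ACPI_HANDLE) with the companion pointer (ACPI_COMPANION, ACPI_COMPANION_SET, acpi_preset_companion). The following is a minimal, hypothetical driver fragment, not part of this patch, sketching the converted pattern using only the accessors visible in these hunks:

#include <linux/acpi.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/*
	 * The companion is bound once (e.g. via ACPI_COMPANION_SET() or
	 * acpi_preset_companion()) and looked up on demand; the raw handle
	 * is derived from it instead of being cached in driver data.
	 */
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
	acpi_handle handle = ACPI_HANDLE(&pdev->dev);
	unsigned long long state;

	if (!adev)
		return -ENODEV;

	/*
	 * Evaluate a control method against the companion's handle, as the
	 * reworked ac.c probe/get_state paths above do with _PSR.
	 */
	if (ACPI_FAILURE(acpi_evaluate_integer(handle, "_PSR", NULL, &state)))
		return -ENODEV;

	dev_info(&pdev->dev, "%s: _PSR = %llu\n", dev_name(&adev->dev), state);
	return 0;
}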
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 47051cd25113..3a94b799f166 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -432,7 +432,7 @@ struct platform_device *platform_device_register_full( goto err_alloc; pdev->dev.parent = pdevinfo->parent; - ACPI_HANDLE_SET(&pdev->dev, pdevinfo->acpi_node.handle); + ACPI_COMPANION_SET(&pdev->dev, pdevinfo->acpi_node.companion); if (pdevinfo->dma_mask) { /* @@ -463,7 +463,7 @@ struct platform_device *platform_device_register_full( ret = platform_device_add(pdev); if (ret) { err: - ACPI_HANDLE_SET(&pdev->dev, NULL); + ACPI_COMPANION_SET(&pdev->dev, NULL); kfree(pdev->dev.dma_mask); err_alloc: diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index c12e9b9556be..1b41fca3d65a 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1350,6 +1350,9 @@ static int device_prepare(struct device *dev, pm_message_t state) device_unlock(dev); + if (error) + pm_runtime_put(dev); + return error; } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index f3be496ac8fa..6a680d4de7f1 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -11,12 +11,11 @@ #include <linux/string_helpers.h> #include <scsi/scsi_cmnd.h> #include <linux/idr.h> +#include <linux/blk-mq.h> +#include <linux/numa.h> #define PART_BITS 4 -static bool use_bio; -module_param(use_bio, bool, S_IRUGO); - static int major; static DEFINE_IDA(vd_index_ida); @@ -26,13 +25,11 @@ struct virtio_blk { struct virtio_device *vdev; struct virtqueue *vq; - wait_queue_head_t queue_wait; + spinlock_t vq_lock; /* The disk structure for the kernel. */ struct gendisk *disk; - mempool_t *pool; - /* Process context for config space updates */ struct work_struct config_work; @@ -47,31 +44,17 @@ struct virtio_blk /* Ida index - used to track minor number allocations. */ int index; - - /* Scatterlist: can be too big for stack. 
*/ - struct scatterlist sg[/*sg_elems*/]; }; struct virtblk_req { struct request *req; - struct bio *bio; struct virtio_blk_outhdr out_hdr; struct virtio_scsi_inhdr in_hdr; - struct work_struct work; - struct virtio_blk *vblk; - int flags; u8 status; struct scatterlist sg[]; }; -enum { - VBLK_IS_FLUSH = 1, - VBLK_REQ_FLUSH = 2, - VBLK_REQ_DATA = 4, - VBLK_REQ_FUA = 8, -}; - static inline int virtblk_result(struct virtblk_req *vbr) { switch (vbr->status) { @@ -84,22 +67,6 @@ static inline int virtblk_result(struct virtblk_req *vbr) } } -static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk, - gfp_t gfp_mask) -{ - struct virtblk_req *vbr; - - vbr = mempool_alloc(vblk->pool, gfp_mask); - if (!vbr) - return NULL; - - vbr->vblk = vblk; - if (use_bio) - sg_init_table(vbr->sg, vblk->sg_elems); - - return vbr; -} - static int __virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr, struct scatterlist *data_sg, @@ -143,83 +110,8 @@ static int __virtblk_add_req(struct virtqueue *vq, return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC); } -static void virtblk_add_req(struct virtblk_req *vbr, bool have_data) -{ - struct virtio_blk *vblk = vbr->vblk; - DEFINE_WAIT(wait); - int ret; - - spin_lock_irq(vblk->disk->queue->queue_lock); - while (unlikely((ret = __virtblk_add_req(vblk->vq, vbr, vbr->sg, - have_data)) < 0)) { - prepare_to_wait_exclusive(&vblk->queue_wait, &wait, - TASK_UNINTERRUPTIBLE); - - spin_unlock_irq(vblk->disk->queue->queue_lock); - io_schedule(); - spin_lock_irq(vblk->disk->queue->queue_lock); - - finish_wait(&vblk->queue_wait, &wait); - } - - virtqueue_kick(vblk->vq); - spin_unlock_irq(vblk->disk->queue->queue_lock); -} - -static void virtblk_bio_send_flush(struct virtblk_req *vbr) -{ - vbr->flags |= VBLK_IS_FLUSH; - vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH; - vbr->out_hdr.sector = 0; - vbr->out_hdr.ioprio = 0; - - virtblk_add_req(vbr, false); -} - -static void virtblk_bio_send_data(struct virtblk_req *vbr) -{ - struct virtio_blk *vblk = vbr->vblk; - struct bio *bio = vbr->bio; - bool have_data; - - vbr->flags &= ~VBLK_IS_FLUSH; - vbr->out_hdr.type = 0; - vbr->out_hdr.sector = bio->bi_sector; - vbr->out_hdr.ioprio = bio_prio(bio); - - if (blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg)) { - have_data = true; - if (bio->bi_rw & REQ_WRITE) - vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; - else - vbr->out_hdr.type |= VIRTIO_BLK_T_IN; - } else - have_data = false; - - virtblk_add_req(vbr, have_data); -} - -static void virtblk_bio_send_data_work(struct work_struct *work) -{ - struct virtblk_req *vbr; - - vbr = container_of(work, struct virtblk_req, work); - - virtblk_bio_send_data(vbr); -} - -static void virtblk_bio_send_flush_work(struct work_struct *work) -{ - struct virtblk_req *vbr; - - vbr = container_of(work, struct virtblk_req, work); - - virtblk_bio_send_flush(vbr); -} - static inline void virtblk_request_done(struct virtblk_req *vbr) { - struct virtio_blk *vblk = vbr->vblk; struct request *req = vbr->req; int error = virtblk_result(vbr); @@ -231,92 +123,45 @@ static inline void virtblk_request_done(struct virtblk_req *vbr) req->errors = (error != 0); } - __blk_end_request_all(req, error); - mempool_free(vbr, vblk->pool); -} - -static inline void virtblk_bio_flush_done(struct virtblk_req *vbr) -{ - struct virtio_blk *vblk = vbr->vblk; - - if (vbr->flags & VBLK_REQ_DATA) { - /* Send out the actual write data */ - INIT_WORK(&vbr->work, virtblk_bio_send_data_work); - queue_work(virtblk_wq, &vbr->work); - } else { - bio_endio(vbr->bio, 
virtblk_result(vbr)); - mempool_free(vbr, vblk->pool); - } -} - -static inline void virtblk_bio_data_done(struct virtblk_req *vbr) -{ - struct virtio_blk *vblk = vbr->vblk; - - if (unlikely(vbr->flags & VBLK_REQ_FUA)) { - /* Send out a flush before end the bio */ - vbr->flags &= ~VBLK_REQ_DATA; - INIT_WORK(&vbr->work, virtblk_bio_send_flush_work); - queue_work(virtblk_wq, &vbr->work); - } else { - bio_endio(vbr->bio, virtblk_result(vbr)); - mempool_free(vbr, vblk->pool); - } -} - -static inline void virtblk_bio_done(struct virtblk_req *vbr) -{ - if (unlikely(vbr->flags & VBLK_IS_FLUSH)) - virtblk_bio_flush_done(vbr); - else - virtblk_bio_data_done(vbr); + blk_mq_end_io(req, error); } static void virtblk_done(struct virtqueue *vq) { struct virtio_blk *vblk = vq->vdev->priv; - bool bio_done = false, req_done = false; + bool req_done = false; struct virtblk_req *vbr; unsigned long flags; unsigned int len; - spin_lock_irqsave(vblk->disk->queue->queue_lock, flags); + spin_lock_irqsave(&vblk->vq_lock, flags); do { virtqueue_disable_cb(vq); while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) { - if (vbr->bio) { - virtblk_bio_done(vbr); - bio_done = true; - } else { - virtblk_request_done(vbr); - req_done = true; - } + virtblk_request_done(vbr); + req_done = true; } if (unlikely(virtqueue_is_broken(vq))) break; } while (!virtqueue_enable_cb(vq)); + spin_unlock_irqrestore(&vblk->vq_lock, flags); + /* In case queue is stopped waiting for more buffers. */ if (req_done) - blk_start_queue(vblk->disk->queue); - spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags); - - if (bio_done) - wake_up(&vblk->queue_wait); + blk_mq_start_stopped_hw_queues(vblk->disk->queue); } -static bool do_req(struct request_queue *q, struct virtio_blk *vblk, - struct request *req) +static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) { + struct virtio_blk *vblk = hctx->queue->queuedata; + struct virtblk_req *vbr = req->special; + unsigned long flags; unsigned int num; - struct virtblk_req *vbr; + const bool last = (req->cmd_flags & REQ_END) != 0; - vbr = virtblk_alloc_req(vblk, GFP_ATOMIC); - if (!vbr) - /* When another request finishes we'll try again. */ - return false; + BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); vbr->req = req; - vbr->bio = NULL; if (req->cmd_flags & REQ_FLUSH) { vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH; vbr->out_hdr.sector = 0; @@ -344,7 +189,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, } } - num = blk_rq_map_sg(q, vbr->req, vblk->sg); + num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg); if (num) { if (rq_data_dir(vbr->req) == WRITE) vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; @@ -352,63 +197,19 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, vbr->out_hdr.type |= VIRTIO_BLK_T_IN; } - if (__virtblk_add_req(vblk->vq, vbr, vblk->sg, num) < 0) { - mempool_free(vbr, vblk->pool); - return false; - } - - return true; -} - -static void virtblk_request(struct request_queue *q) -{ - struct virtio_blk *vblk = q->queuedata; - struct request *req; - unsigned int issued = 0; - - while ((req = blk_peek_request(q)) != NULL) { - BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); - - /* If this request fails, stop queue and wait for something to - finish to restart it. 
*/ - if (!do_req(q, vblk, req)) { - blk_stop_queue(q); - break; - } - blk_start_request(req); - issued++; + spin_lock_irqsave(&vblk->vq_lock, flags); + if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) { + virtqueue_kick(vblk->vq); + spin_unlock_irqrestore(&vblk->vq_lock, flags); + blk_mq_stop_hw_queue(hctx); + return BLK_MQ_RQ_QUEUE_BUSY; } - if (issued) + if (last) virtqueue_kick(vblk->vq); -} - -static void virtblk_make_request(struct request_queue *q, struct bio *bio) -{ - struct virtio_blk *vblk = q->queuedata; - struct virtblk_req *vbr; - - BUG_ON(bio->bi_phys_segments + 2 > vblk->sg_elems); - - vbr = virtblk_alloc_req(vblk, GFP_NOIO); - if (!vbr) { - bio_endio(bio, -ENOMEM); - return; - } - vbr->bio = bio; - vbr->flags = 0; - if (bio->bi_rw & REQ_FLUSH) - vbr->flags |= VBLK_REQ_FLUSH; - if (bio->bi_rw & REQ_FUA) - vbr->flags |= VBLK_REQ_FUA; - if (bio->bi_size) - vbr->flags |= VBLK_REQ_DATA; - - if (unlikely(vbr->flags & VBLK_REQ_FLUSH)) - virtblk_bio_send_flush(vbr); - else - virtblk_bio_send_data(vbr); + spin_unlock_irqrestore(&vblk->vq_lock, flags); + return BLK_MQ_RQ_QUEUE_OK; } /* return id (s/n) string for *disk to *id_str @@ -673,12 +474,35 @@ static const struct device_attribute dev_attr_cache_type_rw = __ATTR(cache_type, S_IRUGO|S_IWUSR, virtblk_cache_type_show, virtblk_cache_type_store); +static struct blk_mq_ops virtio_mq_ops = { + .queue_rq = virtio_queue_rq, + .map_queue = blk_mq_map_queue, + .alloc_hctx = blk_mq_alloc_single_hw_queue, + .free_hctx = blk_mq_free_single_hw_queue, +}; + +static struct blk_mq_reg virtio_mq_reg = { + .ops = &virtio_mq_ops, + .nr_hw_queues = 1, + .queue_depth = 64, + .numa_node = NUMA_NO_NODE, + .flags = BLK_MQ_F_SHOULD_MERGE, +}; + +static void virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx, + struct request *rq, unsigned int nr) +{ + struct virtio_blk *vblk = data; + struct virtblk_req *vbr = rq->special; + + sg_init_table(vbr->sg, vblk->sg_elems); +} + static int virtblk_probe(struct virtio_device *vdev) { struct virtio_blk *vblk; struct request_queue *q; int err, index; - int pool_size; u64 cap; u32 v, blk_size, sg_elems, opt_io_size; @@ -702,17 +526,14 @@ static int virtblk_probe(struct virtio_device *vdev) /* We need an extra sg elements at head and tail. */ sg_elems += 2; - vdev->priv = vblk = kmalloc(sizeof(*vblk) + - sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL); + vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL); if (!vblk) { err = -ENOMEM; goto out_free_index; } - init_waitqueue_head(&vblk->queue_wait); vblk->vdev = vdev; vblk->sg_elems = sg_elems; - sg_init_table(vblk->sg, vblk->sg_elems); mutex_init(&vblk->config_lock); INIT_WORK(&vblk->config_work, virtblk_config_changed_work); @@ -721,31 +542,27 @@ static int virtblk_probe(struct virtio_device *vdev) err = init_vq(vblk); if (err) goto out_free_vblk; - - pool_size = sizeof(struct virtblk_req); - if (use_bio) - pool_size += sizeof(struct scatterlist) * sg_elems; - vblk->pool = mempool_create_kmalloc_pool(1, pool_size); - if (!vblk->pool) { - err = -ENOMEM; - goto out_free_vq; - } + spin_lock_init(&vblk->vq_lock); /* FIXME: How many partitions? How long is a piece of string? 
*/ vblk->disk = alloc_disk(1 << PART_BITS); if (!vblk->disk) { err = -ENOMEM; - goto out_mempool; + goto out_free_vq; } - q = vblk->disk->queue = blk_init_queue(virtblk_request, NULL); + virtio_mq_reg.cmd_size = + sizeof(struct virtblk_req) + + sizeof(struct scatterlist) * sg_elems; + + q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk); if (!q) { err = -ENOMEM; goto out_put_disk; } - if (use_bio) - blk_queue_make_request(q, virtblk_make_request); + blk_mq_init_commands(q, virtblk_init_vbr, vblk); + q->queuedata = vblk; virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); @@ -848,8 +665,6 @@ out_del_disk: blk_cleanup_queue(vblk->disk->queue); out_put_disk: put_disk(vblk->disk); -out_mempool: - mempool_destroy(vblk->pool); out_free_vq: vdev->config->del_vqs(vdev); out_free_vblk: @@ -881,7 +696,6 @@ static void virtblk_remove(struct virtio_device *vdev) refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount); put_disk(vblk->disk); - mempool_destroy(vblk->pool); vdev->config->del_vqs(vdev); kfree(vblk); @@ -905,10 +719,7 @@ static int virtblk_freeze(struct virtio_device *vdev) flush_work(&vblk->config_work); - spin_lock_irq(vblk->disk->queue->queue_lock); - blk_stop_queue(vblk->disk->queue); - spin_unlock_irq(vblk->disk->queue->queue_lock); - blk_sync_queue(vblk->disk->queue); + blk_mq_stop_hw_queues(vblk->disk->queue); vdev->config->del_vqs(vdev); return 0; @@ -921,11 +732,9 @@ static int virtblk_restore(struct virtio_device *vdev) vblk->config_enable = true; ret = init_vq(vdev->priv); - if (!ret) { - spin_lock_irq(vblk->disk->queue->queue_lock); - blk_start_queue(vblk->disk->queue); - spin_unlock_irq(vblk->disk->queue->queue_lock); - } + if (!ret) + blk_mq_start_stopped_hw_queues(vblk->disk->queue); + return ret; } #endif diff --git a/drivers/char/random.c b/drivers/char/random.c index 4fe5609eeb72..429b75bb60e8 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -255,6 +255,7 @@ #include <linux/fips.h> #include <linux/ptrace.h> #include <linux/kmemcheck.h> +#include <linux/workqueue.h> #include <linux/irq.h> #include <asm/processor.h> @@ -269,14 +270,28 @@ /* * Configuration information */ -#define INPUT_POOL_WORDS 128 -#define OUTPUT_POOL_WORDS 32 -#define SEC_XFER_SIZE 512 -#define EXTRACT_SIZE 10 +#define INPUT_POOL_SHIFT 12 +#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5)) +#define OUTPUT_POOL_SHIFT 10 +#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5)) +#define SEC_XFER_SIZE 512 +#define EXTRACT_SIZE 10 + +#define DEBUG_RANDOM_BOOT 0 #define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long)) /* + * To allow fractional bits to be tracked, the entropy_count field is + * denominated in units of 1/8th bits. + * + * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in + * credit_entropy_bits() needs to be 64 bits wide. + */ +#define ENTROPY_SHIFT 3 +#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT) + +/* * The minimum number of bits of entropy before we wake up a read on * /dev/random. Should be enough to do a significant reseed. */ @@ -287,108 +302,100 @@ static int random_read_wakeup_thresh = 64; * should wake up processes which are selecting or polling on write * access to /dev/random. */ -static int random_write_wakeup_thresh = 128; +static int random_write_wakeup_thresh = 28 * OUTPUT_POOL_WORDS; /* - * When the input pool goes over trickle_thresh, start dropping most - * samples to avoid wasting CPU time and reduce lock contention. 
+ * The minimum number of seconds between urandom pool resending. We + * do this to limit the amount of entropy that can be drained from the + * input pool even if there are heavy demands on /dev/urandom. */ - -static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28; - -static DEFINE_PER_CPU(int, trickle_count); +static int random_min_urandom_seed = 60; /* - * A pool of size .poolwords is stirred with a primitive polynomial - * of degree .poolwords over GF(2). The taps for various sizes are - * defined below. They are chosen to be evenly spaced (minimum RMS - * distance from evenly spaced; the numbers in the comments are a - * scaled squared error sum) except for the last tap, which is 1 to - * get the twisting happening as fast as possible. + * Originally, we used a primitive polynomial of degree .poolwords + * over GF(2). The taps for various sizes are defined below. They + * were chosen to be evenly spaced except for the last tap, which is 1 + * to get the twisting happening as fast as possible. + * + * For the purposes of better mixing, we use the CRC-32 polynomial as + * well to make a (modified) twisted Generalized Feedback Shift + * Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR + * generators. ACM Transactions on Modeling and Computer Simulation + * 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted + * GFSR generators II. ACM Transactions on Mdeling and Computer + * Simulation 4:254-266) + * + * Thanks to Colin Plumb for suggesting this. + * + * The mixing operation is much less sensitive than the output hash, + * where we use SHA-1. All that we want of mixing operation is that + * it be a good non-cryptographic hash; i.e. it not produce collisions + * when fed "random" data of the sort we expect to see. As long as + * the pool state differs for different inputs, we have preserved the + * input entropy and done a good job. The fact that an intelligent + * attacker can construct inputs that will produce controlled + * alterations to the pool's state is not important because we don't + * consider such inputs to contribute any randomness. The only + * property we need with respect to them is that the attacker can't + * increase his/her knowledge of the pool's state. Since all + * additions are reversible (knowing the final state and the input, + * you can reconstruct the initial state), if an attacker has any + * uncertainty about the initial state, he/she can only shuffle that + * uncertainty about, but never cause any collisions (which would + * decrease the uncertainty). + * + * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and + * Videau in their paper, "The Linux Pseudorandom Number Generator + * Revisited" (see: http://eprint.iacr.org/2012/251.pdf). In their + * paper, they point out that we are not using a true Twisted GFSR, + * since Matsumoto & Kurita used a trinomial feedback polynomial (that + * is, with only three taps, instead of the six that we are using). + * As a result, the resulting polynomial is neither primitive nor + * irreducible, and hence does not have a maximal period over + * GF(2**32). They suggest a slight change to the generator + * polynomial which improves the resulting TGFSR polynomial to be + * irreducible, which we have made here. 
*/ static struct poolinfo { - int poolwords; + int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits; +#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5) int tap1, tap2, tap3, tap4, tap5; } poolinfo_table[] = { - /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */ - { 128, 103, 76, 51, 25, 1 }, - /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */ - { 32, 26, 20, 14, 7, 1 }, + /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */ + /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */ + { S(128), 104, 76, 51, 25, 1 }, + /* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */ + /* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */ + { S(32), 26, 19, 14, 7, 1 }, #if 0 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */ - { 2048, 1638, 1231, 819, 411, 1 }, + { S(2048), 1638, 1231, 819, 411, 1 }, /* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */ - { 1024, 817, 615, 412, 204, 1 }, + { S(1024), 817, 615, 412, 204, 1 }, /* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */ - { 1024, 819, 616, 410, 207, 2 }, + { S(1024), 819, 616, 410, 207, 2 }, /* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */ - { 512, 411, 308, 208, 104, 1 }, + { S(512), 411, 308, 208, 104, 1 }, /* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */ - { 512, 409, 307, 206, 102, 2 }, + { S(512), 409, 307, 206, 102, 2 }, /* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */ - { 512, 409, 309, 205, 103, 2 }, + { S(512), 409, 309, 205, 103, 2 }, /* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */ - { 256, 205, 155, 101, 52, 1 }, + { S(256), 205, 155, 101, 52, 1 }, /* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */ - { 128, 103, 78, 51, 27, 2 }, + { S(128), 103, 78, 51, 27, 2 }, /* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */ - { 64, 52, 39, 26, 14, 1 }, + { S(64), 52, 39, 26, 14, 1 }, #endif }; -#define POOLBITS poolwords*32 -#define POOLBYTES poolwords*4 - -/* - * For the purposes of better mixing, we use the CRC-32 polynomial as - * well to make a twisted Generalized Feedback Shift Reigster - * - * (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR generators. ACM - * Transactions on Modeling and Computer Simulation 2(3):179-194. - * Also see M. Matsumoto & Y. Kurita, 1994. Twisted GFSR generators - * II. ACM Transactions on Mdeling and Computer Simulation 4:254-266) - * - * Thanks to Colin Plumb for suggesting this. - * - * We have not analyzed the resultant polynomial to prove it primitive; - * in fact it almost certainly isn't. Nonetheless, the irreducible factors - * of a random large-degree polynomial over GF(2) are more than large enough - * that periodicity is not a concern. - * - * The input hash is much less sensitive than the output hash. All - * that we want of it is that it be a good non-cryptographic hash; - * i.e. it not produce collisions when fed "random" data of the sort - * we expect to see. As long as the pool state differs for different - * inputs, we have preserved the input entropy and done a good job. - * The fact that an intelligent attacker can construct inputs that - * will produce controlled alterations to the pool's state is not - * important because we don't consider such inputs to contribute any - * randomness. The only property we need with respect to them is that - * the attacker can't increase his/her knowledge of the pool's state. 
- * Since all additions are reversible (knowing the final state and the - * input, you can reconstruct the initial state), if an attacker has - * any uncertainty about the initial state, he/she can only shuffle - * that uncertainty about, but never cause any collisions (which would - * decrease the uncertainty). - * - * The chosen system lets the state of the pool be (essentially) the input - * modulo the generator polymnomial. Now, for random primitive polynomials, - * this is a universal class of hash functions, meaning that the chance - * of a collision is limited by the attacker's knowledge of the generator - * polynomail, so if it is chosen at random, an attacker can never force - * a collision. Here, we use a fixed polynomial, but we *can* assume that - * ###--> it is unknown to the processes generating the input entropy. <-### - * Because of this important property, this is a good, collision-resistant - * hash; hash collisions will occur no more often than chance. - */ - /* * Static global variables */ @@ -396,17 +403,6 @@ static DECLARE_WAIT_QUEUE_HEAD(random_read_wait); static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); static struct fasync_struct *fasync; -static bool debug; -module_param(debug, bool, 0644); -#define DEBUG_ENT(fmt, arg...) do { \ - if (debug) \ - printk(KERN_DEBUG "random %04d %04d %04d: " \ - fmt,\ - input_pool.entropy_count,\ - blocking_pool.entropy_count,\ - nonblocking_pool.entropy_count,\ - ## arg); } while (0) - /********************************************************************** * * OS independent entropy store. Here are the functions which handle @@ -417,23 +413,26 @@ module_param(debug, bool, 0644); struct entropy_store; struct entropy_store { /* read-only data: */ - struct poolinfo *poolinfo; + const struct poolinfo *poolinfo; __u32 *pool; const char *name; struct entropy_store *pull; - int limit; + struct work_struct push_work; /* read-write data: */ + unsigned long last_pulled; spinlock_t lock; - unsigned add_ptr; - unsigned input_rotate; + unsigned short add_ptr; + unsigned short input_rotate; int entropy_count; int entropy_total; unsigned int initialized:1; - bool last_data_init; + unsigned int limit:1; + unsigned int last_data_init:1; __u8 last_data[EXTRACT_SIZE]; }; +static void push_to_pool(struct work_struct *work); static __u32 input_pool_data[INPUT_POOL_WORDS]; static __u32 blocking_pool_data[OUTPUT_POOL_WORDS]; static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS]; @@ -452,7 +451,9 @@ static struct entropy_store blocking_pool = { .limit = 1, .pull = &input_pool, .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock), - .pool = blocking_pool_data + .pool = blocking_pool_data, + .push_work = __WORK_INITIALIZER(blocking_pool.push_work, + push_to_pool), }; static struct entropy_store nonblocking_pool = { @@ -460,7 +461,9 @@ static struct entropy_store nonblocking_pool = { .name = "nonblocking", .pull = &input_pool, .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock), - .pool = nonblocking_pool_data + .pool = nonblocking_pool_data, + .push_work = __WORK_INITIALIZER(nonblocking_pool.push_work, + push_to_pool), }; static __u32 const twist_table[8] = { @@ -498,7 +501,7 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in, /* mix one byte at a time to simplify size handling and churn faster */ while (nbytes--) { - w = rol32(*bytes++, input_rotate & 31); + w = rol32(*bytes++, input_rotate); i = (i - 1) & wordmask; /* XOR in the various taps */ @@ -518,7 +521,7 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in, * 
rotation, so that successive passes spread the * input bits across the pool evenly. */ - input_rotate += i ? 7 : 14; + input_rotate = (input_rotate + (i ? 7 : 14)) & 31; } ACCESS_ONCE(r->input_rotate) = input_rotate; @@ -561,65 +564,151 @@ struct fast_pool { * collector. It's hardcoded for an 128 bit pool and assumes that any * locks that might be needed are taken by the caller. */ -static void fast_mix(struct fast_pool *f, const void *in, int nbytes) +static void fast_mix(struct fast_pool *f, __u32 input[4]) { - const char *bytes = in; __u32 w; - unsigned i = f->count; unsigned input_rotate = f->rotate; - while (nbytes--) { - w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^ - f->pool[(i + 1) & 3]; - f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7]; - input_rotate += (i++ & 3) ? 7 : 14; - } - f->count = i; + w = rol32(input[0], input_rotate) ^ f->pool[0] ^ f->pool[3]; + f->pool[0] = (w >> 3) ^ twist_table[w & 7]; + input_rotate = (input_rotate + 14) & 31; + w = rol32(input[1], input_rotate) ^ f->pool[1] ^ f->pool[0]; + f->pool[1] = (w >> 3) ^ twist_table[w & 7]; + input_rotate = (input_rotate + 7) & 31; + w = rol32(input[2], input_rotate) ^ f->pool[2] ^ f->pool[1]; + f->pool[2] = (w >> 3) ^ twist_table[w & 7]; + input_rotate = (input_rotate + 7) & 31; + w = rol32(input[3], input_rotate) ^ f->pool[3] ^ f->pool[2]; + f->pool[3] = (w >> 3) ^ twist_table[w & 7]; + input_rotate = (input_rotate + 7) & 31; + f->rotate = input_rotate; + f->count++; } /* - * Credit (or debit) the entropy store with n bits of entropy + * Credit (or debit) the entropy store with n bits of entropy. + * Use credit_entropy_bits_safe() if the value comes from userspace + * or otherwise should be checked for extreme values. */ static void credit_entropy_bits(struct entropy_store *r, int nbits) { int entropy_count, orig; + const int pool_size = r->poolinfo->poolfracbits; + int nfrac = nbits << ENTROPY_SHIFT; if (!nbits) return; - DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name); retry: entropy_count = orig = ACCESS_ONCE(r->entropy_count); - entropy_count += nbits; + if (nfrac < 0) { + /* Debit */ + entropy_count += nfrac; + } else { + /* + * Credit: we have to account for the possibility of + * overwriting already present entropy. Even in the + * ideal case of pure Shannon entropy, new contributions + * approach the full value asymptotically: + * + * entropy <- entropy + (pool_size - entropy) * + * (1 - exp(-add_entropy/pool_size)) + * + * For add_entropy <= pool_size/2 then + * (1 - exp(-add_entropy/pool_size)) >= + * (add_entropy/pool_size)*0.7869... + * so we can approximate the exponential with + * 3/4*add_entropy/pool_size and still be on the + * safe side by adding at most pool_size/2 at a time. + * + * The use of pool_size-2 in the while statement is to + * prevent rounding artifacts from making the loop + * arbitrarily long; this limits the loop to log2(pool_size)*2 + * turns no matter how large nbits is. 
+ */ + int pnfrac = nfrac; + const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2; + /* The +2 corresponds to the /4 in the denominator */ + + do { + unsigned int anfrac = min(pnfrac, pool_size/2); + unsigned int add = + ((pool_size - entropy_count)*anfrac*3) >> s; + + entropy_count += add; + pnfrac -= anfrac; + } while (unlikely(entropy_count < pool_size-2 && pnfrac)); + } if (entropy_count < 0) { - DEBUG_ENT("negative entropy/overflow\n"); + pr_warn("random: negative entropy/overflow: pool %s count %d\n", + r->name, entropy_count); + WARN_ON(1); entropy_count = 0; - } else if (entropy_count > r->poolinfo->POOLBITS) - entropy_count = r->poolinfo->POOLBITS; + } else if (entropy_count > pool_size) + entropy_count = pool_size; if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) goto retry; - if (!r->initialized && nbits > 0) { - r->entropy_total += nbits; - if (r->entropy_total > 128) { - r->initialized = 1; - if (r == &nonblocking_pool) - prandom_reseed_late(); + r->entropy_total += nbits; + if (!r->initialized && r->entropy_total > 128) { + r->initialized = 1; + r->entropy_total = 0; + if (r == &nonblocking_pool) { + prandom_reseed_late(); + pr_notice("random: %s pool is initialized\n", r->name); } } - trace_credit_entropy_bits(r->name, nbits, entropy_count, + trace_credit_entropy_bits(r->name, nbits, + entropy_count >> ENTROPY_SHIFT, r->entropy_total, _RET_IP_); - /* should we wake readers? */ - if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) { - wake_up_interruptible(&random_read_wait); - kill_fasync(&fasync, SIGIO, POLL_IN); + if (r == &input_pool) { + int entropy_bytes = entropy_count >> ENTROPY_SHIFT; + + /* should we wake readers? */ + if (entropy_bytes >= random_read_wakeup_thresh) { + wake_up_interruptible(&random_read_wait); + kill_fasync(&fasync, SIGIO, POLL_IN); + } + /* If the input pool is getting full, send some + * entropy to the two output pools, flipping back and + * forth between them, until the output pools are 75% + * full. + */ + if (entropy_bytes > random_write_wakeup_thresh && + r->initialized && + r->entropy_total >= 2*random_read_wakeup_thresh) { + static struct entropy_store *last = &blocking_pool; + struct entropy_store *other = &blocking_pool; + + if (last == &blocking_pool) + other = &nonblocking_pool; + if (other->entropy_count <= + 3 * other->poolinfo->poolfracbits / 4) + last = other; + if (last->entropy_count <= + 3 * last->poolinfo->poolfracbits / 4) { + schedule_work(&last->push_work); + r->entropy_total = 0; + } + } } } +static void credit_entropy_bits_safe(struct entropy_store *r, int nbits) +{ + const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1)); + + /* Cap the value to avoid overflows */ + nbits = min(nbits, nbits_max); + nbits = max(nbits, -nbits_max); + + credit_entropy_bits(r, nbits); +} + /********************************************************************* * * Entropy input management @@ -633,6 +722,8 @@ struct timer_rand_state { unsigned dont_count_entropy:1; }; +#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, }; + /* * Add device- or boot-specific data to the input and nonblocking * pools to help initialize them to unique values. 
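A worked check of the crediting approximation introduced in credit_entropy_bits() above (illustration only, not part of this patch). The ideal Shannon-style update and the linear bound the code relies on are:

\[
  H' \;=\; H + (P - H)\bigl(1 - e^{-a/P}\bigr),
  \qquad
  \frac{1 - e^{-x}}{x} \;\ge\; \frac{1 - e^{-1/2}}{1/2} \approx 0.7869 \;>\; \tfrac{3}{4}
  \quad\text{for } 0 < x \le \tfrac{1}{2},
\]

where H is the current entropy estimate, P the pool size in (fractional) bits, and a the amount being credited. For example, crediting a = 32 bits to an empty 4096-bit input pool: the exact value is 4096(1 - e^{-32/4096}), roughly 31.9 bits, while the 3/4 linear bound credits only 24 bits, so the estimate stays on the conservative side, as the comment claims.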
@@ -644,15 +735,22 @@ struct timer_rand_state { void add_device_randomness(const void *buf, unsigned int size) { unsigned long time = random_get_entropy() ^ jiffies; + unsigned long flags; - mix_pool_bytes(&input_pool, buf, size, NULL); - mix_pool_bytes(&input_pool, &time, sizeof(time), NULL); - mix_pool_bytes(&nonblocking_pool, buf, size, NULL); - mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL); + trace_add_device_randomness(size, _RET_IP_); + spin_lock_irqsave(&input_pool.lock, flags); + _mix_pool_bytes(&input_pool, buf, size, NULL); + _mix_pool_bytes(&input_pool, &time, sizeof(time), NULL); + spin_unlock_irqrestore(&input_pool.lock, flags); + + spin_lock_irqsave(&nonblocking_pool.lock, flags); + _mix_pool_bytes(&nonblocking_pool, buf, size, NULL); + _mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL); + spin_unlock_irqrestore(&nonblocking_pool.lock, flags); } EXPORT_SYMBOL(add_device_randomness); -static struct timer_rand_state input_timer_state; +static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE; /* * This function adds entropy to the entropy "pool" by using timing @@ -666,6 +764,7 @@ static struct timer_rand_state input_timer_state; */ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) { + struct entropy_store *r; struct { long jiffies; unsigned cycles; @@ -674,15 +773,12 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) long delta, delta2, delta3; preempt_disable(); - /* if over the trickle threshold, use only 1 in 4096 samples */ - if (input_pool.entropy_count > trickle_thresh && - ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff)) - goto out; sample.jiffies = jiffies; sample.cycles = random_get_entropy(); sample.num = num; - mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL); + r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool; + mix_pool_bytes(r, &sample, sizeof(sample), NULL); /* * Calculate number of bits of randomness we probably added. @@ -716,10 +812,8 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) * Round down by 1 bit on general principles, * and limit entropy entimate to 12 bits. */ - credit_entropy_bits(&input_pool, - min_t(int, fls(delta>>1), 11)); + credit_entropy_bits(r, min_t(int, fls(delta>>1), 11)); } -out: preempt_enable(); } @@ -732,10 +826,10 @@ void add_input_randomness(unsigned int type, unsigned int code, if (value == last_value) return; - DEBUG_ENT("input event\n"); last_value = value; add_timer_randomness(&input_timer_state, (type << 4) ^ code ^ (code >> 4) ^ value); + trace_add_input_randomness(ENTROPY_BITS(&input_pool)); } EXPORT_SYMBOL_GPL(add_input_randomness); @@ -747,20 +841,21 @@ void add_interrupt_randomness(int irq, int irq_flags) struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness); struct pt_regs *regs = get_irq_regs(); unsigned long now = jiffies; - __u32 input[4], cycles = random_get_entropy(); - - input[0] = cycles ^ jiffies; - input[1] = irq; - if (regs) { - __u64 ip = instruction_pointer(regs); - input[2] = ip; - input[3] = ip >> 32; - } + cycles_t cycles = random_get_entropy(); + __u32 input[4], c_high, j_high; + __u64 ip; - fast_mix(fast_pool, input, sizeof(input)); + c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0; + j_high = (sizeof(now) > 4) ? now >> 32 : 0; + input[0] = cycles ^ j_high ^ irq; + input[1] = now ^ c_high; + ip = regs ? 
instruction_pointer(regs) : _RET_IP_; + input[2] = ip; + input[3] = ip >> 32; - if ((fast_pool->count & 1023) && - !time_after(now, fast_pool->last + HZ)) + fast_mix(fast_pool, input); + + if ((fast_pool->count & 63) && !time_after(now, fast_pool->last + HZ)) return; fast_pool->last = now; @@ -789,10 +884,8 @@ void add_disk_randomness(struct gendisk *disk) if (!disk || !disk->random) return; /* first major is 1, so we get >= 0x200 here */ - DEBUG_ENT("disk event %d:%d\n", - MAJOR(disk_devt(disk)), MINOR(disk_devt(disk))); - add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); + trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool)); } #endif @@ -810,30 +903,58 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, * from the primary pool to the secondary extraction pool. We make * sure we pull enough for a 'catastrophic reseed'. */ +static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes); static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes) { - __u32 tmp[OUTPUT_POOL_WORDS]; + if (r->limit == 0 && random_min_urandom_seed) { + unsigned long now = jiffies; - if (r->pull && r->entropy_count < nbytes * 8 && - r->entropy_count < r->poolinfo->POOLBITS) { - /* If we're limited, always leave two wakeup worth's BITS */ - int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4; - int bytes = nbytes; - - /* pull at least as many as BYTES as wakeup BITS */ - bytes = max_t(int, bytes, random_read_wakeup_thresh / 8); - /* but never more than the buffer size */ - bytes = min_t(int, bytes, sizeof(tmp)); - - DEBUG_ENT("going to reseed %s with %d bits " - "(%zu of %d requested)\n", - r->name, bytes * 8, nbytes * 8, r->entropy_count); - - bytes = extract_entropy(r->pull, tmp, bytes, - random_read_wakeup_thresh / 8, rsvd); - mix_pool_bytes(r, tmp, bytes, NULL); - credit_entropy_bits(r, bytes*8); + if (time_before(now, + r->last_pulled + random_min_urandom_seed * HZ)) + return; + r->last_pulled = now; } + if (r->pull && + r->entropy_count < (nbytes << (ENTROPY_SHIFT + 3)) && + r->entropy_count < r->poolinfo->poolfracbits) + _xfer_secondary_pool(r, nbytes); +} + +static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes) +{ + __u32 tmp[OUTPUT_POOL_WORDS]; + + /* For /dev/random's pool, always leave two wakeup worth's BITS */ + int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4; + int bytes = nbytes; + + /* pull at least as many as BYTES as wakeup BITS */ + bytes = max_t(int, bytes, random_read_wakeup_thresh / 8); + /* but never more than the buffer size */ + bytes = min_t(int, bytes, sizeof(tmp)); + + trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8, + ENTROPY_BITS(r), ENTROPY_BITS(r->pull)); + bytes = extract_entropy(r->pull, tmp, bytes, + random_read_wakeup_thresh / 8, rsvd); + mix_pool_bytes(r, tmp, bytes, NULL); + credit_entropy_bits(r, bytes*8); +} + +/* + * Used as a workqueue function so that when the input pool is getting + * full, we can "spill over" some entropy to the output pools. That + * way the output pools can store some of the excess entropy instead + * of letting it go to waste. 
+ */ +static void push_to_pool(struct work_struct *work) +{ + struct entropy_store *r = container_of(work, struct entropy_store, + push_work); + BUG_ON(!r); + _xfer_secondary_pool(r, random_read_wakeup_thresh/8); + trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT, + r->pull->entropy_count >> ENTROPY_SHIFT); } /* @@ -853,50 +974,48 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, { unsigned long flags; int wakeup_write = 0; + int have_bytes; + int entropy_count, orig; + size_t ibytes; /* Hold lock while accounting */ spin_lock_irqsave(&r->lock, flags); - BUG_ON(r->entropy_count > r->poolinfo->POOLBITS); - DEBUG_ENT("trying to extract %zu bits from %s\n", - nbytes * 8, r->name); + BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); /* Can we pull enough? */ - if (r->entropy_count / 8 < min + reserved) { - nbytes = 0; - } else { - int entropy_count, orig; retry: - entropy_count = orig = ACCESS_ONCE(r->entropy_count); + entropy_count = orig = ACCESS_ONCE(r->entropy_count); + have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); + ibytes = nbytes; + if (have_bytes < min + reserved) { + ibytes = 0; + } else { /* If limited, never pull more than available */ - if (r->limit && nbytes + reserved >= entropy_count / 8) - nbytes = entropy_count/8 - reserved; - - if (entropy_count / 8 >= nbytes + reserved) { - entropy_count -= nbytes*8; - if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) - goto retry; - } else { - entropy_count = reserved; - if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) - goto retry; - } + if (r->limit && ibytes + reserved >= have_bytes) + ibytes = have_bytes - reserved; - if (entropy_count < random_write_wakeup_thresh) - wakeup_write = 1; - } + if (have_bytes >= ibytes + reserved) + entropy_count -= ibytes << (ENTROPY_SHIFT + 3); + else + entropy_count = reserved << (ENTROPY_SHIFT + 3); - DEBUG_ENT("debiting %zu entropy credits from %s%s\n", - nbytes * 8, r->name, r->limit ? "" : " (unlimited)"); + if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) + goto retry; + if ((r->entropy_count >> ENTROPY_SHIFT) + < random_write_wakeup_thresh) + wakeup_write = 1; + } spin_unlock_irqrestore(&r->lock, flags); + trace_debit_entropy(r->name, 8 * ibytes); if (wakeup_write) { wake_up_interruptible(&random_write_wait); kill_fasync(&fasync, SIGIO, POLL_OUT); } - return nbytes; + return ibytes; } static void extract_buf(struct entropy_store *r, __u8 *out) @@ -904,7 +1023,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out) int i; union { __u32 w[5]; - unsigned long l[LONGS(EXTRACT_SIZE)]; + unsigned long l[LONGS(20)]; } hash; __u32 workspace[SHA_WORKSPACE_WORDS]; __u8 extract[64]; @@ -917,6 +1036,17 @@ static void extract_buf(struct entropy_store *r, __u8 *out) sha_transform(hash.w, (__u8 *)(r->pool + i), workspace); /* + * If we have a architectural hardware random number + * generator, mix that in, too. + */ + for (i = 0; i < LONGS(20); i++) { + unsigned long v; + if (!arch_get_random_long(&v)) + break; + hash.l[i] ^= v; + } + + /* * We mix the hash back into the pool to prevent backtracking * attacks (where the attacker knows the state of the pool * plus the current outputs, and attempts to find previous @@ -945,17 +1075,6 @@ static void extract_buf(struct entropy_store *r, __u8 *out) hash.w[1] ^= hash.w[4]; hash.w[2] ^= rol32(hash.w[2], 16); - /* - * If we have a architectural hardware random number - * generator, mix that in, too. 
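push_to_pool() above runs as a workqueue item so that excess input-pool entropy can be spilled into the output pools outside of the hot paths. A hedged sketch of how such a per-pool work item is typically wired up; the field names and the place the work gets queued from are assumptions here, since the actual hookup lives in hunks not shown in this diff:

#include <linux/workqueue.h>

/* Illustrative only: give each output pool a work item and kick it when
 * the producer side decides the pool should be topped up. */
struct entropy_store_sketch {
        struct work_struct push_work;
        const char *name;
};

static void push_to_pool_sketch(struct work_struct *work)
{
        struct entropy_store_sketch *r =
                container_of(work, struct entropy_store_sketch, push_work);

        pr_debug("refilling %s from its parent pool\n", r->name);
        /* ... pull a reseed's worth of bytes from the parent pool ... */
}

static struct entropy_store_sketch blocking_sketch = { .name = "blocking" };

static void sketch_init(void)
{
        INIT_WORK(&blocking_sketch.push_work, push_to_pool_sketch);
}

static void sketch_input_pool_got_full(void)
{
        /* e.g. called from the entropy-credit path when the input pool fills */
        schedule_work(&blocking_sketch.push_work);
}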
- */ - for (i = 0; i < LONGS(EXTRACT_SIZE); i++) { - unsigned long v; - if (!arch_get_random_long(&v)) - break; - hash.l[i] ^= v; - } - memcpy(out, &hash, EXTRACT_SIZE); memset(&hash, 0, sizeof(hash)); } @@ -971,10 +1090,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, if (fips_enabled) { spin_lock_irqsave(&r->lock, flags); if (!r->last_data_init) { - r->last_data_init = true; + r->last_data_init = 1; spin_unlock_irqrestore(&r->lock, flags); trace_extract_entropy(r->name, EXTRACT_SIZE, - r->entropy_count, _RET_IP_); + ENTROPY_BITS(r), _RET_IP_); xfer_secondary_pool(r, EXTRACT_SIZE); extract_buf(r, tmp); spin_lock_irqsave(&r->lock, flags); @@ -983,7 +1102,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, spin_unlock_irqrestore(&r->lock, flags); } - trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_); + trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_); xfer_secondary_pool(r, nbytes); nbytes = account(r, nbytes, min, reserved); @@ -1016,7 +1135,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, ssize_t ret = 0, i; __u8 tmp[EXTRACT_SIZE]; - trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_); + trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_); xfer_secondary_pool(r, nbytes); nbytes = account(r, nbytes, 0, 0); @@ -1056,6 +1175,14 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, */ void get_random_bytes(void *buf, int nbytes) { +#if DEBUG_RANDOM_BOOT > 0 + if (unlikely(nonblocking_pool.initialized == 0)) + printk(KERN_NOTICE "random: %pF get_random_bytes called " + "with %d bits of entropy available\n", + (void *) _RET_IP_, + nonblocking_pool.entropy_total); +#endif + trace_get_random_bytes(nbytes, _RET_IP_); extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0); } EXPORT_SYMBOL(get_random_bytes); @@ -1074,7 +1201,7 @@ void get_random_bytes_arch(void *buf, int nbytes) { char *p = buf; - trace_get_random_bytes(nbytes, _RET_IP_); + trace_get_random_bytes_arch(nbytes, _RET_IP_); while (nbytes) { unsigned long v; int chunk = min(nbytes, (int)sizeof(unsigned long)); @@ -1108,13 +1235,11 @@ static void init_std_data(struct entropy_store *r) ktime_t now = ktime_get_real(); unsigned long rv; - r->entropy_count = 0; - r->entropy_total = 0; - r->last_data_init = false; + r->last_pulled = jiffies; mix_pool_bytes(r, &now, sizeof(now), NULL); - for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) { + for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) { if (!arch_get_random_long(&rv)) - break; + rv = random_get_entropy(); mix_pool_bytes(r, &rv, sizeof(rv), NULL); } mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL); @@ -1137,7 +1262,7 @@ static int rand_initialize(void) init_std_data(&nonblocking_pool); return 0; } -module_init(rand_initialize); +early_initcall(rand_initialize); #ifdef CONFIG_BLOCK void rand_initialize_disk(struct gendisk *disk) @@ -1149,8 +1274,10 @@ void rand_initialize_disk(struct gendisk *disk) * source. 
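In extract_buf() the arch_get_random_long() contribution is now XOR-ed into the SHA-1 state right after the pool is hashed, before the backtrack-protection mixing, instead of just before the final memcpy as in the block removed above. A toy userspace sketch of the "best effort, stop at the first failure" mixing loop; arch_rng_long() is a hypothetical stand-in for arch_get_random_long():

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in: pretend the CPU RNG instruction may fail. */
static bool arch_rng_long(unsigned long *v)
{
        *v = 0x9e3779b97f4a7c15UL;      /* fixed value, just for the sketch */
        return true;
}

#define HASH_BYTES 20                   /* SHA-1 digest size, as in extract_buf() */
#define HASH_LONGS ((HASH_BYTES + sizeof(long) - 1) / sizeof(long))

static void mix_arch_rng(unsigned long hash[HASH_LONGS])
{
        unsigned long v;
        size_t i;

        /* XOR in as many hardware-RNG words as we can get; if the
         * instruction reports failure, keep whatever was already mixed. */
        for (i = 0; i < HASH_LONGS; i++) {
                if (!arch_rng_long(&v))
                        break;
                hash[i] ^= v;
        }
}

Because the hardware words are only XOR-ed in, a malfunctioning or untrusted hardware RNG cannot make the extracted output weaker than the pool contents alone.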
*/ state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL); - if (state) + if (state) { + state->last_time = INITIAL_JIFFIES; disk->random = state; + } } #endif @@ -1167,8 +1294,6 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) if (n > SEC_XFER_SIZE) n = SEC_XFER_SIZE; - DEBUG_ENT("reading %zu bits\n", n*8); - n = extract_entropy_user(&blocking_pool, buf, n); if (n < 0) { @@ -1176,8 +1301,9 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) break; } - DEBUG_ENT("read got %zd bits (%zd still needed)\n", - n*8, (nbytes-n)*8); + trace_random_read(n*8, (nbytes-n)*8, + ENTROPY_BITS(&blocking_pool), + ENTROPY_BITS(&input_pool)); if (n == 0) { if (file->f_flags & O_NONBLOCK) { @@ -1185,13 +1311,9 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) break; } - DEBUG_ENT("sleeping?\n"); - wait_event_interruptible(random_read_wait, - input_pool.entropy_count >= - random_read_wakeup_thresh); - - DEBUG_ENT("awake\n"); + ENTROPY_BITS(&input_pool) >= + random_read_wakeup_thresh); if (signal_pending(current)) { retval = -ERESTARTSYS; @@ -1214,7 +1336,18 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { - return extract_entropy_user(&nonblocking_pool, buf, nbytes); + int ret; + + if (unlikely(nonblocking_pool.initialized == 0)) + printk_once(KERN_NOTICE "random: %s urandom read " + "with %d bits of entropy available\n", + current->comm, nonblocking_pool.entropy_total); + + ret = extract_entropy_user(&nonblocking_pool, buf, nbytes); + + trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool), + ENTROPY_BITS(&input_pool)); + return ret; } static unsigned int @@ -1225,9 +1358,9 @@ random_poll(struct file *file, poll_table * wait) poll_wait(file, &random_read_wait, wait); poll_wait(file, &random_write_wait, wait); mask = 0; - if (input_pool.entropy_count >= random_read_wakeup_thresh) + if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_thresh) mask |= POLLIN | POLLRDNORM; - if (input_pool.entropy_count < random_write_wakeup_thresh) + if (ENTROPY_BITS(&input_pool) < random_write_wakeup_thresh) mask |= POLLOUT | POLLWRNORM; return mask; } @@ -1278,7 +1411,8 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) switch (cmd) { case RNDGETENTCNT: /* inherently racy, no point locking */ - if (put_user(input_pool.entropy_count, p)) + ent_count = ENTROPY_BITS(&input_pool); + if (put_user(ent_count, p)) return -EFAULT; return 0; case RNDADDTOENTCNT: @@ -1286,7 +1420,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) return -EPERM; if (get_user(ent_count, p)) return -EFAULT; - credit_entropy_bits(&input_pool, ent_count); + credit_entropy_bits_safe(&input_pool, ent_count); return 0; case RNDADDENTROPY: if (!capable(CAP_SYS_ADMIN)) @@ -1301,14 +1435,19 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) size); if (retval < 0) return retval; - credit_entropy_bits(&input_pool, ent_count); + credit_entropy_bits_safe(&input_pool, ent_count); return 0; case RNDZAPENTCNT: case RNDCLEARPOOL: - /* Clear the entropy pool counters. */ + /* + * Clear the entropy pool counters. We no longer clear + * the entropy pool, as that's silly. 
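The read, poll and ioctl paths above all go through ENTROPY_BITS() because entropy_count is now kept in fractional units: the counter is scaled up by ENTROPY_SHIFT, so byte-level comparisons use >> (ENTROPY_SHIFT + 3), as account() earlier in this hunk does. A small sketch of the arithmetic, assuming ENTROPY_SHIFT is 3 (1/8th-bit units); the actual definition is in a hunk not shown here:

#include <stdio.h>

#define ENTROPY_SHIFT 3                         /* assumption for the sketch */
#define ENTROPY_BITS(cnt) ((cnt) >> ENTROPY_SHIFT)

int main(void)
{
        int entropy_count = 1024 << ENTROPY_SHIFT;   /* pool holds 1024 bits */
        size_t nbytes = 16;                          /* caller wants 16 bytes */

        /* bytes available, rounded down, as in account() */
        int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);

        printf("avail: %d bits (%d bytes), request: %zu bytes\n",
               ENTROPY_BITS(entropy_count), have_bytes, nbytes);

        /* debit the request in fractional-bit units */
        entropy_count -= (int)(nbytes << (ENTROPY_SHIFT + 3));
        printf("after debit: %d bits\n", ENTROPY_BITS(entropy_count));
        return 0;
}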
+ */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; - rand_initialize(); + input_pool.entropy_count = 0; + nonblocking_pool.entropy_count = 0; + blocking_pool.entropy_count = 0; return 0; default: return -EINVAL; @@ -1408,6 +1547,23 @@ static int proc_do_uuid(struct ctl_table *table, int write, return proc_dostring(&fake_table, write, buffer, lenp, ppos); } +/* + * Return entropy available scaled to integral bits + */ +static int proc_do_entropy(ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + ctl_table fake_table; + int entropy_count; + + entropy_count = *(int *)table->data >> ENTROPY_SHIFT; + + fake_table.data = &entropy_count; + fake_table.maxlen = sizeof(entropy_count); + + return proc_dointvec(&fake_table, write, buffer, lenp, ppos); +} + static int sysctl_poolsize = INPUT_POOL_WORDS * 32; extern struct ctl_table random_table[]; struct ctl_table random_table[] = { @@ -1422,7 +1578,7 @@ struct ctl_table random_table[] = { .procname = "entropy_avail", .maxlen = sizeof(int), .mode = 0444, - .proc_handler = proc_dointvec, + .proc_handler = proc_do_entropy, .data = &input_pool.entropy_count, }, { @@ -1444,6 +1600,13 @@ struct ctl_table random_table[] = { .extra2 = &max_write_thresh, }, { + .procname = "urandom_min_reseed_secs", + .data = &random_min_urandom_seed, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { .procname = "boot_id", .data = &sysctl_bootid, .maxlen = 16, diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c index 0e1d89b4321b..d9e3f671c2ea 100644 --- a/drivers/clk/clk-fixed-factor.c +++ b/drivers/clk/clk-fixed-factor.c @@ -117,7 +117,7 @@ void __init of_fixed_factor_clk_setup(struct device_node *node) } if (of_property_read_u32(node, "clock-mult", &mult)) { - pr_err("%s Fixed factor clock <%s> must have a clokc-mult property\n", + pr_err("%s Fixed factor clock <%s> must have a clock-mult property\n", __func__, node->name); return; } diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index c73fc2b74de2..18c5b9b16645 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -32,11 +32,23 @@ #include <linux/atomic.h> #include <linux/pid_namespace.h> -#include <asm/unaligned.h> - #include <linux/cn_proc.h> -#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event)) +/* + * Size of a cn_msg followed by a proc_event structure. Since the + * sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we + * add one 4-byte word to the size here, and then start the actual + * cn_msg structure 4 bytes into the stack buffer. The result is that + * the immediately following proc_event structure is aligned to 8 bytes. + */ +#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4) + +/* See comment above; we test our assumption about sizeof struct cn_msg here. 
*/ +static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer) +{ + BUILD_BUG_ON(sizeof(struct cn_msg) != 20); + return (struct cn_msg *)(buffer + 4); +} static atomic_t proc_event_num_listeners = ATOMIC_INIT(0); static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC }; @@ -56,19 +68,19 @@ void proc_fork_connector(struct task_struct *task) { struct cn_msg *msg; struct proc_event *ev; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); struct timespec ts; struct task_struct *parent; if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_FORK; rcu_read_lock(); parent = rcu_dereference(task->real_parent); @@ -91,17 +103,17 @@ void proc_exec_connector(struct task_struct *task) struct cn_msg *msg; struct proc_event *ev; struct timespec ts; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_EXEC; ev->event_data.exec.process_pid = task->pid; ev->event_data.exec.process_tgid = task->tgid; @@ -117,14 +129,14 @@ void proc_id_connector(struct task_struct *task, int which_id) { struct cn_msg *msg; struct proc_event *ev; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); struct timespec ts; const struct cred *cred; if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); ev->what = which_id; @@ -145,7 +157,7 @@ void proc_id_connector(struct task_struct *task, int which_id) rcu_read_unlock(); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ @@ -159,17 +171,17 @@ void proc_sid_connector(struct task_struct *task) struct cn_msg *msg; struct proc_event *ev; struct timespec ts; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_SID; ev->event_data.sid.process_pid = task->pid; ev->event_data.sid.process_tgid = task->tgid; @@ -186,17 +198,17 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id) struct cn_msg *msg; struct proc_event *ev; struct timespec ts; - __u8 
buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_PTRACE; ev->event_data.ptrace.process_pid = task->pid; ev->event_data.ptrace.process_tgid = task->tgid; @@ -221,17 +233,17 @@ void proc_comm_connector(struct task_struct *task) struct cn_msg *msg; struct proc_event *ev; struct timespec ts; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_COMM; ev->event_data.comm.process_pid = task->pid; ev->event_data.comm.process_tgid = task->tgid; @@ -248,18 +260,18 @@ void proc_coredump_connector(struct task_struct *task) { struct cn_msg *msg; struct proc_event *ev; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); struct timespec ts; if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_COREDUMP; ev->event_data.coredump.process_pid = task->pid; ev->event_data.coredump.process_tgid = task->tgid; @@ -275,18 +287,18 @@ void proc_exit_connector(struct task_struct *task) { struct cn_msg *msg; struct proc_event *ev; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); struct timespec ts; if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_EXIT; ev->event_data.exit.process_pid = task->pid; ev->event_data.exit.process_tgid = task->tgid; @@ -312,18 +324,18 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack) { struct cn_msg *msg; struct proc_event *ev; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); struct timespec ts; if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); msg->seq = rcvd_seq; ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->cpu = -1; ev->what = PROC_EVENT_NONE; ev->event_data.ack.err = err; 
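The buffer_to_cn_msg() trick used throughout the connector hunks above works because struct cn_msg is 20 bytes: starting it 4 bytes into an 8-byte-aligned stack buffer puts the proc_event payload at offset 24, which is again a multiple of 8, so timestamp_ns can be stored directly instead of through put_unaligned(). A userspace sketch of the offset arithmetic; the structs below are stand-ins with the relevant sizes, not the real netlink definitions:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins: a 20-byte header followed immediately by a payload that
 * wants 8-byte alignment, mirroring the comment in the driver. */
struct fake_cn_msg     { uint8_t bytes[20]; };
struct fake_proc_event { uint64_t timestamp_ns; /* ... */ };

#define MSG_SIZE (sizeof(struct fake_cn_msg) + sizeof(struct fake_proc_event) + 4)

int main(void)
{
        /* 8-byte-aligned stack buffer, as __aligned(8) arranges in the driver */
        _Alignas(8) uint8_t buffer[MSG_SIZE];

        /* start the header 4 bytes in ... */
        uint8_t *msg = buffer + 4;
        /* ... so the payload lands at offset 24, a multiple of 8 */
        uint8_t *ev = msg + sizeof(struct fake_cn_msg);

        printf("payload offset %td, 8-byte aligned: %s\n", ev - buffer,
               ((uintptr_t)ev % 8) == 0 ? "yes" : "no");
        return 0;
}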
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index 6897ad85b046..d369349eeaab 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 @@ -129,7 +129,7 @@ config X86_AMD_FREQ_SENSITIVITY help This adds AMD-specific powersave bias function to the ondemand governor, which allows it to make more power-conscious frequency - change decisions based on feedback from hardware (availble on AMD + change decisions based on feedback from hardware (available on AMD Family 16h and above). Hardware feedback tells software how "sensitive" to frequency changes diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 218460fcd2e4..25a70d06c5bf 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -68,6 +68,9 @@ static void cs_check_cpu(int cpu, unsigned int load) dbs_info->requested_freq += get_freq_target(cs_tuners, policy); + if (dbs_info->requested_freq > policy->max) + dbs_info->requested_freq = policy->max; + __cpufreq_driver_target(policy, dbs_info->requested_freq, CPUFREQ_RELATION_H); return; diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 0806c31e5764..e6be63561fa6 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -328,10 +328,6 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, dbs_data->cdata->gov_dbs_timer); } - /* - * conservative does not implement micro like ondemand - * governor, thus we are bound to jiffes/HZ - */ if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { cs_dbs_info->down_skip = 0; cs_dbs_info->enable = 1; diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index 7b6dc06b1bd4..f3c22874da75 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c @@ -67,7 +67,7 @@ static int exynos_cpufreq_scale(unsigned int target_freq) /* * The policy max have been changed so that we cannot get proper * old_index with cpufreq_frequency_table_target(). Thus, ignore - * policy and get the index from the raw freqeuncy table. + * policy and get the index from the raw frequency table. */ old_index = exynos_cpufreq_get_index(old_freq); if (old_index < 0) { diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index be6d14307aa8..a0acd0bfba40 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -53,6 +53,7 @@ static unsigned int omap_getspeed(unsigned int cpu) static int omap_target(struct cpufreq_policy *policy, unsigned int index) { + int r, ret; struct dev_pm_opp *opp; unsigned long freq, volt = 0, volt_old = 0, tol = 0; unsigned int old_freq, new_freq; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index c61a6eccf169..446687cc2334 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -89,14 +89,15 @@ config AT_HDMAC Support the Atmel AHB DMA controller. config FSL_DMA - tristate "Freescale Elo and Elo Plus DMA support" + tristate "Freescale Elo series DMA support" depends on FSL_SOC select DMA_ENGINE select ASYNC_TX_ENABLE_CHANNEL_SWITCH ---help--- - Enable support for the Freescale Elo and Elo Plus DMA controllers. - The Elo is the DMA controller on some 82xx and 83xx parts, and the - Elo Plus is the DMA controller on 85xx and 86xx parts. + Enable support for the Freescale Elo series DMA controllers. 
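The cpufreq_conservative change above caps requested_freq after adding the frequency step, so a governor tick can no longer ask __cpufreq_driver_target() for more than policy->max. A minimal sketch of the clamping logic; the step value and plain integers are illustrative stand-ins for get_freq_target() and the policy fields:

#include <stdio.h>

/* Illustrative only: bump the requested frequency by one step, but never
 * beyond the policy maximum, mirroring cs_check_cpu() after the fix. */
static unsigned int bump_requested_freq(unsigned int requested,
                                        unsigned int step,
                                        unsigned int policy_max)
{
        requested += step;
        if (requested > policy_max)
                requested = policy_max;
        return requested;
}

int main(void)
{
        /* 2.9 GHz + 200 MHz step against a 3.0 GHz cap -> clamped to 3.0 GHz */
        printf("%u kHz\n", bump_requested_freq(2900000, 200000, 3000000));
        return 0;
}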
+ The Elo is the DMA controller on some mpc82xx and mpc83xx parts, the + EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on + some Txxx and Bxxx parts. config MPC512X_DMA tristate "Freescale MPC512x built-in DMA engine support" @@ -313,7 +314,7 @@ config MMP_PDMA depends on (ARCH_MMP || ARCH_PXA) select DMA_ENGINE help - Support the MMP PDMA engine for PXA and MMP platfrom. + Support the MMP PDMA engine for PXA and MMP platform. config DMA_JZ4740 tristate "JZ4740 DMA support" diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index e51a9832ef0d..16a2aa28f856 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1164,42 +1164,12 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x, kfree(txd); } -static void pl08x_unmap_buffers(struct pl08x_txd *txd) -{ - struct device *dev = txd->vd.tx.chan->device->dev; - struct pl08x_sg *dsg; - - if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - else { - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - } - } - if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - else - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - } -} - static void pl08x_desc_free(struct virt_dma_desc *vd) { struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); - if (!plchan->slave) - pl08x_unmap_buffers(txd); - + dma_descriptor_unmap(txd); if (!txd->done) pl08x_release_mux(plchan); @@ -1252,7 +1222,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, size_t bytes = 0; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; /* @@ -1267,7 +1237,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, spin_lock_irqsave(&plchan->vc.lock, flags); ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) { + if (ret != DMA_COMPLETE) { vd = vchan_find_desc(&plchan->vc, cookie); if (vd) { /* On the issued list, so hasn't been processed yet */ @@ -2138,8 +2108,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); - ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, - DRIVER_NAME, pl08x); + ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x); if (ret) { dev_err(&adev->dev, "%s failed to request interrupt %d\n", __func__, adev->irq[0]); diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index c787f38a186a..e2c04dc81e2a 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -344,31 +344,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) /* move myself to free_list */ list_move(&desc->desc_node, &atchan->free_list); - /* unmap dma addresses (not on slave channels) */ - if (!atchan->chan_common.private) { - struct device *parent = chan2parent(&atchan->chan_common); - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(parent, - desc->lli.daddr, - 
desc->len, DMA_FROM_DEVICE); - else - dma_unmap_page(parent, - desc->lli.daddr, - desc->len, DMA_FROM_DEVICE); - } - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(parent, - desc->lli.saddr, - desc->len, DMA_TO_DEVICE); - else - dma_unmap_page(parent, - desc->lli.saddr, - desc->len, DMA_TO_DEVICE); - } - } - + dma_descriptor_unmap(txd); /* for cyclic transfers, * no need to replay callback function while stopping */ if (!atc_chan_is_cyclic(atchan)) { @@ -1102,7 +1078,7 @@ atc_tx_status(struct dma_chan *chan, int bytes = 0; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; /* * There's no point calculating the residue if there's diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 31011d2a26fc..3c6716e0b78e 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -2369,7 +2369,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; dma_set_residue(txstate, coh901318_get_bytes_left(chan)); @@ -2694,7 +2694,7 @@ static int __init coh901318_probe(struct platform_device *pdev) if (irq < 0) return irq; - err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED, + err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0, "coh901318", base); if (err) return err; diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index 7c82b92f9b16..c29dacff66fa 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c @@ -141,6 +141,9 @@ struct cppi41_dd { const struct chan_queues *queues_rx; const struct chan_queues *queues_tx; struct chan_queues td_queue; + + /* context for suspend/resume */ + unsigned int dma_tdfdq; }; #define FIST_COMPLETION_QUEUE 93 @@ -263,6 +266,15 @@ static u32 pd_trans_len(u32 val) return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1); } +static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) +{ + u32 desc; + + desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num)); + desc &= ~0x1f; + return desc; +} + static irqreturn_t cppi41_irq(int irq, void *data) { struct cppi41_dd *cdd = data; @@ -300,8 +312,7 @@ static irqreturn_t cppi41_irq(int irq, void *data) q_num = __fls(val); val &= ~(1 << q_num); q_num += 32 * i; - desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(q_num)); - desc &= ~0x1f; + desc = cppi41_pop_desc(cdd, q_num); c = desc_to_chan(cdd, desc); if (WARN_ON(!c)) { pr_err("%s() q %d desc %08x\n", __func__, @@ -353,7 +364,7 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan, /* lock */ ret = dma_cookie_status(chan, cookie, txstate); - if (txstate && ret == DMA_SUCCESS) + if (txstate && ret == DMA_COMPLETE) txstate->residue = c->residue; /* unlock */ @@ -517,15 +528,6 @@ static void cppi41_compute_td_desc(struct cppi41_desc *d) d->pd0 = DESC_TYPE_TEARD << DESC_TYPE; } -static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) -{ - u32 desc; - - desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num)); - desc &= ~0x1f; - return desc; -} - static int cppi41_tear_down_chan(struct cppi41_channel *c) { struct cppi41_dd *cdd = c->cdd; @@ -561,36 +563,26 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c) c->td_retry = 100; } - if (!c->td_seen) { - unsigned td_comp_queue; + if (!c->td_seen || !c->td_desc_seen) { - if (c->is_tx) - td_comp_queue = cdd->td_queue.complete; - else - td_comp_queue = c->q_comp_num; + 
desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete); + if (!desc_phys) + desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); - desc_phys = cppi41_pop_desc(cdd, td_comp_queue); - if (desc_phys) { - __iormb(); + if (desc_phys == c->desc_phys) { + c->td_desc_seen = 1; + + } else if (desc_phys == td_desc_phys) { + u32 pd0; - if (desc_phys == td_desc_phys) { - u32 pd0; - pd0 = td->pd0; - WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD); - WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX)); - WARN_ON((pd0 & 0x1f) != c->port_num); - } else { - WARN_ON_ONCE(1); - } - c->td_seen = 1; - } - } - if (!c->td_desc_seen) { - desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); - if (desc_phys) { __iormb(); - WARN_ON(c->desc_phys != desc_phys); - c->td_desc_seen = 1; + pd0 = td->pd0; + WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD); + WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX)); + WARN_ON((pd0 & 0x1f) != c->port_num); + c->td_seen = 1; + } else if (desc_phys) { + WARN_ON_ONCE(1); } } c->td_retry--; @@ -609,7 +601,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c) WARN_ON(!c->td_retry); if (!c->td_desc_seen) { - desc_phys = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); + desc_phys = cppi41_pop_desc(cdd, c->q_num); WARN_ON(!desc_phys); } @@ -674,14 +666,14 @@ static void cleanup_chans(struct cppi41_dd *cdd) } } -static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd) +static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd) { struct cppi41_channel *cchan; int i; int ret; u32 n_chans; - ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels", + ret = of_property_read_u32(dev->of_node, "#dma-channels", &n_chans); if (ret) return ret; @@ -719,7 +711,7 @@ err: return -ENOMEM; } -static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd) +static void purge_descs(struct device *dev, struct cppi41_dd *cdd) { unsigned int mem_decs; int i; @@ -731,7 +723,7 @@ static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd) cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i)); cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i)); - dma_free_coherent(&pdev->dev, mem_decs, cdd->cd, + dma_free_coherent(dev, mem_decs, cdd->cd, cdd->descs_phys); } } @@ -741,19 +733,19 @@ static void disable_sched(struct cppi41_dd *cdd) cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); } -static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd) +static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd) { disable_sched(cdd); - purge_descs(pdev, cdd); + purge_descs(dev, cdd); cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); - dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch, + dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch, cdd->scratch_phys); } -static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd) +static int init_descs(struct device *dev, struct cppi41_dd *cdd) { unsigned int desc_size; unsigned int mem_decs; @@ -777,7 +769,7 @@ static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd) reg |= ilog2(ALLOC_DECS_NUM) - 5; BUILD_BUG_ON(DESCS_AREAS != 1); - cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs, + cdd->cd = dma_alloc_coherent(dev, mem_decs, &cdd->descs_phys, GFP_KERNEL); if (!cdd->cd) return -ENOMEM; @@ -813,12 +805,12 @@ static void init_sched(struct cppi41_dd *cdd) cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL); } -static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) +static int 
init_cppi41(struct device *dev, struct cppi41_dd *cdd) { int ret; BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1)); - cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, + cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE, &cdd->scratch_phys, GFP_KERNEL); if (!cdd->qmgr_scratch) return -ENOMEM; @@ -827,7 +819,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); - ret = init_descs(pdev, cdd); + ret = init_descs(dev, cdd); if (ret) goto err_td; @@ -835,7 +827,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) init_sched(cdd); return 0; err_td: - deinit_cpii41(pdev, cdd); + deinit_cppi41(dev, cdd); return ret; } @@ -914,11 +906,11 @@ static const struct of_device_id cppi41_dma_ids[] = { }; MODULE_DEVICE_TABLE(of, cppi41_dma_ids); -static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev) +static const struct cppi_glue_infos *get_glue_info(struct device *dev) { const struct of_device_id *of_id; - of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node); + of_id = of_match_node(cppi41_dma_ids, dev->of_node); if (!of_id) return NULL; return of_id->data; @@ -927,11 +919,12 @@ static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev) static int cppi41_dma_probe(struct platform_device *pdev) { struct cppi41_dd *cdd; + struct device *dev = &pdev->dev; const struct cppi_glue_infos *glue_info; int irq; int ret; - glue_info = get_glue_info(pdev); + glue_info = get_glue_info(dev); if (!glue_info) return -EINVAL; @@ -946,14 +939,14 @@ static int cppi41_dma_probe(struct platform_device *pdev) cdd->ddev.device_issue_pending = cppi41_dma_issue_pending; cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; cdd->ddev.device_control = cppi41_dma_control; - cdd->ddev.dev = &pdev->dev; + cdd->ddev.dev = dev; INIT_LIST_HEAD(&cdd->ddev.channels); cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; - cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0); - cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1); - cdd->sched_mem = of_iomap(pdev->dev.of_node, 2); - cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3); + cdd->usbss_mem = of_iomap(dev->of_node, 0); + cdd->ctrl_mem = of_iomap(dev->of_node, 1); + cdd->sched_mem = of_iomap(dev->of_node, 2); + cdd->qmgr_mem = of_iomap(dev->of_node, 3); if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem || !cdd->qmgr_mem) { @@ -961,31 +954,31 @@ static int cppi41_dma_probe(struct platform_device *pdev) goto err_remap; } - pm_runtime_enable(&pdev->dev); - ret = pm_runtime_get_sync(&pdev->dev); - if (ret) + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) goto err_get_sync; cdd->queues_rx = glue_info->queues_rx; cdd->queues_tx = glue_info->queues_tx; cdd->td_queue = glue_info->td_queue; - ret = init_cppi41(pdev, cdd); + ret = init_cppi41(dev, cdd); if (ret) goto err_init_cppi; - ret = cppi41_add_chans(pdev, cdd); + ret = cppi41_add_chans(dev, cdd); if (ret) goto err_chans; - irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + irq = irq_of_parse_and_map(dev->of_node, 0); if (!irq) goto err_irq; cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); ret = request_irq(irq, glue_info->isr, IRQF_SHARED, - dev_name(&pdev->dev), cdd); + dev_name(dev), cdd); if (ret) goto err_irq; cdd->irq = irq; @@ -994,7 +987,7 @@ static int cppi41_dma_probe(struct platform_device *pdev) if (ret) goto err_dma_reg; - ret = 
of_dma_controller_register(pdev->dev.of_node, + ret = of_dma_controller_register(dev->of_node, cppi41_dma_xlate, &cpp41_dma_info); if (ret) goto err_of; @@ -1009,11 +1002,11 @@ err_irq: cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); cleanup_chans(cdd); err_chans: - deinit_cpii41(pdev, cdd); + deinit_cppi41(dev, cdd); err_init_cppi: - pm_runtime_put(&pdev->dev); + pm_runtime_put(dev); err_get_sync: - pm_runtime_disable(&pdev->dev); + pm_runtime_disable(dev); iounmap(cdd->usbss_mem); iounmap(cdd->ctrl_mem); iounmap(cdd->sched_mem); @@ -1033,7 +1026,7 @@ static int cppi41_dma_remove(struct platform_device *pdev) cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); free_irq(cdd->irq, cdd); cleanup_chans(cdd); - deinit_cpii41(pdev, cdd); + deinit_cppi41(&pdev->dev, cdd); iounmap(cdd->usbss_mem); iounmap(cdd->ctrl_mem); iounmap(cdd->sched_mem); @@ -1044,12 +1037,53 @@ static int cppi41_dma_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int cppi41_suspend(struct device *dev) +{ + struct cppi41_dd *cdd = dev_get_drvdata(dev); + + cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ); + cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); + disable_sched(cdd); + + return 0; +} + +static int cppi41_resume(struct device *dev) +{ + struct cppi41_dd *cdd = dev_get_drvdata(dev); + struct cppi41_channel *c; + int i; + + for (i = 0; i < DESCS_AREAS; i++) + cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i)); + + list_for_each_entry(c, &cdd->ddev.channels, chan.device_node) + if (!c->is_tx) + cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0); + + init_sched(cdd); + + cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ); + cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE); + cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); + cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); + + cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume); + static struct platform_driver cpp41_dma_driver = { .probe = cppi41_dma_probe, .remove = cppi41_dma_remove, .driver = { .name = "cppi41-dma-engine", .owner = THIS_MODULE, + .pm = &cppi41_pm_ops, .of_match_table = of_match_ptr(cppi41_dma_ids), }, }; diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index b0c0c8268d42..94c380f07538 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c @@ -491,7 +491,7 @@ static enum dma_status jz4740_dma_tx_status(struct dma_chan *c, unsigned long flags; status = dma_cookie_status(c, cookie, state); - if (status == DMA_SUCCESS || !state) + if (status == DMA_COMPLETE || !state) return status; spin_lock_irqsave(&chan->vchan.lock, flags); diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 9162ac80c18f..ea806bdc12ef 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -65,6 +65,7 @@ #include <linux/acpi.h> #include <linux/acpi_dma.h> #include <linux/of_dma.h> +#include <linux/mempool.h> static DEFINE_MUTEX(dma_list_mutex); static DEFINE_IDR(dma_idr); @@ -901,98 +902,132 @@ void dma_async_device_unregister(struct dma_device *device) } EXPORT_SYMBOL(dma_async_device_unregister); -/** - * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses - * @chan: DMA channel to offload copy to - * @dest: destination address (virtual) - * @src: source address (virtual) - * @len: length - * - * Both @dest and @src must be mappable to a bus address according to the - * DMA mapping API rules for streaming 
mappings. - * Both @dest and @src must stay memory resident (kernel memory or locked - * user space pages). - */ -dma_cookie_t -dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, - void *src, size_t len) -{ - struct dma_device *dev = chan->device; - struct dma_async_tx_descriptor *tx; - dma_addr_t dma_dest, dma_src; - dma_cookie_t cookie; - unsigned long flags; +struct dmaengine_unmap_pool { + struct kmem_cache *cache; + const char *name; + mempool_t *pool; + size_t size; +}; - dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE); - dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE); - flags = DMA_CTRL_ACK | - DMA_COMPL_SRC_UNMAP_SINGLE | - DMA_COMPL_DEST_UNMAP_SINGLE; - tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); +#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } +static struct dmaengine_unmap_pool unmap_pool[] = { + __UNMAP_POOL(2), + #if IS_ENABLED(CONFIG_ASYNC_TX_DMA) + __UNMAP_POOL(16), + __UNMAP_POOL(128), + __UNMAP_POOL(256), + #endif +}; - if (!tx) { - dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); - dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE); - return -ENOMEM; +static struct dmaengine_unmap_pool *__get_unmap_pool(int nr) +{ + int order = get_count_order(nr); + + switch (order) { + case 0 ... 1: + return &unmap_pool[0]; + case 2 ... 4: + return &unmap_pool[1]; + case 5 ... 7: + return &unmap_pool[2]; + case 8: + return &unmap_pool[3]; + default: + BUG(); + return NULL; } +} - tx->callback = NULL; - cookie = tx->tx_submit(tx); +static void dmaengine_unmap(struct kref *kref) +{ + struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref); + struct device *dev = unmap->dev; + int cnt, i; + + cnt = unmap->to_cnt; + for (i = 0; i < cnt; i++) + dma_unmap_page(dev, unmap->addr[i], unmap->len, + DMA_TO_DEVICE); + cnt += unmap->from_cnt; + for (; i < cnt; i++) + dma_unmap_page(dev, unmap->addr[i], unmap->len, + DMA_FROM_DEVICE); + cnt += unmap->bidi_cnt; + for (; i < cnt; i++) { + if (unmap->addr[i] == 0) + continue; + dma_unmap_page(dev, unmap->addr[i], unmap->len, + DMA_BIDIRECTIONAL); + } + mempool_free(unmap, __get_unmap_pool(cnt)->pool); +} - preempt_disable(); - __this_cpu_add(chan->local->bytes_transferred, len); - __this_cpu_inc(chan->local->memcpy_count); - preempt_enable(); +void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) +{ + if (unmap) + kref_put(&unmap->kref, dmaengine_unmap); +} +EXPORT_SYMBOL_GPL(dmaengine_unmap_put); - return cookie; +static void dmaengine_destroy_unmap_pool(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { + struct dmaengine_unmap_pool *p = &unmap_pool[i]; + + if (p->pool) + mempool_destroy(p->pool); + p->pool = NULL; + if (p->cache) + kmem_cache_destroy(p->cache); + p->cache = NULL; + } } -EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf); -/** - * dma_async_memcpy_buf_to_pg - offloaded copy from address to page - * @chan: DMA channel to offload copy to - * @page: destination page - * @offset: offset in page to copy to - * @kdata: source address (virtual) - * @len: length - * - * Both @page/@offset and @kdata must be mappable to a bus address according - * to the DMA mapping API rules for streaming mappings. 
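The new dmaengine unmap machinery above keeps several mempool-backed caches of unmap descriptors, and __get_unmap_pool() picks one by the number of addresses to record: get_count_order() rounds the count up to a power of two and the resulting order selects among the 2-, 16-, 128- and 256-entry pools (the larger three only exist when CONFIG_ASYNC_TX_DMA is enabled, per the #if in the hunk). A userspace sketch of that mapping; ceil_order() stands in for the kernel's get_count_order(), and the GCC case ranges mirror the driver code:

#include <stdio.h>

/* Stand-in for get_count_order(): smallest order with (1 << order) >= nr. */
static int ceil_order(unsigned int nr)
{
        int order = 0;

        while ((1u << order) < nr)
                order++;
        return order;
}

/* Mirror __get_unmap_pool(): map an address count to one of the
 * 2/16/128/256-entry unmap pools set up by __UNMAP_POOL() above. */
static unsigned int unmap_pool_size(unsigned int nr_addrs)
{
        switch (ceil_order(nr_addrs)) {
        case 0 ... 1: return 2;
        case 2 ... 4: return 16;
        case 5 ... 7: return 128;
        case 8:       return 256;
        default:      return 0;         /* the kernel BUG()s here */
        }
}

int main(void)
{
        unsigned int nr[] = { 2, 3, 16, 17, 129, 256 };

        for (unsigned int i = 0; i < sizeof(nr) / sizeof(nr[0]); i++)
                printf("nr=%u -> pool of %u addresses\n",
                       nr[i], unmap_pool_size(nr[i]));
        return 0;
}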
- * Both @page/@offset and @kdata must stay memory resident (kernel memory or - * locked user space pages) - */ -dma_cookie_t -dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, - unsigned int offset, void *kdata, size_t len) +static int __init dmaengine_init_unmap_pool(void) { - struct dma_device *dev = chan->device; - struct dma_async_tx_descriptor *tx; - dma_addr_t dma_dest, dma_src; - dma_cookie_t cookie; - unsigned long flags; + int i; - dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE); - dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE); - flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE; - tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); + for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { + struct dmaengine_unmap_pool *p = &unmap_pool[i]; + size_t size; - if (!tx) { - dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); - dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE); - return -ENOMEM; + size = sizeof(struct dmaengine_unmap_data) + + sizeof(dma_addr_t) * p->size; + + p->cache = kmem_cache_create(p->name, size, 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!p->cache) + break; + p->pool = mempool_create_slab_pool(1, p->cache); + if (!p->pool) + break; } - tx->callback = NULL; - cookie = tx->tx_submit(tx); + if (i == ARRAY_SIZE(unmap_pool)) + return 0; - preempt_disable(); - __this_cpu_add(chan->local->bytes_transferred, len); - __this_cpu_inc(chan->local->memcpy_count); - preempt_enable(); + dmaengine_destroy_unmap_pool(); + return -ENOMEM; +} - return cookie; +struct dmaengine_unmap_data * +dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) +{ + struct dmaengine_unmap_data *unmap; + + unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags); + if (!unmap) + return NULL; + + memset(unmap, 0, sizeof(*unmap)); + kref_init(&unmap->kref); + unmap->dev = dev; + + return unmap; } -EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg); +EXPORT_SYMBOL(dmaengine_get_unmap_data); /** * dma_async_memcpy_pg_to_pg - offloaded copy from page to page @@ -1015,24 +1050,33 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, { struct dma_device *dev = chan->device; struct dma_async_tx_descriptor *tx; - dma_addr_t dma_dest, dma_src; + struct dmaengine_unmap_data *unmap; dma_cookie_t cookie; unsigned long flags; - dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE); - dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len, - DMA_FROM_DEVICE); + unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO); + if (!unmap) + return -ENOMEM; + + unmap->to_cnt = 1; + unmap->from_cnt = 1; + unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len, + DMA_TO_DEVICE); + unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len, + DMA_FROM_DEVICE); + unmap->len = len; flags = DMA_CTRL_ACK; - tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); + tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0], + len, flags); if (!tx) { - dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE); - dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE); + dmaengine_unmap_put(unmap); return -ENOMEM; } - tx->callback = NULL; + dma_set_unmap(tx, unmap); cookie = tx->tx_submit(tx); + dmaengine_unmap_put(unmap); preempt_disable(); __this_cpu_add(chan->local->bytes_transferred, len); @@ -1043,6 +1087,52 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, } EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg); +/** + * dma_async_memcpy_buf_to_buf - offloaded copy 
between virtual addresses + * @chan: DMA channel to offload copy to + * @dest: destination address (virtual) + * @src: source address (virtual) + * @len: length + * + * Both @dest and @src must be mappable to a bus address according to the + * DMA mapping API rules for streaming mappings. + * Both @dest and @src must stay memory resident (kernel memory or locked + * user space pages). + */ +dma_cookie_t +dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, + void *src, size_t len) +{ + return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest), + (unsigned long) dest & ~PAGE_MASK, + virt_to_page(src), + (unsigned long) src & ~PAGE_MASK, len); +} +EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf); + +/** + * dma_async_memcpy_buf_to_pg - offloaded copy from address to page + * @chan: DMA channel to offload copy to + * @page: destination page + * @offset: offset in page to copy to + * @kdata: source address (virtual) + * @len: length + * + * Both @page/@offset and @kdata must be mappable to a bus address according + * to the DMA mapping API rules for streaming mappings. + * Both @page/@offset and @kdata must stay memory resident (kernel memory or + * locked user space pages) + */ +dma_cookie_t +dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, + unsigned int offset, void *kdata, size_t len) +{ + return dma_async_memcpy_pg_to_pg(chan, page, offset, + virt_to_page(kdata), + (unsigned long) kdata & ~PAGE_MASK, len); +} +EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg); + void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, struct dma_chan *chan) { @@ -1062,7 +1152,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); if (!tx) - return DMA_SUCCESS; + return DMA_COMPLETE; while (tx->cookie == -EBUSY) { if (time_after_eq(jiffies, dma_sync_wait_timeout)) { @@ -1116,6 +1206,10 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies); static int __init dma_bus_init(void) { + int err = dmaengine_init_unmap_pool(); + + if (err) + return err; return class_register(&dma_devclass); } arch_initcall(dma_bus_init); diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 92f796cdc6ab..20f9a3aaf926 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -8,6 +8,8 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> @@ -19,10 +21,6 @@ #include <linux/random.h> #include <linux/slab.h> #include <linux/wait.h> -#include <linux/ctype.h> -#include <linux/debugfs.h> -#include <linux/uaccess.h> -#include <linux/seq_file.h> static unsigned int test_buf_size = 16384; module_param(test_buf_size, uint, S_IRUGO | S_IWUSR); @@ -68,92 +66,13 @@ module_param(timeout, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " "Pass -1 for infinite timeout"); -/* Maximum amount of mismatched bytes in buffer to print */ -#define MAX_ERROR_COUNT 32 - -/* - * Initialization patterns. All bytes in the source buffer has bit 7 - * set, all bytes in the destination buffer has bit 7 cleared. - * - * Bit 6 is set for all bytes which are to be copied by the DMA - * engine. Bit 5 is set for all bytes which are to be overwritten by - * the DMA engine. - * - * The remaining bits are the inverse of a counter which increments by - * one for each byte address. 
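dma_async_memcpy_buf_to_buf() and dma_async_memcpy_buf_to_pg() above become thin wrappers that split each virtual address into a page plus an in-page offset and defer to dma_async_memcpy_pg_to_pg(), so all three entry points share the same unmap-data bookkeeping. A small userspace sketch of the address split; a PAGE_SIZE of 4096 is an assumption for the sketch, and the page-aligned base stands in for virt_to_page():

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u                 /* assumption for the sketch */

/* Split a (virtual) address the way the wrappers do: a page-aligned base
 * plus the offset within the page (the "& ~PAGE_MASK" part). */
static void split_addr(const void *p, uintptr_t *page_base,
                       unsigned int *offset)
{
        uintptr_t addr = (uintptr_t)p;

        *page_base = addr & ~(uintptr_t)(PAGE_SIZE - 1);
        *offset    = addr &  (PAGE_SIZE - 1);
}

int main(void)
{
        static char buf[8192];
        uintptr_t base;
        unsigned int off;

        split_addr(buf + 5000, &base, &off);
        printf("base=%#lx offset=%u\n", (unsigned long)base, off);
        return 0;
}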
- */ -#define PATTERN_SRC 0x80 -#define PATTERN_DST 0x00 -#define PATTERN_COPY 0x40 -#define PATTERN_OVERWRITE 0x20 -#define PATTERN_COUNT_MASK 0x1f - -enum dmatest_error_type { - DMATEST_ET_OK, - DMATEST_ET_MAP_SRC, - DMATEST_ET_MAP_DST, - DMATEST_ET_PREP, - DMATEST_ET_SUBMIT, - DMATEST_ET_TIMEOUT, - DMATEST_ET_DMA_ERROR, - DMATEST_ET_DMA_IN_PROGRESS, - DMATEST_ET_VERIFY, - DMATEST_ET_VERIFY_BUF, -}; - -struct dmatest_verify_buffer { - unsigned int index; - u8 expected; - u8 actual; -}; - -struct dmatest_verify_result { - unsigned int error_count; - struct dmatest_verify_buffer data[MAX_ERROR_COUNT]; - u8 pattern; - bool is_srcbuf; -}; - -struct dmatest_thread_result { - struct list_head node; - unsigned int n; - unsigned int src_off; - unsigned int dst_off; - unsigned int len; - enum dmatest_error_type type; - union { - unsigned long data; - dma_cookie_t cookie; - enum dma_status status; - int error; - struct dmatest_verify_result *vr; - }; -}; - -struct dmatest_result { - struct list_head node; - char *name; - struct list_head results; -}; - -struct dmatest_info; - -struct dmatest_thread { - struct list_head node; - struct dmatest_info *info; - struct task_struct *task; - struct dma_chan *chan; - u8 **srcs; - u8 **dsts; - enum dma_transaction_type type; - bool done; -}; +static bool noverify; +module_param(noverify, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(noverify, "Disable random data setup and verification"); -struct dmatest_chan { - struct list_head node; - struct dma_chan *chan; - struct list_head threads; -}; +static bool verbose; +module_param(verbose, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)"); /** * struct dmatest_params - test parameters. @@ -177,6 +96,7 @@ struct dmatest_params { unsigned int xor_sources; unsigned int pq_sources; int timeout; + bool noverify; }; /** @@ -184,7 +104,7 @@ struct dmatest_params { * @params: test parameters * @lock: access protection to the fields of this structure */ -struct dmatest_info { +static struct dmatest_info { /* Test parameters */ struct dmatest_params params; @@ -192,16 +112,95 @@ struct dmatest_info { struct list_head channels; unsigned int nr_channels; struct mutex lock; + bool did_init; +} test_info = { + .channels = LIST_HEAD_INIT(test_info.channels), + .lock = __MUTEX_INITIALIZER(test_info.lock), +}; + +static int dmatest_run_set(const char *val, const struct kernel_param *kp); +static int dmatest_run_get(char *val, const struct kernel_param *kp); +static struct kernel_param_ops run_ops = { + .set = dmatest_run_set, + .get = dmatest_run_get, +}; +static bool dmatest_run; +module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(run, "Run the test (default: false)"); + +/* Maximum amount of mismatched bytes in buffer to print */ +#define MAX_ERROR_COUNT 32 + +/* + * Initialization patterns. All bytes in the source buffer has bit 7 + * set, all bytes in the destination buffer has bit 7 cleared. + * + * Bit 6 is set for all bytes which are to be copied by the DMA + * engine. Bit 5 is set for all bytes which are to be overwritten by + * the DMA engine. + * + * The remaining bits are the inverse of a counter which increments by + * one for each byte address. 
+ */ +#define PATTERN_SRC 0x80 +#define PATTERN_DST 0x00 +#define PATTERN_COPY 0x40 +#define PATTERN_OVERWRITE 0x20 +#define PATTERN_COUNT_MASK 0x1f - /* debugfs related stuff */ - struct dentry *root; +struct dmatest_thread { + struct list_head node; + struct dmatest_info *info; + struct task_struct *task; + struct dma_chan *chan; + u8 **srcs; + u8 **dsts; + enum dma_transaction_type type; + bool done; +}; - /* Test results */ - struct list_head results; - struct mutex results_lock; +struct dmatest_chan { + struct list_head node; + struct dma_chan *chan; + struct list_head threads; }; -static struct dmatest_info test_info; +static DECLARE_WAIT_QUEUE_HEAD(thread_wait); +static bool wait; + +static bool is_threaded_test_run(struct dmatest_info *info) +{ + struct dmatest_chan *dtc; + + list_for_each_entry(dtc, &info->channels, node) { + struct dmatest_thread *thread; + + list_for_each_entry(thread, &dtc->threads, node) { + if (!thread->done) + return true; + } + } + + return false; +} + +static int dmatest_wait_get(char *val, const struct kernel_param *kp) +{ + struct dmatest_info *info = &test_info; + struct dmatest_params *params = &info->params; + + if (params->iterations) + wait_event(thread_wait, !is_threaded_test_run(info)); + wait = true; + return param_get_bool(val, kp); +} + +static struct kernel_param_ops wait_ops = { + .get = dmatest_wait_get, + .set = param_set_bool, +}; +module_param_cb(wait, &wait_ops, &wait, S_IRUGO); +MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)"); static bool dmatest_match_channel(struct dmatest_params *params, struct dma_chan *chan) @@ -223,7 +222,7 @@ static unsigned long dmatest_random(void) { unsigned long buf; - get_random_bytes(&buf, sizeof(buf)); + prandom_bytes(&buf, sizeof(buf)); return buf; } @@ -262,9 +261,31 @@ static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, } } -static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, - unsigned int start, unsigned int end, unsigned int counter, - u8 pattern, bool is_srcbuf) +static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, + unsigned int counter, bool is_srcbuf) +{ + u8 diff = actual ^ pattern; + u8 expected = pattern | (~counter & PATTERN_COUNT_MASK); + const char *thread_name = current->comm; + + if (is_srcbuf) + pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n", + thread_name, index, expected, actual); + else if ((pattern & PATTERN_COPY) + && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) + pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n", + thread_name, index, expected, actual); + else if (diff & PATTERN_SRC) + pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n", + thread_name, index, expected, actual); + else + pr_warn("%s: dstbuf[0x%x] mismatch! 
Expected %02x, got %02x\n", + thread_name, index, expected, actual); +} + +static unsigned int dmatest_verify(u8 **bufs, unsigned int start, + unsigned int end, unsigned int counter, u8 pattern, + bool is_srcbuf) { unsigned int i; unsigned int error_count = 0; @@ -272,7 +293,6 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, u8 expected; u8 *buf; unsigned int counter_orig = counter; - struct dmatest_verify_buffer *vb; for (; (buf = *bufs); bufs++) { counter = counter_orig; @@ -280,12 +300,9 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, actual = buf[i]; expected = pattern | (~counter & PATTERN_COUNT_MASK); if (actual != expected) { - if (error_count < MAX_ERROR_COUNT && vr) { - vb = &vr->data[error_count]; - vb->index = i; - vb->expected = expected; - vb->actual = actual; - } + if (error_count < MAX_ERROR_COUNT) + dmatest_mismatch(actual, pattern, i, + counter, is_srcbuf); error_count++; } counter++; @@ -293,7 +310,7 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, } if (error_count > MAX_ERROR_COUNT) - pr_warning("%s: %u errors suppressed\n", + pr_warn("%s: %u errors suppressed\n", current->comm, error_count - MAX_ERROR_COUNT); return error_count; @@ -313,20 +330,6 @@ static void dmatest_callback(void *arg) wake_up_all(done->wait); } -static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len, - unsigned int count) -{ - while (count--) - dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE); -} - -static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len, - unsigned int count) -{ - while (count--) - dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL); -} - static unsigned int min_odd(unsigned int x, unsigned int y) { unsigned int val = min(x, y); @@ -334,172 +337,49 @@ static unsigned int min_odd(unsigned int x, unsigned int y) return val % 2 ? 
val : val - 1; } -static char *verify_result_get_one(struct dmatest_verify_result *vr, - unsigned int i) +static void result(const char *err, unsigned int n, unsigned int src_off, + unsigned int dst_off, unsigned int len, unsigned long data) { - struct dmatest_verify_buffer *vb = &vr->data[i]; - u8 diff = vb->actual ^ vr->pattern; - static char buf[512]; - char *msg; - - if (vr->is_srcbuf) - msg = "srcbuf overwritten!"; - else if ((vr->pattern & PATTERN_COPY) - && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) - msg = "dstbuf not copied!"; - else if (diff & PATTERN_SRC) - msg = "dstbuf was copied!"; - else - msg = "dstbuf mismatch!"; - - snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg, - vb->index, vb->expected, vb->actual); - - return buf; + pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)", + current->comm, n, err, src_off, dst_off, len, data); } -static char *thread_result_get(const char *name, - struct dmatest_thread_result *tr) +static void dbg_result(const char *err, unsigned int n, unsigned int src_off, + unsigned int dst_off, unsigned int len, + unsigned long data) { - static const char * const messages[] = { - [DMATEST_ET_OK] = "No errors", - [DMATEST_ET_MAP_SRC] = "src mapping error", - [DMATEST_ET_MAP_DST] = "dst mapping error", - [DMATEST_ET_PREP] = "prep error", - [DMATEST_ET_SUBMIT] = "submit error", - [DMATEST_ET_TIMEOUT] = "test timed out", - [DMATEST_ET_DMA_ERROR] = - "got completion callback (DMA_ERROR)", - [DMATEST_ET_DMA_IN_PROGRESS] = - "got completion callback (DMA_IN_PROGRESS)", - [DMATEST_ET_VERIFY] = "errors", - [DMATEST_ET_VERIFY_BUF] = "verify errors", - }; - static char buf[512]; - - snprintf(buf, sizeof(buf) - 1, - "%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", - name, tr->n, messages[tr->type], tr->src_off, tr->dst_off, - tr->len, tr->data); - - return buf; + pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)", + current->comm, n, err, src_off, dst_off, len, data); } -static int thread_result_add(struct dmatest_info *info, - struct dmatest_result *r, enum dmatest_error_type type, - unsigned int n, unsigned int src_off, unsigned int dst_off, - unsigned int len, unsigned long data) -{ - struct dmatest_thread_result *tr; - - tr = kzalloc(sizeof(*tr), GFP_KERNEL); - if (!tr) - return -ENOMEM; - - tr->type = type; - tr->n = n; - tr->src_off = src_off; - tr->dst_off = dst_off; - tr->len = len; - tr->data = data; +#define verbose_result(err, n, src_off, dst_off, len, data) ({ \ + if (verbose) \ + result(err, n, src_off, dst_off, len, data); \ + else \ + dbg_result(err, n, src_off, dst_off, len, data); \ +}) - mutex_lock(&info->results_lock); - list_add_tail(&tr->node, &r->results); - mutex_unlock(&info->results_lock); - - if (tr->type == DMATEST_ET_OK) - pr_debug("%s\n", thread_result_get(r->name, tr)); - else - pr_warn("%s\n", thread_result_get(r->name, tr)); - - return 0; -} - -static unsigned int verify_result_add(struct dmatest_info *info, - struct dmatest_result *r, unsigned int n, - unsigned int src_off, unsigned int dst_off, unsigned int len, - u8 **bufs, int whence, unsigned int counter, u8 pattern, - bool is_srcbuf) +static unsigned long long dmatest_persec(s64 runtime, unsigned int val) { - struct dmatest_verify_result *vr; - unsigned int error_count; - unsigned int buf_off = is_srcbuf ? 
src_off : dst_off; - unsigned int start, end; - - if (whence < 0) { - start = 0; - end = buf_off; - } else if (whence > 0) { - start = buf_off + len; - end = info->params.buf_size; - } else { - start = buf_off; - end = buf_off + len; - } + unsigned long long per_sec = 1000000; - vr = kmalloc(sizeof(*vr), GFP_KERNEL); - if (!vr) { - pr_warn("dmatest: No memory to store verify result\n"); - return dmatest_verify(NULL, bufs, start, end, counter, pattern, - is_srcbuf); - } - - vr->pattern = pattern; - vr->is_srcbuf = is_srcbuf; - - error_count = dmatest_verify(vr, bufs, start, end, counter, pattern, - is_srcbuf); - if (error_count) { - vr->error_count = error_count; - thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off, - dst_off, len, (unsigned long)vr); - return error_count; - } - - kfree(vr); - return 0; -} - -static void result_free(struct dmatest_info *info, const char *name) -{ - struct dmatest_result *r, *_r; - - mutex_lock(&info->results_lock); - list_for_each_entry_safe(r, _r, &info->results, node) { - struct dmatest_thread_result *tr, *_tr; - - if (name && strcmp(r->name, name)) - continue; - - list_for_each_entry_safe(tr, _tr, &r->results, node) { - if (tr->type == DMATEST_ET_VERIFY_BUF) - kfree(tr->vr); - list_del(&tr->node); - kfree(tr); - } + if (runtime <= 0) + return 0; - kfree(r->name); - list_del(&r->node); - kfree(r); + /* drop precision until runtime is 32-bits */ + while (runtime > UINT_MAX) { + runtime >>= 1; + per_sec <<= 1; } - mutex_unlock(&info->results_lock); + per_sec *= val; + do_div(per_sec, runtime); + return per_sec; } -static struct dmatest_result *result_init(struct dmatest_info *info, - const char *name) +static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len) { - struct dmatest_result *r; - - r = kzalloc(sizeof(*r), GFP_KERNEL); - if (r) { - r->name = kstrdup(name, GFP_KERNEL); - INIT_LIST_HEAD(&r->results); - mutex_lock(&info->results_lock); - list_add_tail(&r->node, &info->results); - mutex_unlock(&info->results_lock); - } - return r; + return dmatest_persec(runtime, len >> 10); } /* @@ -525,7 +405,6 @@ static int dmatest_func(void *data) struct dmatest_params *params; struct dma_chan *chan; struct dma_device *dev; - const char *thread_name; unsigned int src_off, dst_off, len; unsigned int error_count; unsigned int failed_tests = 0; @@ -538,9 +417,10 @@ static int dmatest_func(void *data) int src_cnt; int dst_cnt; int i; - struct dmatest_result *result; + ktime_t ktime; + s64 runtime = 0; + unsigned long long total_len = 0; - thread_name = current->comm; set_freezable(); ret = -ENOMEM; @@ -570,10 +450,6 @@ static int dmatest_func(void *data) } else goto err_thread_type; - result = result_init(info, thread_name); - if (!result) - goto err_srcs; - thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); if (!thread->srcs) goto err_srcs; @@ -597,17 +473,17 @@ static int dmatest_func(void *data) set_user_nice(current, 10); /* - * src buffers are freed by the DMAEngine code with dma_unmap_single() - * dst buffers are freed by ourselves below + * src and dst buffers are freed by ourselves below */ - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT - | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE; + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; + ktime = ktime_get(); while (!kthread_should_stop() && !(params->iterations && total_tests >= params->iterations)) { struct dma_async_tx_descriptor *tx = NULL; - dma_addr_t dma_srcs[src_cnt]; - dma_addr_t dma_dsts[dst_cnt]; + struct dmaengine_unmap_data *um; + dma_addr_t srcs[src_cnt]; + 
dma_addr_t *dsts; u8 align = 0; total_tests++; @@ -626,81 +502,103 @@ static int dmatest_func(void *data) break; } - len = dmatest_random() % params->buf_size + 1; + if (params->noverify) { + len = params->buf_size; + src_off = 0; + dst_off = 0; + } else { + len = dmatest_random() % params->buf_size + 1; + len = (len >> align) << align; + if (!len) + len = 1 << align; + src_off = dmatest_random() % (params->buf_size - len + 1); + dst_off = dmatest_random() % (params->buf_size - len + 1); + + src_off = (src_off >> align) << align; + dst_off = (dst_off >> align) << align; + + dmatest_init_srcs(thread->srcs, src_off, len, + params->buf_size); + dmatest_init_dsts(thread->dsts, dst_off, len, + params->buf_size); + } + len = (len >> align) << align; if (!len) len = 1 << align; - src_off = dmatest_random() % (params->buf_size - len + 1); - dst_off = dmatest_random() % (params->buf_size - len + 1); + total_len += len; - src_off = (src_off >> align) << align; - dst_off = (dst_off >> align) << align; - - dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size); - dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size); + um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt, + GFP_KERNEL); + if (!um) { + failed_tests++; + result("unmap data NULL", total_tests, + src_off, dst_off, len, ret); + continue; + } + um->len = params->buf_size; for (i = 0; i < src_cnt; i++) { - u8 *buf = thread->srcs[i] + src_off; - - dma_srcs[i] = dma_map_single(dev->dev, buf, len, - DMA_TO_DEVICE); - ret = dma_mapping_error(dev->dev, dma_srcs[i]); + unsigned long buf = (unsigned long) thread->srcs[i]; + struct page *pg = virt_to_page(buf); + unsigned pg_off = buf & ~PAGE_MASK; + + um->addr[i] = dma_map_page(dev->dev, pg, pg_off, + um->len, DMA_TO_DEVICE); + srcs[i] = um->addr[i] + src_off; + ret = dma_mapping_error(dev->dev, um->addr[i]); if (ret) { - unmap_src(dev->dev, dma_srcs, len, i); - thread_result_add(info, result, - DMATEST_ET_MAP_SRC, - total_tests, src_off, dst_off, - len, ret); + dmaengine_unmap_put(um); + result("src mapping error", total_tests, + src_off, dst_off, len, ret); failed_tests++; continue; } + um->to_cnt++; } /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ + dsts = &um->addr[src_cnt]; for (i = 0; i < dst_cnt; i++) { - dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], - params->buf_size, - DMA_BIDIRECTIONAL); - ret = dma_mapping_error(dev->dev, dma_dsts[i]); + unsigned long buf = (unsigned long) thread->dsts[i]; + struct page *pg = virt_to_page(buf); + unsigned pg_off = buf & ~PAGE_MASK; + + dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len, + DMA_BIDIRECTIONAL); + ret = dma_mapping_error(dev->dev, dsts[i]); if (ret) { - unmap_src(dev->dev, dma_srcs, len, src_cnt); - unmap_dst(dev->dev, dma_dsts, params->buf_size, - i); - thread_result_add(info, result, - DMATEST_ET_MAP_DST, - total_tests, src_off, dst_off, - len, ret); + dmaengine_unmap_put(um); + result("dst mapping error", total_tests, + src_off, dst_off, len, ret); failed_tests++; continue; } + um->bidi_cnt++; } if (thread->type == DMA_MEMCPY) tx = dev->device_prep_dma_memcpy(chan, - dma_dsts[0] + dst_off, - dma_srcs[0], len, - flags); + dsts[0] + dst_off, + srcs[0], len, flags); else if (thread->type == DMA_XOR) tx = dev->device_prep_dma_xor(chan, - dma_dsts[0] + dst_off, - dma_srcs, src_cnt, + dsts[0] + dst_off, + srcs, src_cnt, len, flags); else if (thread->type == DMA_PQ) { dma_addr_t dma_pq[dst_cnt]; for (i = 0; i < dst_cnt; i++) - dma_pq[i] = dma_dsts[i] + dst_off; - tx = 
dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, + dma_pq[i] = dsts[i] + dst_off; + tx = dev->device_prep_dma_pq(chan, dma_pq, srcs, src_cnt, pq_coefs, len, flags); } if (!tx) { - unmap_src(dev->dev, dma_srcs, len, src_cnt); - unmap_dst(dev->dev, dma_dsts, params->buf_size, - dst_cnt); - thread_result_add(info, result, DMATEST_ET_PREP, - total_tests, src_off, dst_off, - len, 0); + dmaengine_unmap_put(um); + result("prep error", total_tests, src_off, + dst_off, len, ret); msleep(100); failed_tests++; continue; @@ -712,9 +610,9 @@ static int dmatest_func(void *data) cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { - thread_result_add(info, result, DMATEST_ET_SUBMIT, - total_tests, src_off, dst_off, - len, cookie); + dmaengine_unmap_put(um); + result("submit error", total_tests, src_off, + dst_off, len, ret); msleep(100); failed_tests++; continue; @@ -735,59 +633,59 @@ static int dmatest_func(void *data) * free it this time?" dancing. For now, just * leave it dangling. */ - thread_result_add(info, result, DMATEST_ET_TIMEOUT, - total_tests, src_off, dst_off, - len, 0); + dmaengine_unmap_put(um); + result("test timed out", total_tests, src_off, dst_off, + len, 0); failed_tests++; continue; - } else if (status != DMA_SUCCESS) { - enum dmatest_error_type type = (status == DMA_ERROR) ? - DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS; - thread_result_add(info, result, type, - total_tests, src_off, dst_off, - len, status); + } else if (status != DMA_COMPLETE) { + dmaengine_unmap_put(um); + result(status == DMA_ERROR ? + "completion error status" : + "completion busy status", total_tests, src_off, + dst_off, len, ret); failed_tests++; continue; } - /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ - unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt); + dmaengine_unmap_put(um); - error_count = 0; + if (params->noverify) { + verbose_result("test passed", total_tests, src_off, + dst_off, len, 0); + continue; + } - pr_debug("%s: verifying source buffer...\n", thread_name); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->srcs, -1, + pr_debug("%s: verifying source buffer...\n", current->comm); + error_count = dmatest_verify(thread->srcs, 0, src_off, 0, PATTERN_SRC, true); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->srcs, 0, - src_off, PATTERN_SRC | PATTERN_COPY, true); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->srcs, 1, - src_off + len, PATTERN_SRC, true); - - pr_debug("%s: verifying dest buffer...\n", thread_name); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->dsts, -1, + error_count += dmatest_verify(thread->srcs, src_off, + src_off + len, src_off, + PATTERN_SRC | PATTERN_COPY, true); + error_count += dmatest_verify(thread->srcs, src_off + len, + params->buf_size, src_off + len, + PATTERN_SRC, true); + + pr_debug("%s: verifying dest buffer...\n", current->comm); + error_count += dmatest_verify(thread->dsts, 0, dst_off, 0, PATTERN_DST, false); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->dsts, 0, - src_off, PATTERN_SRC | PATTERN_COPY, false); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->dsts, 1, - dst_off + len, PATTERN_DST, false); + error_count += dmatest_verify(thread->dsts, dst_off, + dst_off + len, src_off, + PATTERN_SRC | PATTERN_COPY, false); + error_count += 
dmatest_verify(thread->dsts, dst_off + len, + params->buf_size, dst_off + len, + PATTERN_DST, false); if (error_count) { - thread_result_add(info, result, DMATEST_ET_VERIFY, - total_tests, src_off, dst_off, - len, error_count); + result("data error", total_tests, src_off, dst_off, + len, error_count); failed_tests++; } else { - thread_result_add(info, result, DMATEST_ET_OK, - total_tests, src_off, dst_off, - len, 0); + verbose_result("test passed", total_tests, src_off, + dst_off, len, 0); } } + runtime = ktime_us_delta(ktime_get(), ktime); ret = 0; for (i = 0; thread->dsts[i]; i++) @@ -802,20 +700,17 @@ err_srcbuf: err_srcs: kfree(pq_coefs); err_thread_type: - pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", - thread_name, total_tests, failed_tests, ret); + pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n", + current->comm, total_tests, failed_tests, + dmatest_persec(runtime, total_tests), + dmatest_KBs(runtime, total_len), ret); /* terminate all transfers on specified channels */ if (ret) dmaengine_terminate_all(chan); thread->done = true; - - if (params->iterations > 0) - while (!kthread_should_stop()) { - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); - interruptible_sleep_on(&wait_dmatest_exit); - } + wake_up(&thread_wait); return ret; } @@ -828,9 +723,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { ret = kthread_stop(thread->task); - pr_debug("dmatest: thread %s exited with status %d\n", - thread->task->comm, ret); + pr_debug("thread %s exited with status %d\n", + thread->task->comm, ret); list_del(&thread->node); + put_task_struct(thread->task); kfree(thread); } @@ -861,27 +757,27 @@ static int dmatest_add_threads(struct dmatest_info *info, for (i = 0; i < params->threads_per_chan; i++) { thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); if (!thread) { - pr_warning("dmatest: No memory for %s-%s%u\n", - dma_chan_name(chan), op, i); - + pr_warn("No memory for %s-%s%u\n", + dma_chan_name(chan), op, i); break; } thread->info = info; thread->chan = dtc->chan; thread->type = type; smp_wmb(); - thread->task = kthread_run(dmatest_func, thread, "%s-%s%u", + thread->task = kthread_create(dmatest_func, thread, "%s-%s%u", dma_chan_name(chan), op, i); if (IS_ERR(thread->task)) { - pr_warning("dmatest: Failed to run thread %s-%s%u\n", - dma_chan_name(chan), op, i); + pr_warn("Failed to create thread %s-%s%u\n", + dma_chan_name(chan), op, i); kfree(thread); break; } /* srcbuf and dstbuf are allocated by the thread itself */ - + get_task_struct(thread->task); list_add_tail(&thread->node, &dtc->threads); + wake_up_process(thread->task); } return i; @@ -897,7 +793,7 @@ static int dmatest_add_channel(struct dmatest_info *info, dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); if (!dtc) { - pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan)); + pr_warn("No memory for %s\n", dma_chan_name(chan)); return -ENOMEM; } @@ -917,7 +813,7 @@ static int dmatest_add_channel(struct dmatest_info *info, thread_count += cnt > 0 ? 
cnt : 0; } - pr_info("dmatest: Started %u threads using %s\n", + pr_info("Started %u threads using %s\n", thread_count, dma_chan_name(chan)); list_add_tail(&dtc->node, &info->channels); @@ -937,20 +833,20 @@ static bool filter(struct dma_chan *chan, void *param) return true; } -static int __run_threaded_test(struct dmatest_info *info) +static void request_channels(struct dmatest_info *info, + enum dma_transaction_type type) { dma_cap_mask_t mask; - struct dma_chan *chan; - struct dmatest_params *params = &info->params; - int err = 0; dma_cap_zero(mask); - dma_cap_set(DMA_MEMCPY, mask); + dma_cap_set(type, mask); for (;;) { + struct dmatest_params *params = &info->params; + struct dma_chan *chan; + chan = dma_request_channel(mask, filter, params); if (chan) { - err = dmatest_add_channel(info, chan); - if (err) { + if (dmatest_add_channel(info, chan)) { dma_release_channel(chan); break; /* add_channel failed, punt */ } @@ -960,22 +856,30 @@ static int __run_threaded_test(struct dmatest_info *info) info->nr_channels >= params->max_channels) break; /* we have all we need */ } - return err; } -#ifndef MODULE -static int run_threaded_test(struct dmatest_info *info) +static void run_threaded_test(struct dmatest_info *info) { - int ret; + struct dmatest_params *params = &info->params; - mutex_lock(&info->lock); - ret = __run_threaded_test(info); - mutex_unlock(&info->lock); - return ret; + /* Copy test parameters */ + params->buf_size = test_buf_size; + strlcpy(params->channel, strim(test_channel), sizeof(params->channel)); + strlcpy(params->device, strim(test_device), sizeof(params->device)); + params->threads_per_chan = threads_per_chan; + params->max_channels = max_channels; + params->iterations = iterations; + params->xor_sources = xor_sources; + params->pq_sources = pq_sources; + params->timeout = timeout; + params->noverify = noverify; + + request_channels(info, DMA_MEMCPY); + request_channels(info, DMA_XOR); + request_channels(info, DMA_PQ); } -#endif -static void __stop_threaded_test(struct dmatest_info *info) +static void stop_threaded_test(struct dmatest_info *info) { struct dmatest_chan *dtc, *_dtc; struct dma_chan *chan; @@ -984,203 +888,86 @@ static void __stop_threaded_test(struct dmatest_info *info) list_del(&dtc->node); chan = dtc->chan; dmatest_cleanup_channel(dtc); - pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan)); + pr_debug("dropped channel %s\n", dma_chan_name(chan)); dma_release_channel(chan); } info->nr_channels = 0; } -static void stop_threaded_test(struct dmatest_info *info) +static void restart_threaded_test(struct dmatest_info *info, bool run) { - mutex_lock(&info->lock); - __stop_threaded_test(info); - mutex_unlock(&info->lock); -} - -static int __restart_threaded_test(struct dmatest_info *info, bool run) -{ - struct dmatest_params *params = &info->params; + /* we might be called early to set run=, defer running until all + * parameters have been evaluated + */ + if (!info->did_init) + return; /* Stop any running test first */ - __stop_threaded_test(info); - - if (run == false) - return 0; - - /* Clear results from previous run */ - result_free(info, NULL); - - /* Copy test parameters */ - params->buf_size = test_buf_size; - strlcpy(params->channel, strim(test_channel), sizeof(params->channel)); - strlcpy(params->device, strim(test_device), sizeof(params->device)); - params->threads_per_chan = threads_per_chan; - params->max_channels = max_channels; - params->iterations = iterations; - params->xor_sources = xor_sources; - params->pq_sources = pq_sources; 
- params->timeout = timeout; + stop_threaded_test(info); /* Run test with new parameters */ - return __run_threaded_test(info); -} - -static bool __is_threaded_test_run(struct dmatest_info *info) -{ - struct dmatest_chan *dtc; - - list_for_each_entry(dtc, &info->channels, node) { - struct dmatest_thread *thread; - - list_for_each_entry(thread, &dtc->threads, node) { - if (!thread->done) - return true; - } - } - - return false; + run_threaded_test(info); } -static ssize_t dtf_read_run(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) +static int dmatest_run_get(char *val, const struct kernel_param *kp) { - struct dmatest_info *info = file->private_data; - char buf[3]; + struct dmatest_info *info = &test_info; mutex_lock(&info->lock); - - if (__is_threaded_test_run(info)) { - buf[0] = 'Y'; + if (is_threaded_test_run(info)) { + dmatest_run = true; } else { - __stop_threaded_test(info); - buf[0] = 'N'; + stop_threaded_test(info); + dmatest_run = false; } - mutex_unlock(&info->lock); - buf[1] = '\n'; - buf[2] = 0x00; - return simple_read_from_buffer(user_buf, count, ppos, buf, 2); -} - -static ssize_t dtf_write_run(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct dmatest_info *info = file->private_data; - char buf[16]; - bool bv; - int ret = 0; - if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1)))) - return -EFAULT; - - if (strtobool(buf, &bv) == 0) { - mutex_lock(&info->lock); - - if (__is_threaded_test_run(info)) - ret = -EBUSY; - else - ret = __restart_threaded_test(info, bv); - - mutex_unlock(&info->lock); - } - - return ret ? ret : count; + return param_get_bool(val, kp); } -static const struct file_operations dtf_run_fops = { - .read = dtf_read_run, - .write = dtf_write_run, - .open = simple_open, - .llseek = default_llseek, -}; - -static int dtf_results_show(struct seq_file *sf, void *data) +static int dmatest_run_set(const char *val, const struct kernel_param *kp) { - struct dmatest_info *info = sf->private; - struct dmatest_result *result; - struct dmatest_thread_result *tr; - unsigned int i; + struct dmatest_info *info = &test_info; + int ret; - mutex_lock(&info->results_lock); - list_for_each_entry(result, &info->results, node) { - list_for_each_entry(tr, &result->results, node) { - seq_printf(sf, "%s\n", - thread_result_get(result->name, tr)); - if (tr->type == DMATEST_ET_VERIFY_BUF) { - for (i = 0; i < tr->vr->error_count; i++) { - seq_printf(sf, "\t%s\n", - verify_result_get_one(tr->vr, i)); - } - } - } + mutex_lock(&info->lock); + ret = param_set_bool(val, kp); + if (ret) { + mutex_unlock(&info->lock); + return ret; } - mutex_unlock(&info->results_lock); - return 0; -} - -static int dtf_results_open(struct inode *inode, struct file *file) -{ - return single_open(file, dtf_results_show, inode->i_private); -} - -static const struct file_operations dtf_results_fops = { - .open = dtf_results_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int dmatest_register_dbgfs(struct dmatest_info *info) -{ - struct dentry *d; - - d = debugfs_create_dir("dmatest", NULL); - if (IS_ERR(d)) - return PTR_ERR(d); - if (!d) - goto err_root; + if (is_threaded_test_run(info)) + ret = -EBUSY; + else if (dmatest_run) + restart_threaded_test(info, dmatest_run); - info->root = d; - - /* Run or stop threaded test */ - debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info, - &dtf_run_fops); - - /* Results of test in progress */ - debugfs_create_file("results", S_IRUGO, info->root, 
info, - &dtf_results_fops); - - return 0; + mutex_unlock(&info->lock); -err_root: - pr_err("dmatest: Failed to initialize debugfs\n"); - return -ENOMEM; + return ret; } static int __init dmatest_init(void) { struct dmatest_info *info = &test_info; - int ret; - - memset(info, 0, sizeof(*info)); + struct dmatest_params *params = &info->params; - mutex_init(&info->lock); - INIT_LIST_HEAD(&info->channels); + if (dmatest_run) { + mutex_lock(&info->lock); + run_threaded_test(info); + mutex_unlock(&info->lock); + } - mutex_init(&info->results_lock); - INIT_LIST_HEAD(&info->results); + if (params->iterations && wait) + wait_event(thread_wait, !is_threaded_test_run(info)); - ret = dmatest_register_dbgfs(info); - if (ret) - return ret; + /* module parameters are stable, inittime tests are started, + * let userspace take over 'run' control + */ + info->did_init = true; -#ifdef MODULE return 0; -#else - return run_threaded_test(info); -#endif } /* when compiled-in wait for drivers to load first */ late_initcall(dmatest_init); @@ -1189,9 +976,9 @@ static void __exit dmatest_exit(void) { struct dmatest_info *info = &test_info; - debugfs_remove_recursive(info->root); + mutex_lock(&info->lock); stop_threaded_test(info); - result_free(info, NULL); + mutex_unlock(&info->lock); } module_exit(dmatest_exit); diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 89eb89f22284..7516be4677cf 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -85,10 +85,6 @@ static struct device *chan2dev(struct dma_chan *chan) { return &chan->dev->device; } -static struct device *chan2parent(struct dma_chan *chan) -{ - return chan->dev->device.parent; -} static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) { @@ -311,26 +307,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, list_splice_init(&desc->tx_list, &dwc->free_list); list_move(&desc->desc_node, &dwc->free_list); - if (!is_slave_direction(dwc->direction)) { - struct device *parent = chan2parent(&dwc->chan); - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(parent, desc->lli.dar, - desc->total_len, DMA_FROM_DEVICE); - else - dma_unmap_page(parent, desc->lli.dar, - desc->total_len, DMA_FROM_DEVICE); - } - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(parent, desc->lli.sar, - desc->total_len, DMA_TO_DEVICE); - else - dma_unmap_page(parent, desc->lli.sar, - desc->total_len, DMA_TO_DEVICE); - } - } - + dma_descriptor_unmap(txd); spin_unlock_irqrestore(&dwc->lock, flags); if (callback) @@ -1098,13 +1075,13 @@ dwc_tx_status(struct dma_chan *chan, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; dwc_scan_descriptors(to_dw_dma(chan->device), dwc); ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) + if (ret != DMA_COMPLETE) dma_set_residue(txstate, dwc_get_residue(dwc)); if (dwc->paused && ret == DMA_IN_PROGRESS) diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index bef8a368c8dd..2539ea0cbc63 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -46,14 +46,21 @@ #define EDMA_CHANS 64 #endif /* CONFIG_ARCH_DAVINCI_DA8XX */ -/* Max of 16 segments per channel to conserve PaRAM slots */ -#define MAX_NR_SG 16 +/* + * Max of 20 segments per channel to conserve PaRAM slots + * Also note that MAX_NR_SG should be atleast the no.of periods + * that are required for ASoC, otherwise 
DMA prep calls will + * fail. Today davinci-pcm is the only user of this driver and + * requires atleast 17 slots, so we setup the default to 20. + */ +#define MAX_NR_SG 20 #define EDMA_MAX_SLOTS MAX_NR_SG #define EDMA_DESCRIPTORS 16 struct edma_desc { struct virt_dma_desc vdesc; struct list_head node; + int cyclic; int absync; int pset_nr; int processed; @@ -167,8 +174,13 @@ static void edma_execute(struct edma_chan *echan) * then setup a link to the dummy slot, this results in all future * events being absorbed and that's OK because we're done */ - if (edesc->processed == edesc->pset_nr) - edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot); + if (edesc->processed == edesc->pset_nr) { + if (edesc->cyclic) + edma_link(echan->slot[nslots-1], echan->slot[1]); + else + edma_link(echan->slot[nslots-1], + echan->ecc->dummy_slot); + } edma_resume(echan->ch_num); @@ -250,6 +262,117 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, return ret; } +/* + * A PaRAM set configuration abstraction used by other modes + * @chan: Channel who's PaRAM set we're configuring + * @pset: PaRAM set to initialize and setup. + * @src_addr: Source address of the DMA + * @dst_addr: Destination address of the DMA + * @burst: In units of dev_width, how much to send + * @dev_width: How much is the dev_width + * @dma_length: Total length of the DMA transfer + * @direction: Direction of the transfer + */ +static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset, + dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, + enum dma_slave_buswidth dev_width, unsigned int dma_length, + enum dma_transfer_direction direction) +{ + struct edma_chan *echan = to_edma_chan(chan); + struct device *dev = chan->device->dev; + int acnt, bcnt, ccnt, cidx; + int src_bidx, dst_bidx, src_cidx, dst_cidx; + int absync; + + acnt = dev_width; + /* + * If the maxburst is equal to the fifo width, use + * A-synced transfers. This allows for large contiguous + * buffer transfers using only one PaRAM set. + */ + if (burst == 1) { + /* + * For the A-sync case, bcnt and ccnt are the remainder + * and quotient respectively of the division of: + * (dma_length / acnt) by (SZ_64K -1). This is so + * that in case bcnt over flows, we have ccnt to use. + * Note: In A-sync tranfer only, bcntrld is used, but it + * only applies for sg_dma_len(sg) >= SZ_64K. + * In this case, the best way adopted is- bccnt for the + * first frame will be the remainder below. Then for + * every successive frame, bcnt will be SZ_64K-1. This + * is assured as bcntrld = 0xffff in end of function. + */ + absync = false; + ccnt = dma_length / acnt / (SZ_64K - 1); + bcnt = dma_length / acnt - ccnt * (SZ_64K - 1); + /* + * If bcnt is non-zero, we have a remainder and hence an + * extra frame to transfer, so increment ccnt. + */ + if (bcnt) + ccnt++; + else + bcnt = SZ_64K - 1; + cidx = acnt; + } else { + /* + * If maxburst is greater than the fifo address_width, + * use AB-synced transfers where A count is the fifo + * address_width and B count is the maxburst. In this + * case, we are limited to transfers of C count frames + * of (address_width * maxburst) where C count is limited + * to SZ_64K-1. This places an upper bound on the length + * of an SG segment that can be handled. 
+ */ + absync = true; + bcnt = burst; + ccnt = dma_length / (acnt * bcnt); + if (ccnt > (SZ_64K - 1)) { + dev_err(dev, "Exceeded max SG segment size\n"); + return -EINVAL; + } + cidx = acnt * bcnt; + } + + if (direction == DMA_MEM_TO_DEV) { + src_bidx = acnt; + src_cidx = cidx; + dst_bidx = 0; + dst_cidx = 0; + } else if (direction == DMA_DEV_TO_MEM) { + src_bidx = 0; + src_cidx = 0; + dst_bidx = acnt; + dst_cidx = cidx; + } else { + dev_err(dev, "%s: direction not implemented yet\n", __func__); + return -EINVAL; + } + + pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); + /* Configure A or AB synchronized transfers */ + if (absync) + pset->opt |= SYNCDIM; + + pset->src = src_addr; + pset->dst = dst_addr; + + pset->src_dst_bidx = (dst_bidx << 16) | src_bidx; + pset->src_dst_cidx = (dst_cidx << 16) | src_cidx; + + pset->a_b_cnt = bcnt << 16 | acnt; + pset->ccnt = ccnt; + /* + * Only time when (bcntrld) auto reload is required is for + * A-sync case, and in this case, a requirement of reload value + * of SZ_64K-1 only is assured. 'link' is initially set to NULL + * and then later will be populated by edma_execute. + */ + pset->link_bcntrld = 0xffffffff; + return absync; +} + static struct dma_async_tx_descriptor *edma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, @@ -258,23 +381,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( struct edma_chan *echan = to_edma_chan(chan); struct device *dev = chan->device->dev; struct edma_desc *edesc; - dma_addr_t dev_addr; + dma_addr_t src_addr = 0, dst_addr = 0; enum dma_slave_buswidth dev_width; u32 burst; struct scatterlist *sg; - int acnt, bcnt, ccnt, src, dst, cidx; - int src_bidx, dst_bidx, src_cidx, dst_cidx; - int i, nslots; + int i, nslots, ret; if (unlikely(!echan || !sgl || !sg_len)) return NULL; if (direction == DMA_DEV_TO_MEM) { - dev_addr = echan->cfg.src_addr; + src_addr = echan->cfg.src_addr; dev_width = echan->cfg.src_addr_width; burst = echan->cfg.src_maxburst; } else if (direction == DMA_MEM_TO_DEV) { - dev_addr = echan->cfg.dst_addr; + dst_addr = echan->cfg.dst_addr; dev_width = echan->cfg.dst_addr_width; burst = echan->cfg.dst_maxburst; } else { @@ -307,7 +428,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( if (echan->slot[i] < 0) { kfree(edesc); dev_err(dev, "Failed to allocate slot\n"); - kfree(edesc); return NULL; } } @@ -315,64 +435,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( /* Configure PaRAM sets for each SG */ for_each_sg(sgl, sg, sg_len, i) { - - acnt = dev_width; - - /* - * If the maxburst is equal to the fifo width, use - * A-synced transfers. This allows for large contiguous - * buffer transfers using only one PaRAM set. - */ - if (burst == 1) { - edesc->absync = false; - ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1); - bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1); - if (bcnt) - ccnt++; - else - bcnt = SZ_64K - 1; - cidx = acnt; - /* - * If maxburst is greater than the fifo address_width, - * use AB-synced transfers where A count is the fifo - * address_width and B count is the maxburst. In this - * case, we are limited to transfers of C count frames - * of (address_width * maxburst) where C count is limited - * to SZ_64K-1. This places an upper bound on the length - * of an SG segment that can be handled. 
- */ - } else { - edesc->absync = true; - bcnt = burst; - ccnt = sg_dma_len(sg) / (acnt * bcnt); - if (ccnt > (SZ_64K - 1)) { - dev_err(dev, "Exceeded max SG segment size\n"); - kfree(edesc); - return NULL; - } - cidx = acnt * bcnt; + /* Get address for each SG */ + if (direction == DMA_DEV_TO_MEM) + dst_addr = sg_dma_address(sg); + else + src_addr = sg_dma_address(sg); + + ret = edma_config_pset(chan, &edesc->pset[i], src_addr, + dst_addr, burst, dev_width, + sg_dma_len(sg), direction); + if (ret < 0) { + kfree(edesc); + return NULL; } - if (direction == DMA_MEM_TO_DEV) { - src = sg_dma_address(sg); - dst = dev_addr; - src_bidx = acnt; - src_cidx = cidx; - dst_bidx = 0; - dst_cidx = 0; - } else { - src = dev_addr; - dst = sg_dma_address(sg); - src_bidx = 0; - src_cidx = 0; - dst_bidx = acnt; - dst_cidx = cidx; - } - - edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); - /* Configure A or AB synchronized transfers */ - if (edesc->absync) - edesc->pset[i].opt |= SYNCDIM; + edesc->absync = ret; /* If this is the last in a current SG set of transactions, enable interrupts so that next set is processed */ @@ -382,17 +459,138 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( /* If this is the last set, enable completion interrupt flag */ if (i == sg_len - 1) edesc->pset[i].opt |= TCINTEN; + } - edesc->pset[i].src = src; - edesc->pset[i].dst = dst; + return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); +} - edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx; - edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx; +static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long tx_flags, void *context) +{ + struct edma_chan *echan = to_edma_chan(chan); + struct device *dev = chan->device->dev; + struct edma_desc *edesc; + dma_addr_t src_addr, dst_addr; + enum dma_slave_buswidth dev_width; + u32 burst; + int i, ret, nslots; + + if (unlikely(!echan || !buf_len || !period_len)) + return NULL; + + if (direction == DMA_DEV_TO_MEM) { + src_addr = echan->cfg.src_addr; + dst_addr = buf_addr; + dev_width = echan->cfg.src_addr_width; + burst = echan->cfg.src_maxburst; + } else if (direction == DMA_MEM_TO_DEV) { + src_addr = buf_addr; + dst_addr = echan->cfg.dst_addr; + dev_width = echan->cfg.dst_addr_width; + burst = echan->cfg.dst_maxburst; + } else { + dev_err(dev, "%s: bad direction?\n", __func__); + return NULL; + } + + if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { + dev_err(dev, "Undefined slave buswidth\n"); + return NULL; + } + + if (unlikely(buf_len % period_len)) { + dev_err(dev, "Period should be multiple of Buffer length\n"); + return NULL; + } + + nslots = (buf_len / period_len) + 1; + + /* + * Cyclic DMA users such as audio cannot tolerate delays introduced + * by cases where the number of periods is more than the maximum + * number of SGs the EDMA driver can handle at a time. For DMA types + * such as Slave SGs, such delays are tolerable and synchronized, + * but the synchronization is difficult to achieve with Cyclic and + * cannot be guaranteed, so we error out early. 
+ */ + if (nslots > MAX_NR_SG) + return NULL; + + edesc = kzalloc(sizeof(*edesc) + nslots * + sizeof(edesc->pset[0]), GFP_ATOMIC); + if (!edesc) { + dev_dbg(dev, "Failed to allocate a descriptor\n"); + return NULL; + } + + edesc->cyclic = 1; + edesc->pset_nr = nslots; + + dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots); + dev_dbg(dev, "%s: period_len=%d\n", __func__, period_len); + dev_dbg(dev, "%s: buf_len=%d\n", __func__, buf_len); + + for (i = 0; i < nslots; i++) { + /* Allocate a PaRAM slot, if needed */ + if (echan->slot[i] < 0) { + echan->slot[i] = + edma_alloc_slot(EDMA_CTLR(echan->ch_num), + EDMA_SLOT_ANY); + if (echan->slot[i] < 0) { + dev_err(dev, "Failed to allocate slot\n"); + return NULL; + } + } + + if (i == nslots - 1) { + memcpy(&edesc->pset[i], &edesc->pset[0], + sizeof(edesc->pset[0])); + break; + } + + ret = edma_config_pset(chan, &edesc->pset[i], src_addr, + dst_addr, burst, dev_width, period_len, + direction); + if (ret < 0) + return NULL; - edesc->pset[i].a_b_cnt = bcnt << 16 | acnt; - edesc->pset[i].ccnt = ccnt; - edesc->pset[i].link_bcntrld = 0xffffffff; + if (direction == DMA_DEV_TO_MEM) + dst_addr += period_len; + else + src_addr += period_len; + dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i); + dev_dbg(dev, + "\n pset[%d]:\n" + " chnum\t%d\n" + " slot\t%d\n" + " opt\t%08x\n" + " src\t%08x\n" + " dst\t%08x\n" + " abcnt\t%08x\n" + " ccnt\t%08x\n" + " bidx\t%08x\n" + " cidx\t%08x\n" + " lkrld\t%08x\n", + i, echan->ch_num, echan->slot[i], + edesc->pset[i].opt, + edesc->pset[i].src, + edesc->pset[i].dst, + edesc->pset[i].a_b_cnt, + edesc->pset[i].ccnt, + edesc->pset[i].src_dst_bidx, + edesc->pset[i].src_dst_cidx, + edesc->pset[i].link_bcntrld); + + edesc->absync = ret; + + /* + * Enable interrupts for every period because callback + * has to be called for every period. 
+ */ + edesc->pset[i].opt |= TCINTEN; } return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); @@ -406,30 +604,34 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data) unsigned long flags; struct edmacc_param p; - /* Pause the channel */ - edma_pause(echan->ch_num); + edesc = echan->edesc; + + /* Pause the channel for non-cyclic */ + if (!edesc || (edesc && !edesc->cyclic)) + edma_pause(echan->ch_num); switch (ch_status) { - case DMA_COMPLETE: + case EDMA_DMA_COMPLETE: spin_lock_irqsave(&echan->vchan.lock, flags); - edesc = echan->edesc; if (edesc) { - if (edesc->processed == edesc->pset_nr) { + if (edesc->cyclic) { + vchan_cyclic_callback(&edesc->vdesc); + } else if (edesc->processed == edesc->pset_nr) { dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); edma_stop(echan->ch_num); vchan_cookie_complete(&edesc->vdesc); + edma_execute(echan); } else { dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); + edma_execute(echan); } - - edma_execute(echan); } spin_unlock_irqrestore(&echan->vchan.lock, flags); break; - case DMA_CC_ERROR: + case EDMA_DMA_CC_ERROR: spin_lock_irqsave(&echan->vchan.lock, flags); edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p); @@ -579,7 +781,7 @@ static enum dma_status edma_tx_status(struct dma_chan *chan, unsigned long flags; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS || !txstate) + if (ret == DMA_COMPLETE || !txstate) return ret; spin_lock_irqsave(&echan->vchan.lock, flags); @@ -619,6 +821,7 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, struct device *dev) { dma->device_prep_slave_sg = edma_prep_slave_sg; + dma->device_prep_dma_cyclic = edma_prep_dma_cyclic; dma->device_alloc_chan_resources = edma_alloc_chan_resources; dma->device_free_chan_resources = edma_free_chan_resources; dma->device_issue_pending = edma_issue_pending; diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 591cd8c63abb..cb4bf682a708 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -733,28 +733,6 @@ static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac) spin_unlock_irqrestore(&edmac->lock, flags); } -static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc) -{ - struct device *dev = desc->txd.chan->device->dev; - - if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(dev, desc->src_addr, desc->size, - DMA_TO_DEVICE); - else - dma_unmap_page(dev, desc->src_addr, desc->size, - DMA_TO_DEVICE); - } - if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(dev, desc->dst_addr, desc->size, - DMA_FROM_DEVICE); - else - dma_unmap_page(dev, desc->dst_addr, desc->size, - DMA_FROM_DEVICE); - } -} - static void ep93xx_dma_tasklet(unsigned long data) { struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; @@ -787,13 +765,7 @@ static void ep93xx_dma_tasklet(unsigned long data) /* Now we can release all the chained descriptors */ list_for_each_entry_safe(desc, d, &list, node) { - /* - * For the memcpy channels the API requires us to unmap the - * buffers unless requested otherwise. 
- */ - if (!edmac->chan.private) - ep93xx_dma_unmap_buffers(desc); - + dma_descriptor_unmap(&desc->txd); ep93xx_dma_desc_put(edmac, desc); } diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 61517dd0d0b7..7086a16a55f2 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -870,22 +870,7 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, /* Run any dependencies */ dma_run_dependencies(txd); - /* Unmap the dst buffer, if requested */ - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE); - else - dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE); - } - - /* Unmap the src buffer, if requested */ - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(dev, src, len, DMA_TO_DEVICE); - else - dma_unmap_page(dev, src, len, DMA_TO_DEVICE); - } - + dma_descriptor_unmap(txd); #ifdef FSL_DMA_LD_DEBUG chan_dbg(chan, "LD %p free\n", desc); #endif @@ -1255,7 +1240,9 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, WARN_ON(fdev->feature != chan->feature); chan->dev = fdev->dev; - chan->id = ((res.start - 0x100) & 0xfff) >> 7; + chan->id = (res.start & 0xfff) < 0x300 ? + ((res.start - 0x100) & 0xfff) >> 7 : + ((res.start - 0x200) & 0xfff) >> 7; if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { dev_err(fdev->dev, "too many channels for device\n"); err = -EINVAL; @@ -1428,6 +1415,7 @@ static int fsldma_of_remove(struct platform_device *op) } static const struct of_device_id fsldma_of_ids[] = { + { .compatible = "fsl,elo3-dma", }, { .compatible = "fsl,eloplus-dma", }, { .compatible = "fsl,elo-dma", }, {} @@ -1449,7 +1437,7 @@ static struct platform_driver fsldma_of_driver = { static __init int fsldma_init(void) { - pr_info("Freescale Elo / Elo Plus DMA driver\n"); + pr_info("Freescale Elo series DMA driver\n"); return platform_driver_register(&fsldma_of_driver); } @@ -1461,5 +1449,5 @@ static void __exit fsldma_exit(void) subsys_initcall(fsldma_init); module_exit(fsldma_exit); -MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver"); +MODULE_DESCRIPTION("Freescale Elo series DMA driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index f5c38791fc74..1ffc24484d23 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -112,7 +112,7 @@ struct fsldma_chan_regs { }; struct fsldma_chan; -#define FSL_DMA_MAX_CHANS_PER_DEVICE 4 +#define FSL_DMA_MAX_CHANS_PER_DEVICE 8 struct fsldma_device { void __iomem *regs; /* DGSR register base */ diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 55852c026791..6f9ac2022abd 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -572,9 +572,11 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel)); - dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x " - "dma_length=%d\n", __func__, imxdmac->channel, - d->dest, d->src, d->len); + dev_dbg(imxdma->dev, + "%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n", + __func__, imxdmac->channel, + (unsigned long long)d->dest, + (unsigned long long)d->src, d->len); break; /* Cyclic transfer is the same as slave_sg with special sg configuration. 
*/ @@ -586,20 +588,22 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) imx_dmav1_writel(imxdma, imxdmac->ccr_from_device, DMA_CCR(imxdmac->channel)); - dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " - "total length=%d dev_addr=0x%08x (dev2mem)\n", - __func__, imxdmac->channel, d->sg, d->sgcount, - d->len, imxdmac->per_address); + dev_dbg(imxdma->dev, + "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n", + __func__, imxdmac->channel, + d->sg, d->sgcount, d->len, + (unsigned long long)imxdmac->per_address); } else if (d->direction == DMA_MEM_TO_DEV) { imx_dmav1_writel(imxdma, imxdmac->per_address, DMA_DAR(imxdmac->channel)); imx_dmav1_writel(imxdma, imxdmac->ccr_to_device, DMA_CCR(imxdmac->channel)); - dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " - "total length=%d dev_addr=0x%08x (mem2dev)\n", - __func__, imxdmac->channel, d->sg, d->sgcount, - d->len, imxdmac->per_address); + dev_dbg(imxdma->dev, + "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n", + __func__, imxdmac->channel, + d->sg, d->sgcount, d->len, + (unsigned long long)imxdmac->per_address); } else { dev_err(imxdma->dev, "%s channel: %d bad dma mode\n", __func__, imxdmac->channel); @@ -771,7 +775,7 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan) desc->desc.tx_submit = imxdma_tx_submit; /* txd.flags will be overwritten in prep funcs */ desc->desc.flags = DMA_CTRL_ACK; - desc->status = DMA_SUCCESS; + desc->status = DMA_COMPLETE; list_add_tail(&desc->node, &imxdmac->ld_free); imxdmac->descs_allocated++; @@ -870,7 +874,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( int i; unsigned int periods = buf_len / period_len; - dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n", + dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n", __func__, imxdmac->channel, buf_len, period_len); if (list_empty(&imxdmac->ld_free) || @@ -926,8 +930,9 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy( struct imxdma_engine *imxdma = imxdmac->imxdma; struct imxdma_desc *desc; - dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n", - __func__, imxdmac->channel, src, dest, len); + dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n", + __func__, imxdmac->channel, (unsigned long long)src, + (unsigned long long)dest, len); if (list_empty(&imxdmac->ld_free) || imxdma_chan_is_doing_cyclic(imxdmac)) @@ -956,9 +961,10 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved( struct imxdma_engine *imxdma = imxdmac->imxdma; struct imxdma_desc *desc; - dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n" - " src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__, - imxdmac->channel, xt->src_start, xt->dst_start, + dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n" + " src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__, + imxdmac->channel, (unsigned long long)xt->src_start, + (unsigned long long) xt->dst_start, xt->src_sgl ? "true" : "false", xt->dst_sgl ? 
"true" : "false", xt->numf, xt->frame_size); diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index c1fd504cae28..c75679d42028 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -638,7 +638,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) if (error) sdmac->status = DMA_ERROR; else - sdmac->status = DMA_SUCCESS; + sdmac->status = DMA_COMPLETE; dma_cookie_complete(&sdmac->desc); if (sdmac->desc.callback) @@ -1089,8 +1089,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( param &= ~BD_CONT; } - dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", - i, count, sg->dma_address, + dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n", + i, count, (u64)sg->dma_address, param & BD_WRAP ? "wrap" : "", param & BD_INTR ? " intr" : ""); @@ -1163,8 +1163,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( if (i + 1 == num_periods) param |= BD_WRAP; - dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", - i, period_len, dma_addr, + dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n", + i, period_len, (u64)dma_addr, param & BD_WRAP ? "wrap" : "", param & BD_INTR ? " intr" : ""); diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index a975ebebea8a..1aab8130efa1 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c @@ -309,7 +309,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, callback_txd(param_txd); } if (midc->raw_tfr) { - desc->status = DMA_SUCCESS; + desc->status = DMA_COMPLETE; if (desc->lli != NULL) { pci_pool_free(desc->lli_pool, desc->lli, desc->lli_phys); @@ -481,7 +481,7 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) { + if (ret != DMA_COMPLETE) { spin_lock_bh(&midc->lock); midc_scan_descriptors(to_middma_device(chan->device), midc); spin_unlock_bh(&midc->lock); diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 5ff6fc1819dc..1a49c777607c 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -531,21 +531,6 @@ static void ioat1_cleanup_event(unsigned long data) writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } -void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, - size_t len, struct ioat_dma_descriptor *hw) -{ - struct pci_dev *pdev = chan->device->pdev; - size_t offset = len - hw->size; - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) - ioat_unmap(pdev, hw->dst_addr - offset, len, - PCI_DMA_FROMDEVICE, flags, 1); - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) - ioat_unmap(pdev, hw->src_addr - offset, len, - PCI_DMA_TODEVICE, flags, 0); -} - dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan) { dma_addr_t phys_complete; @@ -602,7 +587,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete) dump_desc_dbg(ioat, desc); if (tx->cookie) { dma_cookie_complete(tx); - ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); + dma_descriptor_unmap(tx); ioat->active -= desc->hw->tx_cnt; if (tx->callback) { tx->callback(tx->callback_param); @@ -733,7 +718,7 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, enum dma_status ret; ret = dma_cookie_status(c, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; device->cleanup_fn((unsigned long) c); @@ -833,8 +818,7 @@ int ioat_dma_self_test(struct ioatdma_device *device) dma_src = dma_map_single(dev, src, 
IOAT_TEST_SIZE, DMA_TO_DEVICE); dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); - flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP | - DMA_PREP_INTERRUPT; + flags = DMA_PREP_INTERRUPT; tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, IOAT_TEST_SIZE, flags); if (!tx) { @@ -859,7 +843,7 @@ int ioat_dma_self_test(struct ioatdma_device *device) if (tmo == 0 || dma->device_tx_status(dma_chan, cookie, NULL) - != DMA_SUCCESS) { + != DMA_COMPLETE) { dev_err(dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; goto unmap_dma; @@ -885,8 +869,7 @@ static char ioat_interrupt_style[32] = "msix"; module_param_string(ioat_interrupt_style, ioat_interrupt_style, sizeof(ioat_interrupt_style), 0644); MODULE_PARM_DESC(ioat_interrupt_style, - "set ioat interrupt style: msix (default), " - "msix-single-vector, msi, intx)"); + "set ioat interrupt style: msix (default), msi, intx"); /** * ioat_dma_setup_interrupts - setup interrupt handler @@ -904,8 +887,6 @@ int ioat_dma_setup_interrupts(struct ioatdma_device *device) if (!strcmp(ioat_interrupt_style, "msix")) goto msix; - if (!strcmp(ioat_interrupt_style, "msix-single-vector")) - goto msix_single_vector; if (!strcmp(ioat_interrupt_style, "msi")) goto msi; if (!strcmp(ioat_interrupt_style, "intx")) @@ -920,10 +901,8 @@ msix: device->msix_entries[i].entry = i; err = pci_enable_msix(pdev, device->msix_entries, msixcnt); - if (err < 0) + if (err) goto msi; - if (err > 0) - goto msix_single_vector; for (i = 0; i < msixcnt; i++) { msix = &device->msix_entries[i]; @@ -937,29 +916,13 @@ msix: chan = ioat_chan_by_index(device, j); devm_free_irq(dev, msix->vector, chan); } - goto msix_single_vector; + goto msi; } } intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; device->irq_mode = IOAT_MSIX; goto done; -msix_single_vector: - msix = &device->msix_entries[0]; - msix->entry = 0; - err = pci_enable_msix(pdev, device->msix_entries, 1); - if (err) - goto msi; - - err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0, - "ioat-msix", device); - if (err) { - pci_disable_msix(pdev); - goto msi; - } - device->irq_mode = IOAT_MSIX_SINGLE; - goto done; - msi: err = pci_enable_msi(pdev); if (err) @@ -971,7 +934,7 @@ msi: pci_disable_msi(pdev); goto intx; } - device->irq_mode = IOAT_MSIX; + device->irq_mode = IOAT_MSI; goto done; intx: diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 54fb7b9ff9aa..11fb877ddca9 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -52,7 +52,6 @@ enum ioat_irq_mode { IOAT_NOIRQ = 0, IOAT_MSIX, - IOAT_MSIX_SINGLE, IOAT_MSI, IOAT_INTX }; @@ -83,7 +82,6 @@ struct ioatdma_device { struct pci_pool *completion_pool; #define MAX_SED_POOLS 5 struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; - struct kmem_cache *sed_pool; struct dma_device common; u8 version; struct msix_entry msix_entries[4]; @@ -342,16 +340,6 @@ static inline bool is_ioat_bug(unsigned long err) return !!err; } -static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len, - int direction, enum dma_ctrl_flags flags, bool dst) -{ - if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) || - (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE))) - pci_unmap_single(pdev, addr, len, direction); - else - pci_unmap_page(pdev, addr, len, direction); -} - int ioat_probe(struct ioatdma_device *device); int ioat_register(struct ioatdma_device *device); int ioat1_dma_probe(struct ioatdma_device *dev, int dca); @@ -363,8 +351,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct 
ioat_chan_common *chan, int idx); enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, struct dma_tx_state *txstate); -void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, - size_t len, struct ioat_dma_descriptor *hw); bool ioat_cleanup_preamble(struct ioat_chan_common *chan, dma_addr_t *phys_complete); void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index b925e1b1d139..5d3affe7e976 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -148,7 +148,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) tx = &desc->txd; dump_desc_dbg(ioat, desc); if (tx->cookie) { - ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); + dma_descriptor_unmap(tx); dma_cookie_complete(tx); if (tx->callback) { tx->callback(tx->callback_param); diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index 212d584fe427..470292767e68 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -157,7 +157,6 @@ static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) int ioat2_dma_probe(struct ioatdma_device *dev, int dca); int ioat3_dma_probe(struct ioatdma_device *dev, int dca); -void ioat3_dma_remove(struct ioatdma_device *dev); struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs); diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index d8ececaf1b57..820817e97e62 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -67,6 +67,8 @@ #include "dma.h" #include "dma_v2.h" +extern struct kmem_cache *ioat3_sed_cache; + /* ioat hardware assumes at least two sources for raid operations */ #define src_cnt_to_sw(x) ((x) + 2) #define src_cnt_to_hw(x) ((x) - 2) @@ -87,22 +89,8 @@ static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6 }; -/* - * technically sources 1 and 2 do not require SED, but the op will have - * at least 9 descriptors so that's irrelevant. 
- */ -static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 1, 1, 1 }; - static void ioat3_eh(struct ioat2_dma_chan *ioat); -static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) -{ - struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; - - return raw->field[xor_idx_to_field[idx]]; -} - static void xor_set_src(struct ioat_raw_descriptor *descs[2], dma_addr_t addr, u32 offset, int idx) { @@ -135,12 +123,6 @@ static void pq_set_src(struct ioat_raw_descriptor *descs[2], pq->coef[idx] = coef; } -static int sed_get_pq16_pool_idx(int src_cnt) -{ - - return pq16_idx_to_sed[src_cnt]; -} - static bool is_jf_ioat(struct pci_dev *pdev) { switch (pdev->device) { @@ -272,7 +254,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) struct ioat_sed_ent *sed; gfp_t flags = __GFP_ZERO | GFP_ATOMIC; - sed = kmem_cache_alloc(device->sed_pool, flags); + sed = kmem_cache_alloc(ioat3_sed_cache, flags); if (!sed) return NULL; @@ -280,7 +262,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool], flags, &sed->dma); if (!sed->hw) { - kmem_cache_free(device->sed_pool, sed); + kmem_cache_free(ioat3_sed_cache, sed); return NULL; } @@ -293,165 +275,7 @@ static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *s return; dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); - kmem_cache_free(device->sed_pool, sed); -} - -static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, - struct ioat_ring_ent *desc, int idx) -{ - struct ioat_chan_common *chan = &ioat->base; - struct pci_dev *pdev = chan->device->pdev; - size_t len = desc->len; - size_t offset = len - desc->hw->size; - struct dma_async_tx_descriptor *tx = &desc->txd; - enum dma_ctrl_flags flags = tx->flags; - - switch (desc->hw->ctl_f.op) { - case IOAT_OP_COPY: - if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */ - ioat_dma_unmap(chan, flags, len, desc->hw); - break; - case IOAT_OP_XOR_VAL: - case IOAT_OP_XOR: { - struct ioat_xor_descriptor *xor = desc->xor; - struct ioat_ring_ent *ext; - struct ioat_xor_ext_descriptor *xor_ex = NULL; - int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt); - struct ioat_raw_descriptor *descs[2]; - int i; - - if (src_cnt > 5) { - ext = ioat2_get_ring_ent(ioat, idx + 1); - xor_ex = ext->xor_ex; - } - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - descs[0] = (struct ioat_raw_descriptor *) xor; - descs[1] = (struct ioat_raw_descriptor *) xor_ex; - for (i = 0; i < src_cnt; i++) { - dma_addr_t src = xor_get_src(descs, i); - - ioat_unmap(pdev, src - offset, len, - PCI_DMA_TODEVICE, flags, 0); - } - - /* dest is a source in xor validate operations */ - if (xor->ctl_f.op == IOAT_OP_XOR_VAL) { - ioat_unmap(pdev, xor->dst_addr - offset, len, - PCI_DMA_TODEVICE, flags, 1); - break; - } - } - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) - ioat_unmap(pdev, xor->dst_addr - offset, len, - PCI_DMA_FROMDEVICE, flags, 1); - break; - } - case IOAT_OP_PQ_VAL: - case IOAT_OP_PQ: { - struct ioat_pq_descriptor *pq = desc->pq; - struct ioat_ring_ent *ext; - struct ioat_pq_ext_descriptor *pq_ex = NULL; - int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); - struct ioat_raw_descriptor *descs[2]; - int i; - - if (src_cnt > 3) { - ext = ioat2_get_ring_ent(ioat, idx + 1); - pq_ex = ext->pq_ex; - } - - /* in the 'continue' case don't unmap the dests as sources */ - if (dmaf_p_disabled_continue(flags)) - src_cnt--; - else if (dmaf_continue(flags)) - src_cnt -= 3; - - if (!(flags 
& DMA_COMPL_SKIP_SRC_UNMAP)) { - descs[0] = (struct ioat_raw_descriptor *) pq; - descs[1] = (struct ioat_raw_descriptor *) pq_ex; - for (i = 0; i < src_cnt; i++) { - dma_addr_t src = pq_get_src(descs, i); - - ioat_unmap(pdev, src - offset, len, - PCI_DMA_TODEVICE, flags, 0); - } - - /* the dests are sources in pq validate operations */ - if (pq->ctl_f.op == IOAT_OP_XOR_VAL) { - if (!(flags & DMA_PREP_PQ_DISABLE_P)) - ioat_unmap(pdev, pq->p_addr - offset, - len, PCI_DMA_TODEVICE, flags, 0); - if (!(flags & DMA_PREP_PQ_DISABLE_Q)) - ioat_unmap(pdev, pq->q_addr - offset, - len, PCI_DMA_TODEVICE, flags, 0); - break; - } - } - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (!(flags & DMA_PREP_PQ_DISABLE_P)) - ioat_unmap(pdev, pq->p_addr - offset, len, - PCI_DMA_BIDIRECTIONAL, flags, 1); - if (!(flags & DMA_PREP_PQ_DISABLE_Q)) - ioat_unmap(pdev, pq->q_addr - offset, len, - PCI_DMA_BIDIRECTIONAL, flags, 1); - } - break; - } - case IOAT_OP_PQ_16S: - case IOAT_OP_PQ_VAL_16S: { - struct ioat_pq_descriptor *pq = desc->pq; - int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); - struct ioat_raw_descriptor *descs[4]; - int i; - - /* in the 'continue' case don't unmap the dests as sources */ - if (dmaf_p_disabled_continue(flags)) - src_cnt--; - else if (dmaf_continue(flags)) - src_cnt -= 3; - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - descs[0] = (struct ioat_raw_descriptor *)pq; - descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw); - descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]); - for (i = 0; i < src_cnt; i++) { - dma_addr_t src = pq16_get_src(descs, i); - - ioat_unmap(pdev, src - offset, len, - PCI_DMA_TODEVICE, flags, 0); - } - - /* the dests are sources in pq validate operations */ - if (pq->ctl_f.op == IOAT_OP_XOR_VAL) { - if (!(flags & DMA_PREP_PQ_DISABLE_P)) - ioat_unmap(pdev, pq->p_addr - offset, - len, PCI_DMA_TODEVICE, - flags, 0); - if (!(flags & DMA_PREP_PQ_DISABLE_Q)) - ioat_unmap(pdev, pq->q_addr - offset, - len, PCI_DMA_TODEVICE, - flags, 0); - break; - } - } - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (!(flags & DMA_PREP_PQ_DISABLE_P)) - ioat_unmap(pdev, pq->p_addr - offset, len, - PCI_DMA_BIDIRECTIONAL, flags, 1); - if (!(flags & DMA_PREP_PQ_DISABLE_Q)) - ioat_unmap(pdev, pq->q_addr - offset, len, - PCI_DMA_BIDIRECTIONAL, flags, 1); - } - break; - } - default: - dev_err(&pdev->dev, "%s: unknown op type: %#x\n", - __func__, desc->hw->ctl_f.op); - } + kmem_cache_free(ioat3_sed_cache, sed); } static bool desc_has_ext(struct ioat_ring_ent *desc) @@ -577,7 +401,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) tx = &desc->txd; if (tx->cookie) { dma_cookie_complete(tx); - ioat3_dma_unmap(ioat, desc, idx + i); + dma_descriptor_unmap(tx); if (tx->callback) { tx->callback(tx->callback_param); tx->callback = NULL; @@ -807,7 +631,7 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie, enum dma_status ret; ret = dma_cookie_status(c, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; ioat3_cleanup(ioat); @@ -1129,9 +953,6 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, u8 op; int i, s, idx, num_descs; - /* this function only handles src_cnt 9 - 16 */ - BUG_ON(src_cnt < 9); - /* this function is only called with 9-16 sources */ op = result ? 
IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; @@ -1159,8 +980,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, descs[0] = (struct ioat_raw_descriptor *) pq; - desc->sed = ioat3_alloc_sed(device, - sed_get_pq16_pool_idx(src_cnt)); + desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3); if (!desc->sed) { dev_err(to_dev(chan), "%s: no free sed entries\n", __func__); @@ -1218,13 +1038,21 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, return &desc->txd; } +static int src_cnt_flags(unsigned int src_cnt, unsigned long flags) +{ + if (dmaf_p_disabled_continue(flags)) + return src_cnt + 1; + else if (dmaf_continue(flags)) + return src_cnt + 3; + else + return src_cnt; +} + static struct dma_async_tx_descriptor * ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, unsigned long flags) { - struct dma_device *dma = chan->device; - /* specify valid address for disabled result */ if (flags & DMA_PREP_PQ_DISABLE_P) dst[0] = dst[1]; @@ -1244,7 +1072,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, single_source_coef[0] = scf[0]; single_source_coef[1] = 0; - return (src_cnt > 8) && (dma->max_pq > 8) ? + return src_cnt_flags(src_cnt, flags) > 8 ? __ioat3_prep_pq16_lock(chan, NULL, dst, single_source, 2, single_source_coef, len, flags) : @@ -1252,7 +1080,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, single_source_coef, len, flags); } else { - return (src_cnt > 8) && (dma->max_pq > 8) ? + return src_cnt_flags(src_cnt, flags) > 8 ? __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt, scf, len, flags) : __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, @@ -1265,8 +1093,6 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, enum sum_check_flags *pqres, unsigned long flags) { - struct dma_device *dma = chan->device; - /* specify valid address for disabled result */ if (flags & DMA_PREP_PQ_DISABLE_P) pq[0] = pq[1]; @@ -1278,7 +1104,7 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, */ *pqres = 0; - return (src_cnt > 8) && (dma->max_pq > 8) ? + return src_cnt_flags(src_cnt, flags) > 8 ? __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, flags) : __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, @@ -1289,7 +1115,6 @@ static struct dma_async_tx_descriptor * ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags) { - struct dma_device *dma = chan->device; unsigned char scf[src_cnt]; dma_addr_t pq[2]; @@ -1298,7 +1123,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, flags |= DMA_PREP_PQ_DISABLE_Q; pq[1] = dst; /* specify valid address for disabled result */ - return (src_cnt > 8) && (dma->max_pq > 8) ? + return src_cnt_flags(src_cnt, flags) > 8 ? 
__ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, flags) : __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, @@ -1310,7 +1135,6 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, size_t len, enum sum_check_flags *result, unsigned long flags) { - struct dma_device *dma = chan->device; unsigned char scf[src_cnt]; dma_addr_t pq[2]; @@ -1324,8 +1148,7 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, flags |= DMA_PREP_PQ_DISABLE_Q; pq[1] = pq[0]; /* specify valid address for disabled result */ - - return (src_cnt > 8) && (dma->max_pq > 8) ? + return src_cnt_flags(src_cnt, flags) > 8 ? __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, scf, len, flags) : __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, @@ -1444,9 +1267,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) DMA_TO_DEVICE); tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, IOAT_NUM_SRC_TEST, PAGE_SIZE, - DMA_PREP_INTERRUPT | - DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP); + DMA_PREP_INTERRUPT); if (!tx) { dev_err(dev, "Self-test xor prep failed\n"); @@ -1468,7 +1289,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dev, "Self-test xor timed out\n"); err = -ENODEV; goto dma_unmap; @@ -1507,9 +1328,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) DMA_TO_DEVICE); tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, - &xor_val_result, DMA_PREP_INTERRUPT | - DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP); + &xor_val_result, DMA_PREP_INTERRUPT); if (!tx) { dev_err(dev, "Self-test zero prep failed\n"); err = -ENODEV; @@ -1530,7 +1349,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dev, "Self-test validate timed out\n"); err = -ENODEV; goto dma_unmap; @@ -1545,6 +1364,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) goto free_resources; } + memset(page_address(dest), 0, PAGE_SIZE); + /* test for non-zero parity sum */ op = IOAT_OP_XOR_VAL; @@ -1554,9 +1375,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) DMA_TO_DEVICE); tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, - &xor_val_result, DMA_PREP_INTERRUPT | - DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP); + &xor_val_result, DMA_PREP_INTERRUPT); if (!tx) { dev_err(dev, "Self-test 2nd zero prep failed\n"); err = -ENODEV; @@ -1577,7 +1396,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dev, "Self-test 2nd validate timed out\n"); err = -ENODEV; goto dma_unmap; @@ -1630,52 +1449,36 @@ static int ioat3_dma_self_test(struct ioatdma_device *device) static int ioat3_irq_reinit(struct ioatdma_device *device) { - int msixcnt = device->common.chancnt; struct pci_dev *pdev = device->pdev; - int i; - struct msix_entry *msix; 
- struct ioat_chan_common *chan; - int err = 0; + int irq = pdev->irq, i; + + if (!is_bwd_ioat(pdev)) + return 0; switch (device->irq_mode) { case IOAT_MSIX: + for (i = 0; i < device->common.chancnt; i++) { + struct msix_entry *msix = &device->msix_entries[i]; + struct ioat_chan_common *chan; - for (i = 0; i < msixcnt; i++) { - msix = &device->msix_entries[i]; chan = ioat_chan_by_index(device, i); devm_free_irq(&pdev->dev, msix->vector, chan); } pci_disable_msix(pdev); break; - - case IOAT_MSIX_SINGLE: - msix = &device->msix_entries[0]; - chan = ioat_chan_by_index(device, 0); - devm_free_irq(&pdev->dev, msix->vector, chan); - pci_disable_msix(pdev); - break; - case IOAT_MSI: - chan = ioat_chan_by_index(device, 0); - devm_free_irq(&pdev->dev, pdev->irq, chan); pci_disable_msi(pdev); - break; - + /* fall through */ case IOAT_INTX: - chan = ioat_chan_by_index(device, 0); - devm_free_irq(&pdev->dev, pdev->irq, chan); + devm_free_irq(&pdev->dev, irq, device); break; - default: return 0; } - device->irq_mode = IOAT_NOIRQ; - err = ioat_dma_setup_interrupts(device); - - return err; + return ioat_dma_setup_interrupts(device); } static int ioat3_reset_hw(struct ioat_chan_common *chan) @@ -1718,14 +1521,12 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan) } err = ioat2_reset_sync(chan, msecs_to_jiffies(200)); - if (err) { - dev_err(&pdev->dev, "Failed to reset!\n"); - return err; - } - - if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev)) + if (!err) err = ioat3_irq_reinit(device); + if (err) + dev_err(&pdev->dev, "Failed to reset: %d\n", err); + return err; } @@ -1835,21 +1636,15 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) char pool_name[14]; int i; - /* allocate sw descriptor pool for SED */ - device->sed_pool = kmem_cache_create("ioat_sed", - sizeof(struct ioat_sed_ent), 0, 0, NULL); - if (!device->sed_pool) - return -ENOMEM; - for (i = 0; i < MAX_SED_POOLS; i++) { snprintf(pool_name, 14, "ioat_hw%d_sed", i); /* allocate SED DMA pool */ - device->sed_hw_pool[i] = dma_pool_create(pool_name, + device->sed_hw_pool[i] = dmam_pool_create(pool_name, &pdev->dev, SED_SIZE * (i + 1), 64, 0); if (!device->sed_hw_pool[i]) - goto sed_pool_cleanup; + return -ENOMEM; } } @@ -1875,28 +1670,4 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) device->dca = ioat3_dca_init(pdev, device->reg_base); return 0; - -sed_pool_cleanup: - if (device->sed_pool) { - int i; - kmem_cache_destroy(device->sed_pool); - - for (i = 0; i < MAX_SED_POOLS; i++) - if (device->sed_hw_pool[i]) - dma_pool_destroy(device->sed_hw_pool[i]); - } - - return -ENOMEM; -} - -void ioat3_dma_remove(struct ioatdma_device *device) -{ - if (device->sed_pool) { - int i; - kmem_cache_destroy(device->sed_pool); - - for (i = 0; i < MAX_SED_POOLS; i++) - if (device->sed_hw_pool[i]) - dma_pool_destroy(device->sed_hw_pool[i]); - } } diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 2c8d560e6334..1d051cd045db 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -123,6 +123,7 @@ module_param(ioat_dca_enabled, int, 0644); MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); struct kmem_cache *ioat2_cache; +struct kmem_cache *ioat3_sed_cache; #define DRV_NAME "ioatdma" @@ -207,9 +208,6 @@ static void ioat_remove(struct pci_dev *pdev) if (!device) return; - if (device->version >= IOAT_VER_3_0) - ioat3_dma_remove(device); - dev_err(&pdev->dev, "Removing dma and dca services\n"); if (device->dca) { unregister_dca_provider(device->dca, &pdev->dev); @@ -221,7 
+219,7 @@ static void ioat_remove(struct pci_dev *pdev) static int __init ioat_init_module(void) { - int err; + int err = -ENOMEM; pr_info("%s: Intel(R) QuickData Technology Driver %s\n", DRV_NAME, IOAT_DMA_VERSION); @@ -231,9 +229,21 @@ static int __init ioat_init_module(void) if (!ioat2_cache) return -ENOMEM; + ioat3_sed_cache = KMEM_CACHE(ioat_sed_ent, 0); + if (!ioat3_sed_cache) + goto err_ioat2_cache; + err = pci_register_driver(&ioat_pci_driver); if (err) - kmem_cache_destroy(ioat2_cache); + goto err_ioat3_cache; + + return 0; + + err_ioat3_cache: + kmem_cache_destroy(ioat3_sed_cache); + + err_ioat2_cache: + kmem_cache_destroy(ioat2_cache); return err; } diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index dd8b44a56e5d..c56137bc3868 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -61,80 +61,6 @@ static void iop_adma_free_slots(struct iop_adma_desc_slot *slot) } } -static void -iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc) -{ - struct dma_async_tx_descriptor *tx = &desc->async_tx; - struct iop_adma_desc_slot *unmap = desc->group_head; - struct device *dev = &iop_chan->device->pdev->dev; - u32 len = unmap->unmap_len; - enum dma_ctrl_flags flags = tx->flags; - u32 src_cnt; - dma_addr_t addr; - dma_addr_t dest; - - src_cnt = unmap->unmap_src_cnt; - dest = iop_desc_get_dest_addr(unmap, iop_chan); - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - enum dma_data_direction dir; - - if (src_cnt > 1) /* is xor? */ - dir = DMA_BIDIRECTIONAL; - else - dir = DMA_FROM_DEVICE; - - dma_unmap_page(dev, dest, len, dir); - } - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - while (src_cnt--) { - addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt); - if (addr == dest) - continue; - dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); - } - } - desc->group_head = NULL; -} - -static void -iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc) -{ - struct dma_async_tx_descriptor *tx = &desc->async_tx; - struct iop_adma_desc_slot *unmap = desc->group_head; - struct device *dev = &iop_chan->device->pdev->dev; - u32 len = unmap->unmap_len; - enum dma_ctrl_flags flags = tx->flags; - u32 src_cnt = unmap->unmap_src_cnt; - dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan); - dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan); - int i; - - if (tx->flags & DMA_PREP_CONTINUE) - src_cnt -= 3; - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) { - dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL); - dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL); - } - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - dma_addr_t addr; - - for (i = 0; i < src_cnt; i++) { - addr = iop_desc_get_src_addr(unmap, iop_chan, i); - dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); - } - if (desc->pq_check_result) { - dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE); - dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE); - } - } - - desc->group_head = NULL; -} - - static dma_cookie_t iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, struct iop_adma_chan *iop_chan, dma_cookie_t cookie) @@ -152,15 +78,9 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, if (tx->callback) tx->callback(tx->callback_param); - /* unmap dma addresses - * (unmap_single vs unmap_page?) 
- */ - if (desc->group_head && desc->unmap_len) { - if (iop_desc_is_pq(desc)) - iop_desc_unmap_pq(iop_chan, desc); - else - iop_desc_unmap(iop_chan, desc); - } + dma_descriptor_unmap(tx); + if (desc->group_head) + desc->group_head = NULL; } /* run dependent operations */ @@ -591,7 +511,6 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) if (sw_desc) { grp_start = sw_desc->group_head; iop_desc_init_interrupt(grp_start, iop_chan); - grp_start->unmap_len = 0; sw_desc->async_tx.flags = flags; } spin_unlock_bh(&iop_chan->lock); @@ -623,8 +542,6 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, iop_desc_set_byte_count(grp_start, iop_chan, len); iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); iop_desc_set_memcpy_src_addr(grp_start, dma_src); - sw_desc->unmap_src_cnt = 1; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; } spin_unlock_bh(&iop_chan->lock); @@ -657,8 +574,6 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, iop_desc_init_xor(grp_start, src_cnt, flags); iop_desc_set_byte_count(grp_start, iop_chan, len); iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); - sw_desc->unmap_src_cnt = src_cnt; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; while (src_cnt--) iop_desc_set_xor_src_addr(grp_start, src_cnt, @@ -694,8 +609,6 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src, grp_start->xor_check_result = result; pr_debug("\t%s: grp_start->xor_check_result: %p\n", __func__, grp_start->xor_check_result); - sw_desc->unmap_src_cnt = src_cnt; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; while (src_cnt--) iop_desc_set_zero_sum_src_addr(grp_start, src_cnt, @@ -748,8 +661,6 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, dst[0] = dst[1] & 0x7; iop_desc_set_pq_addr(g, dst); - sw_desc->unmap_src_cnt = src_cnt; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; for (i = 0; i < src_cnt; i++) iop_desc_set_pq_src_addr(g, i, src[i], scf[i]); @@ -804,8 +715,6 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, g->pq_check_result = pqres; pr_debug("\t%s: g->pq_check_result: %p\n", __func__, g->pq_check_result); - sw_desc->unmap_src_cnt = src_cnt+2; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; while (src_cnt--) iop_desc_set_pq_zero_sum_src_addr(g, src_cnt, @@ -864,7 +773,7 @@ static enum dma_status iop_adma_status(struct dma_chan *chan, int ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; iop_adma_slot_cleanup(iop_chan); @@ -983,7 +892,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device) msleep(1); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; @@ -1083,7 +992,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) msleep(8); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test xor timed out, disabling\n"); err = -ENODEV; @@ -1129,7 +1038,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) iop_adma_issue_pending(dma_chan); msleep(8); - if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test zero sum timed out, disabling\n"); err = -ENODEV; @@ -1158,7 +1067,7 @@ 
iop_adma_xor_val_self_test(struct iop_adma_device *device) iop_adma_issue_pending(dma_chan); msleep(8); - if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test non-zero sum timed out, disabling\n"); err = -ENODEV; @@ -1254,7 +1163,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) msleep(8); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dev, "Self-test pq timed out, disabling\n"); err = -ENODEV; goto free_resources; @@ -1291,7 +1200,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) msleep(8); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n"); err = -ENODEV; goto free_resources; @@ -1323,7 +1232,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) msleep(8); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n"); err = -ENODEV; goto free_resources; diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index cb9c0bc317e8..128ca143486d 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1232,8 +1232,10 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); descnew = desc; - dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n", - irq, sg_dma_address(*sg), sgnext ? sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf); + dev_dbg(dev, "IDMAC irq %d, dma %#llx, next dma %#llx, current %d, curbuf %#x\n", + irq, (u64)sg_dma_address(*sg), + sgnext ? 
(u64)sg_dma_address(sgnext) : 0, + ichan->active_buffer, curbuf); /* Find the descriptor of sgnext */ sgnew = idmac_sg_next(ichan, &descnew, *sg); diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index a2c330f5f952..e26075408e9b 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -344,7 +344,7 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan, size_t bytes = 0; ret = dma_cookie_status(&c->vc.chan, cookie, state); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; spin_lock_irqsave(&c->vc.lock, flags); @@ -693,7 +693,7 @@ static int k3_dma_probe(struct platform_device *op) irq = platform_get_irq(op, 0); ret = devm_request_irq(&op->dev, irq, - k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d); + k3_dma_int_handler, 0, DRIVER_NAME, d); if (ret) return ret; diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index ff8d7827f8cb..dcb1e05149a7 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -798,8 +798,7 @@ static void dma_do_tasklet(unsigned long data) * move the descriptors to a temporary list so we can drop * the lock during the entire cleanup operation */ - list_del(&desc->node); - list_add(&desc->node, &chain_cleanup); + list_move(&desc->node, &chain_cleanup); /* * Look for the first list entry which has the ENDIRQEN flag @@ -863,7 +862,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, if (irq) { ret = devm_request_irq(pdev->dev, irq, - mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy); + mmp_pdma_chan_handler, 0, "pdma", phy); if (ret) { dev_err(pdev->dev, "channel request irq fail!\n"); return ret; @@ -970,7 +969,7 @@ static int mmp_pdma_probe(struct platform_device *op) /* all chan share one irq, demux inside */ irq = platform_get_irq(op, 0); ret = devm_request_irq(pdev->dev, irq, - mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev); + mmp_pdma_int_handler, 0, "pdma", pdev); if (ret) return ret; } diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index d3b6358e5a27..3ddacc14a736 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -62,6 +62,11 @@ #define TDCR_BURSTSZ_16B (0x3 << 6) #define TDCR_BURSTSZ_32B (0x6 << 6) #define TDCR_BURSTSZ_64B (0x7 << 6) +#define TDCR_BURSTSZ_SQU_1B (0x5 << 6) +#define TDCR_BURSTSZ_SQU_2B (0x6 << 6) +#define TDCR_BURSTSZ_SQU_4B (0x0 << 6) +#define TDCR_BURSTSZ_SQU_8B (0x1 << 6) +#define TDCR_BURSTSZ_SQU_16B (0x3 << 6) #define TDCR_BURSTSZ_SQU_32B (0x7 << 6) #define TDCR_BURSTSZ_128B (0x5 << 6) #define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */ @@ -158,7 +163,7 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac) /* disable irq */ writel(0, tdmac->reg_base + TDIMR); - tdmac->status = DMA_SUCCESS; + tdmac->status = DMA_COMPLETE; } static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac) @@ -228,8 +233,31 @@ static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac) return -EINVAL; } } else if (tdmac->type == PXA910_SQU) { - tdcr |= TDCR_BURSTSZ_SQU_32B; tdcr |= TDCR_SSPMOD; + + switch (tdmac->burst_sz) { + case 1: + tdcr |= TDCR_BURSTSZ_SQU_1B; + break; + case 2: + tdcr |= TDCR_BURSTSZ_SQU_2B; + break; + case 4: + tdcr |= TDCR_BURSTSZ_SQU_4B; + break; + case 8: + tdcr |= TDCR_BURSTSZ_SQU_8B; + break; + case 16: + tdcr |= TDCR_BURSTSZ_SQU_16B; + break; + case 32: + tdcr |= TDCR_BURSTSZ_SQU_32B; + break; + default: + dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n"); + return -EINVAL; + } } writel(tdcr, tdmac->reg_base + TDCR); @@ -324,7 +352,7 @@ static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan) if 
(tdmac->irq) { ret = devm_request_irq(tdmac->dev, tdmac->irq, - mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac); + mmp_tdma_chan_handler, 0, "tdma", tdmac); if (ret) return ret; } @@ -365,7 +393,7 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( int num_periods = buf_len / period_len; int i = 0, buf = 0; - if (tdmac->status != DMA_SUCCESS) + if (tdmac->status != DMA_COMPLETE) return NULL; if (period_len > TDMA_MAX_XFER_BYTES) { @@ -499,7 +527,7 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, tdmac->idx = idx; tdmac->type = type; tdmac->reg_base = (unsigned long)tdev->base + idx * 4; - tdmac->status = DMA_SUCCESS; + tdmac->status = DMA_COMPLETE; tdev->tdmac[tdmac->idx] = tdmac; tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac); @@ -554,7 +582,7 @@ static int mmp_tdma_probe(struct platform_device *pdev) if (irq_num != chan_num) { irq = platform_get_irq(pdev, 0); ret = devm_request_irq(&pdev->dev, irq, - mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev); + mmp_tdma_int_handler, 0, "tdma", tdev); if (ret) return ret; } diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 536dcb8ba5fd..7807f0ef4e20 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -60,14 +60,6 @@ static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) return hw_desc->phy_dest_addr; } -static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc, - int src_idx) -{ - struct mv_xor_desc *hw_desc = desc->hw_desc; - return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)]; -} - - static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, u32 byte_count) { @@ -278,42 +270,9 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, desc->async_tx.callback( desc->async_tx.callback_param); - /* unmap dma addresses - * (unmap_single vs unmap_page?) - */ - if (desc->group_head && desc->unmap_len) { - struct mv_xor_desc_slot *unmap = desc->group_head; - struct device *dev = mv_chan_to_devp(mv_chan); - u32 len = unmap->unmap_len; - enum dma_ctrl_flags flags = desc->async_tx.flags; - u32 src_cnt; - dma_addr_t addr; - dma_addr_t dest; - - src_cnt = unmap->unmap_src_cnt; - dest = mv_desc_get_dest_addr(unmap); - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - enum dma_data_direction dir; - - if (src_cnt > 1) /* is xor ? 
*/ - dir = DMA_BIDIRECTIONAL; - else - dir = DMA_FROM_DEVICE; - dma_unmap_page(dev, dest, len, dir); - } - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - while (src_cnt--) { - addr = mv_desc_get_src_addr(unmap, - src_cnt); - if (addr == dest) - continue; - dma_unmap_page(dev, addr, len, - DMA_TO_DEVICE); - } - } + dma_descriptor_unmap(&desc->async_tx); + if (desc->group_head) desc->group_head = NULL; - } } /* run dependent operations */ @@ -749,7 +708,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) { + if (ret == DMA_COMPLETE) { mv_xor_clean_completed_slots(mv_chan); return ret; } @@ -874,7 +833,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) msleep(1); if (mv_xor_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; @@ -968,7 +927,7 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) msleep(8); if (mv_xor_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test xor timed out, disabling\n"); err = -ENODEV; @@ -1076,10 +1035,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev, } mv_chan->mmr_base = xordev->xor_base; - if (!mv_chan->mmr_base) { - ret = -ENOMEM; - goto err_free_dma; - } + mv_chan->mmr_high_base = xordev->xor_high_base; tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long) mv_chan); @@ -1138,7 +1094,7 @@ static void mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, const struct mbus_dram_target_info *dram) { - void __iomem *base = xordev->xor_base; + void __iomem *base = xordev->xor_high_base; u32 win_enable = 0; int i; diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index 06b067f24c9b..d0749229c875 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h @@ -34,13 +34,13 @@ #define XOR_OPERATION_MODE_MEMCPY 2 #define XOR_DESCRIPTOR_SWAP BIT(14) -#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4)) -#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4)) -#define XOR_BYTE_COUNT(chan) (chan->mmr_base + 0x220 + (chan->idx * 4)) -#define XOR_DEST_POINTER(chan) (chan->mmr_base + 0x2B0 + (chan->idx * 4)) -#define XOR_BLOCK_SIZE(chan) (chan->mmr_base + 0x2C0 + (chan->idx * 4)) -#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_base + 0x2E0) -#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_base + 0x2E4) +#define XOR_CURR_DESC(chan) (chan->mmr_high_base + 0x10 + (chan->idx * 4)) +#define XOR_NEXT_DESC(chan) (chan->mmr_high_base + 0x00 + (chan->idx * 4)) +#define XOR_BYTE_COUNT(chan) (chan->mmr_high_base + 0x20 + (chan->idx * 4)) +#define XOR_DEST_POINTER(chan) (chan->mmr_high_base + 0xB0 + (chan->idx * 4)) +#define XOR_BLOCK_SIZE(chan) (chan->mmr_high_base + 0xC0 + (chan->idx * 4)) +#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_high_base + 0xE0) +#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_high_base + 0xE4) #define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4)) #define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4)) @@ -50,11 +50,11 @@ #define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60) #define XOR_INTR_MASK_VALUE 0x3F5 -#define WINDOW_BASE(w) (0x250 + ((w) << 2)) -#define WINDOW_SIZE(w) (0x270 + ((w) << 2)) -#define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2)) -#define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2)) -#define WINDOW_OVERRIDE_CTRL(chan) (0x2A0 + ((chan) << 2)) +#define WINDOW_BASE(w) (0x50 + ((w) << 2)) 
+#define WINDOW_SIZE(w) (0x70 + ((w) << 2)) +#define WINDOW_REMAP_HIGH(w) (0x90 + ((w) << 2)) +#define WINDOW_BAR_ENABLE(chan) (0x40 + ((chan) << 2)) +#define WINDOW_OVERRIDE_CTRL(chan) (0xA0 + ((chan) << 2)) struct mv_xor_device { void __iomem *xor_base; @@ -82,6 +82,7 @@ struct mv_xor_chan { int pending; spinlock_t lock; /* protects the descriptor slot pool */ void __iomem *mmr_base; + void __iomem *mmr_high_base; unsigned int idx; int irq; enum dma_transaction_type current_type; diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index ccd13df841db..ead491346da7 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -27,6 +27,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_dma.h> +#include <linux/list.h> #include <asm/irq.h> @@ -57,6 +58,9 @@ (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70) #define HW_APBHX_CHn_SEMA(d, n) \ (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70) +#define HW_APBHX_CHn_BAR(d, n) \ + (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x070 : 0x130) + (n) * 0x70) +#define HW_APBX_CHn_DEBUG1(d, n) (0x150 + (n) * 0x70) /* * ccw bits definitions @@ -115,7 +119,9 @@ struct mxs_dma_chan { int desc_count; enum dma_status status; unsigned int flags; + bool reset; #define MXS_DMA_SG_LOOP (1 << 0) +#define MXS_DMA_USE_SEMAPHORE (1 << 1) }; #define MXS_DMA_CHANNELS 16 @@ -201,12 +207,47 @@ static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; int chan_id = mxs_chan->chan.chan_id; - if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) + /* + * mxs dma channel resets can cause a channel stall. To recover from a + * channel stall, we have to reset the whole DMA engine. To avoid this, + * we use cyclic DMA with semaphores, that are enhanced in + * mxs_dma_int_handler. To reset the channel, we can simply stop writing + * into the semaphore counter. + */ + if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE && + mxs_chan->flags & MXS_DMA_SG_LOOP) { + mxs_chan->reset = true; + } else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) { writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); - else + } else { + unsigned long elapsed = 0; + const unsigned long max_wait = 50000; /* 50ms */ + void __iomem *reg_dbg1 = mxs_dma->base + + HW_APBX_CHn_DEBUG1(mxs_dma, chan_id); + + /* + * On i.MX28 APBX, the DMA channel can stop working if we reset + * the channel while it is in READ_FLUSH (0x08) state. + * We wait here until we leave the state. Then we trigger the + * reset. Waiting a maximum of 50ms, the kernel shouldn't crash + * because of this. 
+ */ + while ((readl(reg_dbg1) & 0xf) == 0x8 && elapsed < max_wait) { + udelay(100); + elapsed += 100; + } + + if (elapsed >= max_wait) + dev_err(&mxs_chan->mxs_dma->pdev->dev, + "Failed waiting for the DMA channel %d to leave state READ_FLUSH, trying to reset channel in READ_FLUSH state now\n", + chan_id); + writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); + } + + mxs_chan->status = DMA_COMPLETE; } static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) @@ -219,12 +260,21 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id)); /* write 1 to SEMA to kick off the channel */ - writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); + if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE && + mxs_chan->flags & MXS_DMA_SG_LOOP) { + /* A cyclic DMA consists of at least 2 segments, so initialize + * the semaphore with 2 so we have enough time to add 1 to the + * semaphore if we need to */ + writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); + } else { + writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); + } + mxs_chan->reset = false; } static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) { - mxs_chan->status = DMA_SUCCESS; + mxs_chan->status = DMA_COMPLETE; } static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) @@ -272,58 +322,88 @@ static void mxs_dma_tasklet(unsigned long data) mxs_chan->desc.callback(mxs_chan->desc.callback_param); } +static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq) +{ + int i; + + for (i = 0; i != mxs_dma->nr_channels; ++i) + if (mxs_dma->mxs_chans[i].chan_irq == irq) + return i; + + return -EINVAL; +} + static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) { struct mxs_dma_engine *mxs_dma = dev_id; - u32 stat1, stat2; + struct mxs_dma_chan *mxs_chan; + u32 completed; + u32 err; + int chan = mxs_dma_irq_to_chan(mxs_dma, irq); + + if (chan < 0) + return IRQ_NONE; /* completion status */ - stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1); - stat1 &= MXS_DMA_CHANNELS_MASK; - writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR); + completed = readl(mxs_dma->base + HW_APBHX_CTRL1); + completed = (completed >> chan) & 0x1; + + /* Clear interrupt */ + writel((1 << chan), + mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR); /* error status */ - stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2); - writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR); + err = readl(mxs_dma->base + HW_APBHX_CTRL2); + err &= (1 << (MXS_DMA_CHANNELS + chan)) | (1 << chan); + + /* + * error status bit is in the upper 16 bits, error irq bit in the lower + * 16 bits. We transform it into a simpler error code: + * err: 0x00 = no error, 0x01 = TERMINATION, 0x02 = BUS_ERROR + */ + err = (err >> (MXS_DMA_CHANNELS + chan)) + (err >> chan); + + /* Clear error irq */ + writel((1 << chan), + mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR); /* * When both completion and error of termination bits set at the * same time, we do not take it as an error. IOW, it only becomes - * an error we need to handle here in case of either it's (1) a bus - * error or (2) a termination error with no completion. + * an error we need to handle here in case of either it's a bus + * error or a termination error with no completion. 0x01 is termination + * error, so we can subtract err & completed to get the real error case. 
*/ - stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ - (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */ - - /* combine error and completion status for checking */ - stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1; - while (stat1) { - int channel = fls(stat1) - 1; - struct mxs_dma_chan *mxs_chan = - &mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS]; - - if (channel >= MXS_DMA_CHANNELS) { - dev_dbg(mxs_dma->dma_device.dev, - "%s: error in channel %d\n", __func__, - channel - MXS_DMA_CHANNELS); - mxs_chan->status = DMA_ERROR; - mxs_dma_reset_chan(mxs_chan); - } else { - if (mxs_chan->flags & MXS_DMA_SG_LOOP) - mxs_chan->status = DMA_IN_PROGRESS; - else - mxs_chan->status = DMA_SUCCESS; - } + err -= err & completed; - stat1 &= ~(1 << channel); + mxs_chan = &mxs_dma->mxs_chans[chan]; - if (mxs_chan->status == DMA_SUCCESS) - dma_cookie_complete(&mxs_chan->desc); + if (err) { + dev_dbg(mxs_dma->dma_device.dev, + "%s: error in channel %d\n", __func__, + chan); + mxs_chan->status = DMA_ERROR; + mxs_dma_reset_chan(mxs_chan); + } else if (mxs_chan->status != DMA_COMPLETE) { + if (mxs_chan->flags & MXS_DMA_SG_LOOP) { + mxs_chan->status = DMA_IN_PROGRESS; + if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE) + writel(1, mxs_dma->base + + HW_APBHX_CHn_SEMA(mxs_dma, chan)); + } else { + mxs_chan->status = DMA_COMPLETE; + } + } - /* schedule tasklet on this channel */ - tasklet_schedule(&mxs_chan->tasklet); + if (mxs_chan->status == DMA_COMPLETE) { + if (mxs_chan->reset) + return IRQ_HANDLED; + dma_cookie_complete(&mxs_chan->desc); } + /* schedule tasklet on this channel */ + tasklet_schedule(&mxs_chan->tasklet); + return IRQ_HANDLED; } @@ -523,6 +603,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( mxs_chan->status = DMA_IN_PROGRESS; mxs_chan->flags |= MXS_DMA_SG_LOOP; + mxs_chan->flags |= MXS_DMA_USE_SEMAPHORE; if (num_periods > NUM_CCW) { dev_err(mxs_dma->dma_device.dev, @@ -554,6 +635,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( ccw->bits |= CCW_IRQ; ccw->bits |= CCW_HALT_ON_TERM; ccw->bits |= CCW_TERM_FLUSH; + ccw->bits |= CCW_DEC_SEM; ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? 
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); @@ -599,8 +681,24 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + u32 residue = 0; + + if (mxs_chan->status == DMA_IN_PROGRESS && + mxs_chan->flags & MXS_DMA_SG_LOOP) { + struct mxs_dma_ccw *last_ccw; + u32 bar; + + last_ccw = &mxs_chan->ccw[mxs_chan->desc_count - 1]; + residue = last_ccw->xfer_bytes + last_ccw->bufaddr; + + bar = readl(mxs_dma->base + + HW_APBHX_CHn_BAR(mxs_dma, chan->chan_id)); + residue -= bar; + } - dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0); + dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, + residue); return mxs_chan->status; } diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index ec3fc4fd9160..2f66cf4e54fe 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -248,7 +248,7 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan, unsigned long flags; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS || !txstate) + if (ret == DMA_COMPLETE || !txstate) return ret; spin_lock_irqsave(&c->vc.lock, flags); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index df8b10fd1726..cdf0483b8f2d 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2268,6 +2268,8 @@ static void pl330_tasklet(unsigned long data) list_move_tail(&desc->node, &pch->dmac->desc_pool); } + dma_descriptor_unmap(&desc->txd); + if (callback) { spin_unlock_irqrestore(&pch->lock, flags); callback(callback_param); @@ -2314,7 +2316,7 @@ bool pl330_filter(struct dma_chan *chan, void *param) return false; peri_id = chan->private; - return *peri_id == (unsigned)param; + return *peri_id == (unsigned long)param; } EXPORT_SYMBOL(pl330_filter); @@ -2926,16 +2928,23 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) amba_set_drvdata(adev, pdmac); - irq = adev->irq[0]; - ret = request_irq(irq, pl330_irq_handler, 0, - dev_name(&adev->dev), pi); - if (ret) - return ret; + for (i = 0; i < AMBA_NR_IRQS; i++) { + irq = adev->irq[i]; + if (irq) { + ret = devm_request_irq(&adev->dev, irq, + pl330_irq_handler, 0, + dev_name(&adev->dev), pi); + if (ret) + return ret; + } else { + break; + } + } pi->pcfg.periph_id = adev->periphid; ret = pl330_add(pi); if (ret) - goto probe_err1; + return ret; INIT_LIST_HEAD(&pdmac->desc_pool); spin_lock_init(&pdmac->pool_lock); @@ -3033,8 +3042,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) return 0; probe_err3: - amba_set_drvdata(adev, NULL); - /* Idle the DMAC */ list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, chan.device_node) { @@ -3048,8 +3055,6 @@ probe_err3: } probe_err2: pl330_del(pi); -probe_err1: - free_irq(irq, pi); return ret; } @@ -3059,7 +3064,6 @@ static int pl330_remove(struct amba_device *adev) struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev); struct dma_pl330_chan *pch, *_p; struct pl330_info *pi; - int irq; if (!pdmac) return 0; @@ -3068,7 +3072,6 @@ static int pl330_remove(struct amba_device *adev) of_dma_controller_free(adev->dev.of_node); dma_async_device_unregister(&pdmac->ddma); - amba_set_drvdata(adev, NULL); /* Idle the DMAC */ list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, @@ -3086,9 +3089,6 @@ static int pl330_remove(struct amba_device *adev) pl330_del(pi); - irq = adev->irq[0]; - free_irq(irq, pi); - return 0; } diff --git a/drivers/dma/ppc4xx/adma.c 
b/drivers/dma/ppc4xx/adma.c index e24b5ef486b5..8da48c6b2a38 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -804,218 +804,6 @@ static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan, } /** - * ppc440spe_desc_get_src_addr - extract the source address from the descriptor - */ -static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc, - struct ppc440spe_adma_chan *chan, int src_idx) -{ - struct dma_cdb *dma_hw_desc; - struct xor_cb *xor_hw_desc; - - switch (chan->device->id) { - case PPC440SPE_DMA0_ID: - case PPC440SPE_DMA1_ID: - dma_hw_desc = desc->hw_desc; - /* May have 0, 1, 2, or 3 sources */ - switch (dma_hw_desc->opc) { - case DMA_CDB_OPC_NO_OP: - case DMA_CDB_OPC_DFILL128: - return 0; - case DMA_CDB_OPC_DCHECK128: - if (unlikely(src_idx)) { - printk(KERN_ERR "%s: try to get %d source for" - " DCHECK128\n", __func__, src_idx); - BUG(); - } - return le32_to_cpu(dma_hw_desc->sg1l); - case DMA_CDB_OPC_MULTICAST: - case DMA_CDB_OPC_MV_SG1_SG2: - if (unlikely(src_idx > 2)) { - printk(KERN_ERR "%s: try to get %d source from" - " DMA descr\n", __func__, src_idx); - BUG(); - } - if (src_idx) { - if (le32_to_cpu(dma_hw_desc->sg1u) & - DMA_CUED_XOR_WIN_MSK) { - u8 region; - - if (src_idx == 1) - return le32_to_cpu( - dma_hw_desc->sg1l) + - desc->unmap_len; - - region = (le32_to_cpu( - dma_hw_desc->sg1u)) >> - DMA_CUED_REGION_OFF; - - region &= DMA_CUED_REGION_MSK; - switch (region) { - case DMA_RXOR123: - return le32_to_cpu( - dma_hw_desc->sg1l) + - (desc->unmap_len << 1); - case DMA_RXOR124: - return le32_to_cpu( - dma_hw_desc->sg1l) + - (desc->unmap_len * 3); - case DMA_RXOR125: - return le32_to_cpu( - dma_hw_desc->sg1l) + - (desc->unmap_len << 2); - default: - printk(KERN_ERR - "%s: try to" - " get src3 for region %02x" - "PPC440SPE_DESC_RXOR12?\n", - __func__, region); - BUG(); - } - } else { - printk(KERN_ERR - "%s: try to get %d" - " source for non-cued descr\n", - __func__, src_idx); - BUG(); - } - } - return le32_to_cpu(dma_hw_desc->sg1l); - default: - printk(KERN_ERR "%s: unknown OPC 0x%02x\n", - __func__, dma_hw_desc->opc); - BUG(); - } - return le32_to_cpu(dma_hw_desc->sg1l); - case PPC440SPE_XOR_ID: - /* May have up to 16 sources */ - xor_hw_desc = desc->hw_desc; - return xor_hw_desc->ops[src_idx].l; - } - return 0; -} - -/** - * ppc440spe_desc_get_dest_addr - extract the destination address from the - * descriptor - */ -static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc, - struct ppc440spe_adma_chan *chan, int idx) -{ - struct dma_cdb *dma_hw_desc; - struct xor_cb *xor_hw_desc; - - switch (chan->device->id) { - case PPC440SPE_DMA0_ID: - case PPC440SPE_DMA1_ID: - dma_hw_desc = desc->hw_desc; - - if (likely(!idx)) - return le32_to_cpu(dma_hw_desc->sg2l); - return le32_to_cpu(dma_hw_desc->sg3l); - case PPC440SPE_XOR_ID: - xor_hw_desc = desc->hw_desc; - return xor_hw_desc->cbtal; - } - return 0; -} - -/** - * ppc440spe_desc_get_src_num - extract the number of source addresses from - * the descriptor - */ -static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc, - struct ppc440spe_adma_chan *chan) -{ - struct dma_cdb *dma_hw_desc; - struct xor_cb *xor_hw_desc; - - switch (chan->device->id) { - case PPC440SPE_DMA0_ID: - case PPC440SPE_DMA1_ID: - dma_hw_desc = desc->hw_desc; - - switch (dma_hw_desc->opc) { - case DMA_CDB_OPC_NO_OP: - case DMA_CDB_OPC_DFILL128: - return 0; - case DMA_CDB_OPC_DCHECK128: - return 1; - case DMA_CDB_OPC_MV_SG1_SG2: - case DMA_CDB_OPC_MULTICAST: - /* - * 
Only for RXOR operations we have more than - * one source - */ - if (le32_to_cpu(dma_hw_desc->sg1u) & - DMA_CUED_XOR_WIN_MSK) { - /* RXOR op, there are 2 or 3 sources */ - if (((le32_to_cpu(dma_hw_desc->sg1u) >> - DMA_CUED_REGION_OFF) & - DMA_CUED_REGION_MSK) == DMA_RXOR12) { - /* RXOR 1-2 */ - return 2; - } else { - /* RXOR 1-2-3/1-2-4/1-2-5 */ - return 3; - } - } - return 1; - default: - printk(KERN_ERR "%s: unknown OPC 0x%02x\n", - __func__, dma_hw_desc->opc); - BUG(); - } - case PPC440SPE_XOR_ID: - /* up to 16 sources */ - xor_hw_desc = desc->hw_desc; - return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK; - default: - BUG(); - } - return 0; -} - -/** - * ppc440spe_desc_get_dst_num - get the number of destination addresses in - * this descriptor - */ -static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc, - struct ppc440spe_adma_chan *chan) -{ - struct dma_cdb *dma_hw_desc; - - switch (chan->device->id) { - case PPC440SPE_DMA0_ID: - case PPC440SPE_DMA1_ID: - /* May be 1 or 2 destinations */ - dma_hw_desc = desc->hw_desc; - switch (dma_hw_desc->opc) { - case DMA_CDB_OPC_NO_OP: - case DMA_CDB_OPC_DCHECK128: - return 0; - case DMA_CDB_OPC_MV_SG1_SG2: - case DMA_CDB_OPC_DFILL128: - return 1; - case DMA_CDB_OPC_MULTICAST: - if (desc->dst_cnt == 2) - return 2; - else - return 1; - default: - printk(KERN_ERR "%s: unknown OPC 0x%02x\n", - __func__, dma_hw_desc->opc); - BUG(); - } - case PPC440SPE_XOR_ID: - /* Always only 1 destination */ - return 1; - default: - BUG(); - } - return 0; -} - -/** * ppc440spe_desc_get_link - get the address of the descriptor that * follows this one */ @@ -1707,43 +1495,6 @@ static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot, } } -static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan, - struct ppc440spe_adma_desc_slot *desc) -{ - u32 src_cnt, dst_cnt; - dma_addr_t addr; - - /* - * get the number of sources & destination - * included in this descriptor and unmap - * them all - */ - src_cnt = ppc440spe_desc_get_src_num(desc, chan); - dst_cnt = ppc440spe_desc_get_dst_num(desc, chan); - - /* unmap destinations */ - if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - while (dst_cnt--) { - addr = ppc440spe_desc_get_dest_addr( - desc, chan, dst_cnt); - dma_unmap_page(chan->device->dev, - addr, desc->unmap_len, - DMA_FROM_DEVICE); - } - } - - /* unmap sources */ - if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - while (src_cnt--) { - addr = ppc440spe_desc_get_src_addr( - desc, chan, src_cnt); - dma_unmap_page(chan->device->dev, - addr, desc->unmap_len, - DMA_TO_DEVICE); - } - } -} - /** * ppc440spe_adma_run_tx_complete_actions - call functions to be called * upon completion @@ -1767,26 +1518,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions( desc->async_tx.callback( desc->async_tx.callback_param); - /* unmap dma addresses - * (unmap_single vs unmap_page?) 
- * - * actually, ppc's dma_unmap_page() functions are empty, so - * the following code is just for the sake of completeness - */ - if (chan && chan->needs_unmap && desc->group_head && - desc->unmap_len) { - struct ppc440spe_adma_desc_slot *unmap = - desc->group_head; - /* assume 1 slot per op always */ - u32 slot_count = unmap->slot_cnt; - - /* Run through the group list and unmap addresses */ - for (i = 0; i < slot_count; i++) { - BUG_ON(!unmap); - ppc440spe_adma_unmap(chan, unmap); - unmap = unmap->hw_next; - } - } + dma_descriptor_unmap(&desc->async_tx); } /* run dependent operations */ @@ -3893,7 +3625,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan, ppc440spe_chan = to_ppc440spe_adma_chan(chan); ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; ppc440spe_adma_slot_cleanup(ppc440spe_chan); diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 461a91ab70bb..ab26d46bbe15 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -436,7 +436,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, enum dma_status ret; ret = dma_cookie_status(&c->vc.chan, cookie, state); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; if (!state) diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index d94ab592cc1b..2e7b394def80 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -724,7 +724,7 @@ static enum dma_status shdma_tx_status(struct dma_chan *chan, * If we don't find cookie on the queue, it has been aborted and we have * to report error */ - if (status != DMA_SUCCESS) { + if (status != DMA_COMPLETE) { struct shdma_desc *sdesc; status = DMA_ERROR; list_for_each_entry(sdesc, &schan->ld_queue, node) diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 1069e8869f20..0d765c0e21ec 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@ -685,7 +685,7 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match); static int sh_dmae_probe(struct platform_device *pdev) { const struct sh_dmae_pdata *pdata; - unsigned long irqflags = IRQF_DISABLED, + unsigned long irqflags = 0, chan_flag[SH_DMAE_MAX_CHANNELS] = {}; int errirq, chan_irq[SH_DMAE_MAX_CHANNELS]; int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; @@ -838,7 +838,7 @@ static int sh_dmae_probe(struct platform_device *pdev) IORESOURCE_IRQ_SHAREABLE) chan_flag[irq_cnt] = IRQF_SHARED; else - chan_flag[irq_cnt] = IRQF_DISABLED; + chan_flag[irq_cnt] = 0; dev_dbg(&pdev->dev, "Found IRQ %d for channel %d\n", i, irq_cnt); diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 82d2b97ad942..b8c031b7de4e 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -14,6 +14,7 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/delay.h> +#include <linux/log2.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/err.h> @@ -2626,7 +2627,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan, } ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) + if (ret != DMA_COMPLETE) dma_set_residue(txstate, stedma40_residue(chan)); if (d40_is_paused(d40c)) @@ -2796,8 +2797,8 @@ static int d40_set_runtime_config(struct dma_chan *chan, src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || - ((src_addr_width > 1) && (src_addr_width & 1)) || - ((dst_addr_width > 1) && (dst_addr_width & 
1))) + !is_power_of_2(src_addr_width) || + !is_power_of_2(dst_addr_width)) return -EINVAL; cfg->src_info.data_width = src_addr_width; diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 5d4986e5f5fa..73654e33f13b 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -570,7 +570,7 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc, list_del(&sgreq->node); if (sgreq->last_sg) { - dma_desc->dma_status = DMA_SUCCESS; + dma_desc->dma_status = DMA_COMPLETE; dma_cookie_complete(&dma_desc->txd); if (!dma_desc->cb_count) list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); @@ -768,7 +768,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, unsigned int residual; ret = dma_cookie_status(dc, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; spin_lock_irqsave(&tdc->lock, flags); @@ -1018,7 +1018,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( return &dma_desc->txd; } -struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( +static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, unsigned long flags, void *context) diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 28af214fce04..4506a7b4f972 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -154,38 +154,6 @@ static bool __td_dma_done_ack(struct timb_dma_chan *td_chan) return done; } -static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc, - bool single) -{ - dma_addr_t addr; - int len; - - addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) | - dma_desc[4]; - - len = (dma_desc[3] << 8) | dma_desc[2]; - - if (single) - dma_unmap_single(chan2dev(&td_chan->chan), addr, len, - DMA_TO_DEVICE); - else - dma_unmap_page(chan2dev(&td_chan->chan), addr, len, - DMA_TO_DEVICE); -} - -static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single) -{ - struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan, - struct timb_dma_chan, chan); - u8 *descs; - - for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) { - __td_unmap_desc(td_chan, descs, single); - if (descs[0] & 0x02) - break; - } -} - static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc, struct scatterlist *sg, bool last) { @@ -293,10 +261,7 @@ static void __td_finish(struct timb_dma_chan *td_chan) list_move(&td_desc->desc_node, &td_chan->free_list); - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) - __td_unmap_descs(td_desc, - txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE); - + dma_descriptor_unmap(txd); /* * The API requires that no submissions are done from a * callback, so we don't need to drop the lock here diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 71e8e775189e..bae6c29f5502 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -419,30 +419,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, list_splice_init(&desc->tx_list, &dc->free_list); list_move(&desc->desc_node, &dc->free_list); - if (!ds) { - dma_addr_t dmaaddr; - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - dmaaddr = is_dmac64(dc) ? 
- desc->hwdesc.DAR : desc->hwdesc32.DAR; - if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(chan2parent(&dc->chan), - dmaaddr, desc->len, DMA_FROM_DEVICE); - else - dma_unmap_page(chan2parent(&dc->chan), - dmaaddr, desc->len, DMA_FROM_DEVICE); - } - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - dmaaddr = is_dmac64(dc) ? - desc->hwdesc.SAR : desc->hwdesc32.SAR; - if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(chan2parent(&dc->chan), - dmaaddr, desc->len, DMA_TO_DEVICE); - else - dma_unmap_page(chan2parent(&dc->chan), - dmaaddr, desc->len, DMA_TO_DEVICE); - } - } - + dma_descriptor_unmap(txd); /* * The API requires that no submissions are done from a * callback, so we don't need to drop the lock here @@ -962,8 +939,8 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) - return DMA_SUCCESS; + if (ret == DMA_COMPLETE) + return DMA_COMPLETE; spin_lock_bh(&dc->lock); txx9dmac_scan_descriptors(dc); diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c index 9ee1c76da7b9..374b57fc596d 100644 --- a/drivers/edac/cell_edac.c +++ b/drivers/edac/cell_edac.c @@ -163,6 +163,7 @@ static void cell_edac_init_csrows(struct mem_ctl_info *mci) csrow->first_page, nr_pages); break; } + of_node_put(np); } static int cell_edac_probe(struct platform_device *pdev) diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index 211021dfec73..102674346035 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c @@ -530,12 +530,9 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev) /* Report action taken */ edac_device_printk(edac_dev, KERN_INFO, - "Giving out device to module '%s' controller " - "'%s': DEV '%s' (%s)\n", - edac_dev->mod_name, - edac_dev->ctl_name, - edac_dev_name(edac_dev), - edac_op_state_to_string(edac_dev->op_state)); + "Giving out device to module %s controller %s: DEV %s (%s)\n", + edac_dev->mod_name, edac_dev->ctl_name, edac_dev->dev_name, + edac_op_state_to_string(edac_dev->op_state)); mutex_unlock(&device_ctls_mutex); return 0; diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 89e109022d78..e8c9ef03495b 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -788,8 +788,10 @@ int edac_mc_add_mc(struct mem_ctl_info *mci) } /* Report action taken */ - edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':" - " DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci)); + edac_mc_printk(mci, KERN_INFO, + "Giving out device to module %s controller %s: DEV %s (%s)\n", + mci->mod_name, mci->ctl_name, mci->dev_name, + edac_op_state_to_string(mci->op_state)); edac_mc_owner = mci->mod_name; diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c index dd370f92ace3..2cf44b4db80c 100644 --- a/drivers/edac/edac_pci.c +++ b/drivers/edac/edac_pci.c @@ -358,11 +358,9 @@ int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx) } edac_pci_printk(pci, KERN_INFO, - "Giving out device to module '%s' controller '%s':" - " DEV '%s' (%s)\n", - pci->mod_name, - pci->ctl_name, - edac_dev_name(pci), edac_op_state_to_string(pci->op_state)); + "Giving out device to module %s controller %s: DEV %s (%s)\n", + pci->mod_name, pci->ctl_name, pci->dev_name, + edac_op_state_to_string(pci->op_state)); mutex_unlock(&edac_pci_ctls_mutex); return 0; diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c index c2bd8c6a4349..2f193668ebc7 100644 --- 
a/drivers/edac/highbank_l2_edac.c +++ b/drivers/edac/highbank_l2_edac.c @@ -50,8 +50,15 @@ static irqreturn_t highbank_l2_err_handler(int irq, void *dev_id) return IRQ_HANDLED; } +static const struct of_device_id hb_l2_err_of_match[] = { + { .compatible = "calxeda,hb-sregs-l2-ecc", }, + {}, +}; +MODULE_DEVICE_TABLE(of, hb_l2_err_of_match); + static int highbank_l2_err_probe(struct platform_device *pdev) { + const struct of_device_id *id; struct edac_device_ctl_info *dci; struct hb_l2_drvdata *drvdata; struct resource *r; @@ -90,28 +97,32 @@ static int highbank_l2_err_probe(struct platform_device *pdev) goto err; } + id = of_match_device(hb_l2_err_of_match, &pdev->dev); + dci->mod_name = pdev->dev.driver->name; + dci->ctl_name = id ? id->compatible : "unknown"; + dci->dev_name = dev_name(&pdev->dev); + + if (edac_device_add_device(dci)) + goto err; + drvdata->db_irq = platform_get_irq(pdev, 0); res = devm_request_irq(&pdev->dev, drvdata->db_irq, highbank_l2_err_handler, 0, dev_name(&pdev->dev), dci); if (res < 0) - goto err; + goto err2; drvdata->sb_irq = platform_get_irq(pdev, 1); res = devm_request_irq(&pdev->dev, drvdata->sb_irq, highbank_l2_err_handler, 0, dev_name(&pdev->dev), dci); if (res < 0) - goto err; - - dci->mod_name = dev_name(&pdev->dev); - dci->dev_name = dev_name(&pdev->dev); - - if (edac_device_add_device(dci)) - goto err; + goto err2; devres_close_group(&pdev->dev, NULL); return 0; +err2: + edac_device_del_device(&pdev->dev); err: devres_release_group(&pdev->dev, NULL); edac_device_free_ctl_info(dci); @@ -127,12 +138,6 @@ static int highbank_l2_err_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id hb_l2_err_of_match[] = { - { .compatible = "calxeda,hb-sregs-l2-ecc", }, - {}, -}; -MODULE_DEVICE_TABLE(of, hb_l2_err_of_match); - static struct platform_driver highbank_l2_edac_driver = { .probe = highbank_l2_err_probe, .remove = highbank_l2_err_remove, diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c index 4695dd2d71fd..f784de1dc793 100644 --- a/drivers/edac/highbank_mc_edac.c +++ b/drivers/edac/highbank_mc_edac.c @@ -26,31 +26,40 @@ #include "edac_module.h" /* DDR Ctrlr Error Registers */ -#define HB_DDR_ECC_OPT 0x128 -#define HB_DDR_ECC_U_ERR_ADDR 0x130 -#define HB_DDR_ECC_U_ERR_STAT 0x134 -#define HB_DDR_ECC_U_ERR_DATAL 0x138 -#define HB_DDR_ECC_U_ERR_DATAH 0x13c -#define HB_DDR_ECC_C_ERR_ADDR 0x140 -#define HB_DDR_ECC_C_ERR_STAT 0x144 -#define HB_DDR_ECC_C_ERR_DATAL 0x148 -#define HB_DDR_ECC_C_ERR_DATAH 0x14c -#define HB_DDR_ECC_INT_STATUS 0x180 -#define HB_DDR_ECC_INT_ACK 0x184 -#define HB_DDR_ECC_U_ERR_ID 0x424 -#define HB_DDR_ECC_C_ERR_ID 0x428 -#define HB_DDR_ECC_INT_STAT_CE 0x8 -#define HB_DDR_ECC_INT_STAT_DOUBLE_CE 0x10 -#define HB_DDR_ECC_INT_STAT_UE 0x20 -#define HB_DDR_ECC_INT_STAT_DOUBLE_UE 0x40 +#define HB_DDR_ECC_ERR_BASE 0x128 +#define MW_DDR_ECC_ERR_BASE 0x1b4 + +#define HB_DDR_ECC_OPT 0x00 +#define HB_DDR_ECC_U_ERR_ADDR 0x08 +#define HB_DDR_ECC_U_ERR_STAT 0x0c +#define HB_DDR_ECC_U_ERR_DATAL 0x10 +#define HB_DDR_ECC_U_ERR_DATAH 0x14 +#define HB_DDR_ECC_C_ERR_ADDR 0x18 +#define HB_DDR_ECC_C_ERR_STAT 0x1c +#define HB_DDR_ECC_C_ERR_DATAL 0x20 +#define HB_DDR_ECC_C_ERR_DATAH 0x24 #define HB_DDR_ECC_OPT_MODE_MASK 0x3 #define HB_DDR_ECC_OPT_FWC 0x100 #define HB_DDR_ECC_OPT_XOR_SHIFT 16 +/* DDR Ctrlr Interrupt Registers */ + +#define HB_DDR_ECC_INT_BASE 0x180 +#define MW_DDR_ECC_INT_BASE 0x218 + +#define HB_DDR_ECC_INT_STATUS 0x00 +#define HB_DDR_ECC_INT_ACK 0x04 + +#define HB_DDR_ECC_INT_STAT_CE 0x8 
+#define HB_DDR_ECC_INT_STAT_DOUBLE_CE 0x10 +#define HB_DDR_ECC_INT_STAT_UE 0x20 +#define HB_DDR_ECC_INT_STAT_DOUBLE_UE 0x40 + struct hb_mc_drvdata { - void __iomem *mc_vbase; + void __iomem *mc_err_base; + void __iomem *mc_int_base; }; static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id) @@ -60,10 +69,10 @@ static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id) u32 status, err_addr; /* Read the interrupt status register */ - status = readl(drvdata->mc_vbase + HB_DDR_ECC_INT_STATUS); + status = readl(drvdata->mc_int_base + HB_DDR_ECC_INT_STATUS); if (status & HB_DDR_ECC_INT_STAT_UE) { - err_addr = readl(drvdata->mc_vbase + HB_DDR_ECC_U_ERR_ADDR); + err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_U_ERR_ADDR); edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, err_addr >> PAGE_SHIFT, err_addr & ~PAGE_MASK, 0, @@ -71,9 +80,9 @@ static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id) mci->ctl_name, ""); } if (status & HB_DDR_ECC_INT_STAT_CE) { - u32 syndrome = readl(drvdata->mc_vbase + HB_DDR_ECC_C_ERR_STAT); + u32 syndrome = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_STAT); syndrome = (syndrome >> 8) & 0xff; - err_addr = readl(drvdata->mc_vbase + HB_DDR_ECC_C_ERR_ADDR); + err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_ADDR); edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, err_addr >> PAGE_SHIFT, err_addr & ~PAGE_MASK, syndrome, @@ -82,66 +91,79 @@ static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id) } /* clear the error, clears the interrupt */ - writel(status, drvdata->mc_vbase + HB_DDR_ECC_INT_ACK); + writel(status, drvdata->mc_int_base + HB_DDR_ECC_INT_ACK); return IRQ_HANDLED; } -#ifdef CONFIG_EDAC_DEBUG -static ssize_t highbank_mc_err_inject_write(struct file *file, - const char __user *data, - size_t count, loff_t *ppos) +static void highbank_mc_err_inject(struct mem_ctl_info *mci, u8 synd) { - struct mem_ctl_info *mci = file->private_data; struct hb_mc_drvdata *pdata = mci->pvt_info; - char buf[32]; - size_t buf_size; u32 reg; + + reg = readl(pdata->mc_err_base + HB_DDR_ECC_OPT); + reg &= HB_DDR_ECC_OPT_MODE_MASK; + reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC; + writel(reg, pdata->mc_err_base + HB_DDR_ECC_OPT); +} + +#define to_mci(k) container_of(k, struct mem_ctl_info, dev) + +static ssize_t highbank_mc_inject_ctrl(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct mem_ctl_info *mci = to_mci(dev); u8 synd; - buf_size = min(count, (sizeof(buf)-1)); - if (copy_from_user(buf, data, buf_size)) - return -EFAULT; - buf[buf_size] = 0; + if (kstrtou8(buf, 16, &synd)) + return -EINVAL; - if (!kstrtou8(buf, 16, &synd)) { - reg = readl(pdata->mc_vbase + HB_DDR_ECC_OPT); - reg &= HB_DDR_ECC_OPT_MODE_MASK; - reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC; - writel(reg, pdata->mc_vbase + HB_DDR_ECC_OPT); - } + highbank_mc_err_inject(mci, synd); return count; } -static const struct file_operations highbank_mc_debug_inject_fops = { - .open = simple_open, - .write = highbank_mc_err_inject_write, - .llseek = generic_file_llseek, +static DEVICE_ATTR(inject_ctrl, S_IWUSR, NULL, highbank_mc_inject_ctrl); + +struct hb_mc_settings { + int err_offset; + int int_offset; }; -static void highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci) -{ - if (mci->debugfs) - debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci, - &highbank_mc_debug_inject_fops); -; -} -#else -static void highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci) -{} -#endif 
+static struct hb_mc_settings hb_settings = { + .err_offset = HB_DDR_ECC_ERR_BASE, + .int_offset = HB_DDR_ECC_INT_BASE, +}; + +static struct hb_mc_settings mw_settings = { + .err_offset = MW_DDR_ECC_ERR_BASE, + .int_offset = MW_DDR_ECC_INT_BASE, +}; + +static struct of_device_id hb_ddr_ctrl_of_match[] = { + { .compatible = "calxeda,hb-ddr-ctrl", .data = &hb_settings }, + { .compatible = "calxeda,ecx-2000-ddr-ctrl", .data = &mw_settings }, + {}, +}; +MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match); static int highbank_mc_probe(struct platform_device *pdev) { + const struct of_device_id *id; + const struct hb_mc_settings *settings; struct edac_mc_layer layers[2]; struct mem_ctl_info *mci; struct hb_mc_drvdata *drvdata; struct dimm_info *dimm; struct resource *r; + void __iomem *base; u32 control; int irq; int res = 0; + id = of_match_device(hb_ddr_ctrl_of_match, &pdev->dev); + if (!id) + return -ENODEV; + layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; layers[0].size = 1; layers[0].is_virt_csrow = true; @@ -174,35 +196,31 @@ static int highbank_mc_probe(struct platform_device *pdev) goto err; } - drvdata->mc_vbase = devm_ioremap(&pdev->dev, - r->start, resource_size(r)); - if (!drvdata->mc_vbase) { + base = devm_ioremap(&pdev->dev, r->start, resource_size(r)); + if (!base) { dev_err(&pdev->dev, "Unable to map regs\n"); res = -ENOMEM; goto err; } - control = readl(drvdata->mc_vbase + HB_DDR_ECC_OPT) & 0x3; + settings = id->data; + drvdata->mc_err_base = base + settings->err_offset; + drvdata->mc_int_base = base + settings->int_offset; + + control = readl(drvdata->mc_err_base + HB_DDR_ECC_OPT) & 0x3; if (!control || (control == 0x2)) { dev_err(&pdev->dev, "No ECC present, or ECC disabled\n"); res = -ENODEV; goto err; } - irq = platform_get_irq(pdev, 0); - res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler, - 0, dev_name(&pdev->dev), mci); - if (res < 0) { - dev_err(&pdev->dev, "Unable to request irq %d\n", irq); - goto err; - } - mci->mtype_cap = MEM_FLAG_DDR3; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_SECDED; - mci->mod_name = dev_name(&pdev->dev); + mci->mod_name = pdev->dev.driver->name; mci->mod_ver = "1"; - mci->ctl_name = dev_name(&pdev->dev); + mci->ctl_name = id->compatible; + mci->dev_name = dev_name(&pdev->dev); mci->scrub_mode = SCRUB_SW_SRC; /* Only a single 4GB DIMM is supported */ @@ -217,10 +235,20 @@ static int highbank_mc_probe(struct platform_device *pdev) if (res < 0) goto err; - highbank_mc_create_debugfs_nodes(mci); + irq = platform_get_irq(pdev, 0); + res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler, + 0, dev_name(&pdev->dev), mci); + if (res < 0) { + dev_err(&pdev->dev, "Unable to request irq %d\n", irq); + goto err2; + } + + device_create_file(&mci->dev, &dev_attr_inject_ctrl); devres_close_group(&pdev->dev, NULL); return 0; +err2: + edac_mc_del_mc(&pdev->dev); err: devres_release_group(&pdev->dev, NULL); edac_mc_free(mci); @@ -231,17 +259,12 @@ static int highbank_mc_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = platform_get_drvdata(pdev); + device_remove_file(&mci->dev, &dev_attr_inject_ctrl); edac_mc_del_mc(&pdev->dev); edac_mc_free(mci); return 0; } -static const struct of_device_id hb_ddr_ctrl_of_match[] = { - { .compatible = "calxeda,hb-ddr-ctrl", }, - {}, -}; -MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match); - static struct platform_driver highbank_mc_edac_driver = { .probe = highbank_mc_probe, .remove = highbank_mc_remove, diff --git a/drivers/edac/mpc85xx_edac.c 
b/drivers/edac/mpc85xx_edac.c index 3eb32f62d72a..fd46b0bd5f2a 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c @@ -327,28 +327,6 @@ err: } EXPORT_SYMBOL(mpc85xx_pci_err_probe); -static int mpc85xx_pci_err_remove(struct platform_device *op) -{ - struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev); - struct mpc85xx_pci_pdata *pdata = pci->pvt_info; - - edac_dbg(0, "\n"); - - out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, - orig_pci_err_cap_dr); - - out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en); - - edac_pci_del_device(pci->dev); - - if (edac_op_state == EDAC_OPSTATE_INT) - irq_dispose_mapping(pdata->irq); - - edac_pci_free_ctl_info(pci); - - return 0; -} - #endif /* CONFIG_PCI */ /**************************** L2 Err device ***************************/ diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 88f60c5fecbc..8472405c5586 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -34,7 +34,7 @@ static int probed; /* * Alter this version for the module when modifications are made */ -#define SBRIDGE_REVISION " Ver: 1.0.0 " +#define SBRIDGE_REVISION " Ver: 1.1.0 " #define EDAC_MOD_STR "sbridge_edac" /* @@ -83,11 +83,17 @@ static int probed; #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */ /* Devices 12 Function 6, Offsets 0x80 to 0xcc */ -static const u32 dram_rule[] = { +static const u32 sbridge_dram_rule[] = { 0x80, 0x88, 0x90, 0x98, 0xa0, 0xa8, 0xb0, 0xb8, 0xc0, 0xc8, }; -#define MAX_SAD ARRAY_SIZE(dram_rule) + +static const u32 ibridge_dram_rule[] = { + 0x60, 0x68, 0x70, 0x78, 0x80, + 0x88, 0x90, 0x98, 0xa0, 0xa8, + 0xb0, 0xb8, 0xc0, 0xc8, 0xd0, + 0xd8, 0xe0, 0xe8, 0xf0, 0xf8, +}; #define SAD_LIMIT(reg) ((GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff) #define DRAM_ATTR(reg) GET_BITFIELD(reg, 2, 3) @@ -108,43 +114,50 @@ static char *get_dram_attr(u32 reg) } } -static const u32 interleave_list[] = { +static const u32 sbridge_interleave_list[] = { 0x84, 0x8c, 0x94, 0x9c, 0xa4, 0xac, 0xb4, 0xbc, 0xc4, 0xcc, }; -#define MAX_INTERLEAVE ARRAY_SIZE(interleave_list) - -#define SAD_PKG0(reg) GET_BITFIELD(reg, 0, 2) -#define SAD_PKG1(reg) GET_BITFIELD(reg, 3, 5) -#define SAD_PKG2(reg) GET_BITFIELD(reg, 8, 10) -#define SAD_PKG3(reg) GET_BITFIELD(reg, 11, 13) -#define SAD_PKG4(reg) GET_BITFIELD(reg, 16, 18) -#define SAD_PKG5(reg) GET_BITFIELD(reg, 19, 21) -#define SAD_PKG6(reg) GET_BITFIELD(reg, 24, 26) -#define SAD_PKG7(reg) GET_BITFIELD(reg, 27, 29) - -static inline int sad_pkg(u32 reg, int interleave) + +static const u32 ibridge_interleave_list[] = { + 0x64, 0x6c, 0x74, 0x7c, 0x84, + 0x8c, 0x94, 0x9c, 0xa4, 0xac, + 0xb4, 0xbc, 0xc4, 0xcc, 0xd4, + 0xdc, 0xe4, 0xec, 0xf4, 0xfc, +}; + +struct interleave_pkg { + unsigned char start; + unsigned char end; +}; + +static const struct interleave_pkg sbridge_interleave_pkg[] = { + { 0, 2 }, + { 3, 5 }, + { 8, 10 }, + { 11, 13 }, + { 16, 18 }, + { 19, 21 }, + { 24, 26 }, + { 27, 29 }, +}; + +static const struct interleave_pkg ibridge_interleave_pkg[] = { + { 0, 3 }, + { 4, 7 }, + { 8, 11 }, + { 12, 15 }, + { 16, 19 }, + { 20, 23 }, + { 24, 27 }, + { 28, 31 }, +}; + +static inline int sad_pkg(const struct interleave_pkg *table, u32 reg, + int interleave) { - switch (interleave) { - case 0: - return SAD_PKG0(reg); - case 1: - return SAD_PKG1(reg); - case 2: - return SAD_PKG2(reg); - case 3: - return SAD_PKG3(reg); - case 4: - return SAD_PKG4(reg); - case 5: - return SAD_PKG5(reg); - case 6: - return SAD_PKG6(reg); - case 7: - return SAD_PKG7(reg); - default: - 
return -EINVAL; - } + return GET_BITFIELD(reg, table[interleave].start, + table[interleave].end); } /* Devices 12 Function 7 */ @@ -262,7 +275,9 @@ static const u32 correrrthrsld[] = { /* Device 17, function 0 */ -#define RANK_CFG_A 0x0328 +#define SB_RANK_CFG_A 0x0328 + +#define IB_RANK_CFG_A 0x0320 #define IS_RDIMM_ENABLED(reg) GET_BITFIELD(reg, 11, 11) @@ -273,8 +288,23 @@ static const u32 correrrthrsld[] = { #define NUM_CHANNELS 4 #define MAX_DIMMS 3 /* Max DIMMS per channel */ +enum type { + SANDY_BRIDGE, + IVY_BRIDGE, +}; + +struct sbridge_pvt; struct sbridge_info { - u32 mcmtr; + enum type type; + u32 mcmtr; + u32 rankcfgr; + u64 (*get_tolm)(struct sbridge_pvt *pvt); + u64 (*get_tohm)(struct sbridge_pvt *pvt); + const u32 *dram_rule; + const u32 *interleave_list; + const struct interleave_pkg *interleave_pkg; + u8 max_sad; + u8 max_interleave; }; struct sbridge_channel { @@ -305,8 +335,9 @@ struct sbridge_dev { struct sbridge_pvt { struct pci_dev *pci_ta, *pci_ddrio, *pci_ras; - struct pci_dev *pci_sad0, *pci_sad1, *pci_ha0; - struct pci_dev *pci_br; + struct pci_dev *pci_sad0, *pci_sad1; + struct pci_dev *pci_ha0, *pci_ha1; + struct pci_dev *pci_br0, *pci_br1; struct pci_dev *pci_tad[NUM_CHANNELS]; struct sbridge_dev *sbridge_dev; @@ -364,11 +395,75 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = { {0,} /* 0 terminated list. */ }; +/* This changes depending if 1HA or 2HA: + * 1HA: + * 0x0eb8 (17.0) is DDRIO0 + * 2HA: + * 0x0ebc (17.4) is DDRIO0 + */ +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0 0x0eb8 +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0 0x0ebc + +/* pci ids */ +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0 0x0ea0 +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA 0x0ea8 +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS 0x0e71 +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0 0x0eaa +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1 0x0eab +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2 0x0eac +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3 0x0ead +#define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD 0x0ec8 +#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0 0x0ec9 +#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1 0x0eca +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1 0x0e60 +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA 0x0e68 +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS 0x0e79 +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 0x0e6a +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1 0x0e6b + +static const struct pci_id_descr pci_dev_descr_ibridge[] = { + /* Processor Home Agent */ + { PCI_DESCR(14, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0) }, + + /* Memory controller */ + { PCI_DESCR(15, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0) }, + { PCI_DESCR(15, 1, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0) }, + { PCI_DESCR(15, 2, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0) }, + { PCI_DESCR(15, 3, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0) }, + { PCI_DESCR(15, 4, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0) }, + { PCI_DESCR(15, 5, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0) }, + + /* System Address Decoder */ + { PCI_DESCR(22, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0) }, + + /* Broadcast Registers */ + { PCI_DESCR(22, 1, PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1) }, + { PCI_DESCR(22, 2, PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0) }, + + /* Optional, mode 2HA */ + { PCI_DESCR(28, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1) }, +#if 0 + { PCI_DESCR(29, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1) }, + { PCI_DESCR(29, 1, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1) }, +#endif + { PCI_DESCR(29, 2, 
PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1) }, + { PCI_DESCR(29, 3, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1) }, + + { PCI_DESCR(17, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1) }, + { PCI_DESCR(17, 4, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1) }, +}; + +static const struct pci_id_table pci_dev_descr_ibridge_table[] = { + PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge), + {0,} /* 0 terminated list. */ +}; + /* * pci_device_id table for which devices we are looking for */ static DEFINE_PCI_DEVICE_TABLE(sbridge_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)}, {0,} /* 0 terminated list. */ }; @@ -458,6 +553,52 @@ static void free_sbridge_dev(struct sbridge_dev *sbridge_dev) kfree(sbridge_dev); } +static u64 sbridge_get_tolm(struct sbridge_pvt *pvt) +{ + u32 reg; + + /* Address range is 32:28 */ + pci_read_config_dword(pvt->pci_sad1, TOLM, ®); + return GET_TOLM(reg); +} + +static u64 sbridge_get_tohm(struct sbridge_pvt *pvt) +{ + u32 reg; + + pci_read_config_dword(pvt->pci_sad1, TOHM, ®); + return GET_TOHM(reg); +} + +static u64 ibridge_get_tolm(struct sbridge_pvt *pvt) +{ + u32 reg; + + pci_read_config_dword(pvt->pci_br1, TOLM, ®); + + return GET_TOLM(reg); +} + +static u64 ibridge_get_tohm(struct sbridge_pvt *pvt) +{ + u32 reg; + + pci_read_config_dword(pvt->pci_br1, TOHM, ®); + + return GET_TOHM(reg); +} + +static inline u8 sad_pkg_socket(u8 pkg) +{ + /* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */ + return (pkg >> 3) | (pkg & 0x3); +} + +static inline u8 sad_pkg_ha(u8 pkg) +{ + return (pkg >> 2) & 0x1; +} + /**************************************************************************** Memory check routines ****************************************************************************/ @@ -520,10 +661,10 @@ static int get_dimm_config(struct mem_ctl_info *mci) enum edac_type mode; enum mem_type mtype; - pci_read_config_dword(pvt->pci_br, SAD_TARGET, ®); + pci_read_config_dword(pvt->pci_br0, SAD_TARGET, ®); pvt->sbridge_dev->source_id = SOURCE_ID(reg); - pci_read_config_dword(pvt->pci_br, SAD_CONTROL, ®); + pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, ®); pvt->sbridge_dev->node_id = NODE_ID(reg); edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n", pvt->sbridge_dev->mc, @@ -558,7 +699,8 @@ static int get_dimm_config(struct mem_ctl_info *mci) } if (pvt->pci_ddrio) { - pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, ®); + pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr, + ®); if (IS_RDIMM_ENABLED(reg)) { /* FIXME: Can also be LRDIMM */ edac_dbg(0, "Memory is registered\n"); @@ -629,19 +771,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci) * Step 1) Get TOLM/TOHM ranges */ - /* Address range is 32:28 */ - pci_read_config_dword(pvt->pci_sad1, TOLM, - ®); - pvt->tolm = GET_TOLM(reg); + pvt->tolm = pvt->info.get_tolm(pvt); tmp_mb = (1 + pvt->tolm) >> 20; mb = div_u64_rem(tmp_mb, 1000, &kb); edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm); /* Address range is already 45:25 */ - pci_read_config_dword(pvt->pci_sad1, TOHM, - ®); - pvt->tohm = GET_TOHM(reg); + pvt->tohm = pvt->info.get_tohm(pvt); tmp_mb = (1 + pvt->tohm) >> 20; mb = div_u64_rem(tmp_mb, 1000, &kb); @@ -654,9 +791,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci) * algorithm bellow. 
*/ prv = 0; - for (n_sads = 0; n_sads < MAX_SAD; n_sads++) { + for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) { /* SAD_LIMIT Address range is 45:26 */ - pci_read_config_dword(pvt->pci_sad0, dram_rule[n_sads], + pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads], ®); limit = SAD_LIMIT(reg); @@ -677,15 +814,16 @@ static void get_memory_layout(const struct mem_ctl_info *mci) reg); prv = limit; - pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads], + pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads], ®); - sad_interl = sad_pkg(reg, 0); + sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0); for (j = 0; j < 8; j++) { - if (j > 0 && sad_interl == sad_pkg(reg, j)) + u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j); + if (j > 0 && sad_interl == pkg) break; edac_dbg(0, "SAD#%d, interleave #%d: %d\n", - n_sads, j, sad_pkg(reg, j)); + n_sads, j, pkg); } } @@ -797,12 +935,13 @@ static int get_memory_error_data(struct mem_ctl_info *mci, { struct mem_ctl_info *new_mci; struct sbridge_pvt *pvt = mci->pvt_info; + struct pci_dev *pci_ha; int n_rir, n_sads, n_tads, sad_way, sck_xch; int sad_interl, idx, base_ch; int interleave_mode; - unsigned sad_interleave[MAX_INTERLEAVE]; + unsigned sad_interleave[pvt->info.max_interleave]; u32 reg; - u8 ch_way,sck_way; + u8 ch_way, sck_way, pkg, sad_ha = 0; u32 tad_offset; u32 rir_way; u32 mb, kb; @@ -828,8 +967,8 @@ static int get_memory_error_data(struct mem_ctl_info *mci, /* * Step 1) Get socket */ - for (n_sads = 0; n_sads < MAX_SAD; n_sads++) { - pci_read_config_dword(pvt->pci_sad0, dram_rule[n_sads], + for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) { + pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads], ®); if (!DRAM_RULE_ENABLE(reg)) @@ -844,53 +983,65 @@ static int get_memory_error_data(struct mem_ctl_info *mci, break; prv = limit; } - if (n_sads == MAX_SAD) { + if (n_sads == pvt->info.max_sad) { sprintf(msg, "Can't discover the memory socket"); return -EINVAL; } *area_type = get_dram_attr(reg); interleave_mode = INTERLEAVE_MODE(reg); - pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads], + pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads], ®); - sad_interl = sad_pkg(reg, 0); - for (sad_way = 0; sad_way < 8; sad_way++) { - if (sad_way > 0 && sad_interl == sad_pkg(reg, sad_way)) + + if (pvt->info.type == SANDY_BRIDGE) { + sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0); + for (sad_way = 0; sad_way < 8; sad_way++) { + u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way); + if (sad_way > 0 && sad_interl == pkg) + break; + sad_interleave[sad_way] = pkg; + edac_dbg(0, "SAD interleave #%d: %d\n", + sad_way, sad_interleave[sad_way]); + } + edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n", + pvt->sbridge_dev->mc, + n_sads, + addr, + limit, + sad_way + 7, + !interleave_mode ? "" : "XOR[18:16]"); + if (interleave_mode) + idx = ((addr >> 6) ^ (addr >> 16)) & 7; + else + idx = (addr >> 6) & 7; + switch (sad_way) { + case 1: + idx = 0; break; - sad_interleave[sad_way] = sad_pkg(reg, sad_way); - edac_dbg(0, "SAD interleave #%d: %d\n", - sad_way, sad_interleave[sad_way]); - } - edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n", - pvt->sbridge_dev->mc, - n_sads, - addr, - limit, - sad_way + 7, - interleave_mode ? 
"" : "XOR[18:16]"); - if (interleave_mode) - idx = ((addr >> 6) ^ (addr >> 16)) & 7; - else + case 2: + idx = idx & 1; + break; + case 4: + idx = idx & 3; + break; + case 8: + break; + default: + sprintf(msg, "Can't discover socket interleave"); + return -EINVAL; + } + *socket = sad_interleave[idx]; + edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n", + idx, sad_way, *socket); + } else { + /* Ivy Bridge's SAD mode doesn't support XOR interleave mode */ idx = (addr >> 6) & 7; - switch (sad_way) { - case 1: - idx = 0; - break; - case 2: - idx = idx & 1; - break; - case 4: - idx = idx & 3; - break; - case 8: - break; - default: - sprintf(msg, "Can't discover socket interleave"); - return -EINVAL; + pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx); + *socket = sad_pkg_socket(pkg); + sad_ha = sad_pkg_ha(pkg); + edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n", + idx, *socket, sad_ha); } - *socket = sad_interleave[idx]; - edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n", - idx, sad_way, *socket); /* * Move to the proper node structure, in order to access the @@ -909,9 +1060,16 @@ static int get_memory_error_data(struct mem_ctl_info *mci, * Step 2) Get memory channel */ prv = 0; + if (pvt->info.type == SANDY_BRIDGE) + pci_ha = pvt->pci_ha0; + else { + if (sad_ha) + pci_ha = pvt->pci_ha1; + else + pci_ha = pvt->pci_ha0; + } for (n_tads = 0; n_tads < MAX_TAD; n_tads++) { - pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads], - ®); + pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], ®); limit = TAD_LIMIT(reg); if (limit <= prv) { sprintf(msg, "Can't discover the memory channel"); @@ -921,14 +1079,13 @@ static int get_memory_error_data(struct mem_ctl_info *mci, break; prv = limit; } + if (n_tads == MAX_TAD) { + sprintf(msg, "Can't discover the memory channel"); + return -EINVAL; + } + ch_way = TAD_CH(reg) + 1; sck_way = TAD_SOCK(reg) + 1; - /* - * FIXME: Is it right to always use channel 0 for offsets? - */ - pci_read_config_dword(pvt->pci_tad[0], - tad_ch_nilv_offset[n_tads], - &tad_offset); if (ch_way == 3) idx = addr >> 6; @@ -958,6 +1115,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci, } *channel_mask = 1 << base_ch; + pci_read_config_dword(pvt->pci_tad[base_ch], + tad_ch_nilv_offset[n_tads], + &tad_offset); + if (pvt->is_mirrored) { *channel_mask |= 1 << ((base_ch + 2) % 4); switch(ch_way) { @@ -1091,12 +1252,6 @@ static void sbridge_put_all_devices(void) } } -/* - * sbridge_get_all_devices Find and perform 'get' operation on the MCH's - * device/functions we want to reference for this driver - * - * Need to 'get' device 16 func 1 and func 2 - */ static int sbridge_get_onedevice(struct pci_dev **prev, u8 *num_mc, const struct pci_id_table *table, @@ -1198,11 +1353,21 @@ static int sbridge_get_onedevice(struct pci_dev **prev, return 0; } -static int sbridge_get_all_devices(u8 *num_mc) +/* + * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's + * device/functions we want to reference for this driver. + * Need to 'get' device 16 func 1 and func 2. + * @num_mc: pointer to the memory controllers count, to be incremented in case + * of success. 
+ * @table: model specific table + * + * returns 0 in case of success or error code + */ +static int sbridge_get_all_devices(u8 *num_mc, + const struct pci_id_table *table) { int i, rc; struct pci_dev *pdev = NULL; - const struct pci_id_table *table = pci_dev_descr_sbridge_table; while (table && table->descr) { for (i = 0; i < table->n_devs; i++) { @@ -1226,8 +1391,8 @@ static int sbridge_get_all_devices(u8 *num_mc) return 0; } -static int mci_bind_devs(struct mem_ctl_info *mci, - struct sbridge_dev *sbridge_dev) +static int sbridge_mci_bind_devs(struct mem_ctl_info *mci, + struct sbridge_dev *sbridge_dev) { struct sbridge_pvt *pvt = mci->pvt_info; struct pci_dev *pdev; @@ -1255,7 +1420,7 @@ static int mci_bind_devs(struct mem_ctl_info *mci, case 13: switch (func) { case 6: - pvt->pci_br = pdev; + pvt->pci_br0 = pdev; break; default: goto error; @@ -1329,6 +1494,131 @@ error: return -EINVAL; } +static int ibridge_mci_bind_devs(struct mem_ctl_info *mci, + struct sbridge_dev *sbridge_dev) +{ + struct sbridge_pvt *pvt = mci->pvt_info; + struct pci_dev *pdev, *tmp; + int i, func, slot; + bool mode_2ha = false; + + tmp = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, NULL); + if (tmp) { + mode_2ha = true; + pci_dev_put(tmp); + } + + for (i = 0; i < sbridge_dev->n_devs; i++) { + pdev = sbridge_dev->pdev[i]; + if (!pdev) + continue; + slot = PCI_SLOT(pdev->devfn); + func = PCI_FUNC(pdev->devfn); + + switch (slot) { + case 14: + if (func == 0) { + pvt->pci_ha0 = pdev; + break; + } + goto error; + case 15: + switch (func) { + case 0: + pvt->pci_ta = pdev; + break; + case 1: + pvt->pci_ras = pdev; + break; + case 4: + case 5: + /* if we have 2 HAs active, channels 2 and 3 + * are in other device */ + if (mode_2ha) + break; + /* fall through */ + case 2: + case 3: + pvt->pci_tad[func - 2] = pdev; + break; + default: + goto error; + } + break; + case 17: + if (func == 4) { + pvt->pci_ddrio = pdev; + break; + } else if (func == 0) { + if (!mode_2ha) + pvt->pci_ddrio = pdev; + break; + } + goto error; + case 22: + switch (func) { + case 0: + pvt->pci_sad0 = pdev; + break; + case 1: + pvt->pci_br0 = pdev; + break; + case 2: + pvt->pci_br1 = pdev; + break; + default: + goto error; + } + break; + case 28: + if (func == 0) { + pvt->pci_ha1 = pdev; + break; + } + goto error; + case 29: + /* we shouldn't have this device if we have just one + * HA present */ + WARN_ON(!mode_2ha); + if (func == 2 || func == 3) { + pvt->pci_tad[func] = pdev; + break; + } + goto error; + default: + goto error; + } + + edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n", + sbridge_dev->bus, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), + pdev); + } + + /* Check if everything were registered */ + if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_br0 || + !pvt->pci_br1 || !pvt->pci_tad || !pvt->pci_ras || + !pvt->pci_ta) + goto enodev; + + for (i = 0; i < NUM_CHANNELS; i++) { + if (!pvt->pci_tad[i]) + goto enodev; + } + return 0; + +enodev: + sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); + return -ENODEV; + +error: + sbridge_printk(KERN_ERR, + "Device %d, function %d is out of the expected range\n", + slot, func); + return -EINVAL; +} + /**************************************************************************** Error check routines ****************************************************************************/ @@ -1349,7 +1639,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); bool overflow = GET_BITFIELD(m->status, 
62, 62); bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); - bool recoverable = GET_BITFIELD(m->status, 56, 56); + bool recoverable; u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52); u32 mscod = GET_BITFIELD(m->status, 16, 31); u32 errcode = GET_BITFIELD(m->status, 0, 15); @@ -1360,6 +1650,11 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, int rc, dimm; char *area_type = NULL; + if (pvt->info.type == IVY_BRIDGE) + recoverable = true; + else + recoverable = GET_BITFIELD(m->status, 56, 56); + if (uncorrected_error) { if (ripv) { type = "FATAL"; @@ -1409,6 +1704,10 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, } } + /* Only decode errors with an valid address (ADDRV) */ + if (!GET_BITFIELD(m->status, 58, 58)) + return; + rc = get_memory_error_data(mci, m->addr, &socket, &channel_mask, &rank, &area_type, msg); if (rc < 0) @@ -1614,11 +1913,12 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev) sbridge_dev->mci = NULL; } -static int sbridge_register_mci(struct sbridge_dev *sbridge_dev) +static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) { struct mem_ctl_info *mci; struct edac_mc_layer layers[2]; struct sbridge_pvt *pvt; + struct pci_dev *pdev = sbridge_dev->pdev[0]; int rc; /* Check the number of active and not disabled channels */ @@ -1640,7 +1940,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev) return -ENOMEM; edac_dbg(0, "MC: mci = %p, dev = %p\n", - mci, &sbridge_dev->pdev[0]->dev); + mci, &pdev->dev); pvt = mci->pvt_info; memset(pvt, 0, sizeof(*pvt)); @@ -1654,24 +1954,52 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev) mci->edac_cap = EDAC_FLAG_NONE; mci->mod_name = "sbridge_edac.c"; mci->mod_ver = SBRIDGE_REVISION; - mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx); - mci->dev_name = pci_name(sbridge_dev->pdev[0]); + mci->dev_name = pci_name(pdev); mci->ctl_page_to_phys = NULL; /* Set the function pointer to an actual operation function */ mci->edac_check = sbridge_check_error; - /* Store pci devices at mci for faster access */ - rc = mci_bind_devs(mci, sbridge_dev); - if (unlikely(rc < 0)) - goto fail0; + pvt->info.type = type; + if (type == IVY_BRIDGE) { + pvt->info.rankcfgr = IB_RANK_CFG_A; + pvt->info.get_tolm = ibridge_get_tolm; + pvt->info.get_tohm = ibridge_get_tohm; + pvt->info.dram_rule = ibridge_dram_rule; + pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule); + pvt->info.interleave_list = ibridge_interleave_list; + pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); + pvt->info.interleave_pkg = ibridge_interleave_pkg; + mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx); + + /* Store pci devices at mci for faster access */ + rc = ibridge_mci_bind_devs(mci, sbridge_dev); + if (unlikely(rc < 0)) + goto fail0; + } else { + pvt->info.rankcfgr = SB_RANK_CFG_A; + pvt->info.get_tolm = sbridge_get_tolm; + pvt->info.get_tohm = sbridge_get_tohm; + pvt->info.dram_rule = sbridge_dram_rule; + pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule); + pvt->info.interleave_list = sbridge_interleave_list; + pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list); + pvt->info.interleave_pkg = sbridge_interleave_pkg; + mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx); + + /* Store pci devices at mci for faster access */ + rc = sbridge_mci_bind_devs(mci, sbridge_dev); + if (unlikely(rc < 0)) + goto fail0; + } + /* Get dimm basic config and the memory layout */ 
get_dimm_config(mci); get_memory_layout(mci); /* record ptr to the generic device */ - mci->pdev = &sbridge_dev->pdev[0]->dev; + mci->pdev = &pdev->dev; /* add this new MC control structure to EDAC's list of MCs */ if (unlikely(edac_mc_add_mc(mci))) { @@ -1702,6 +2030,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id) int rc; u8 mc, num_mc = 0; struct sbridge_dev *sbridge_dev; + enum type type; /* get the pci devices we want to reserve for our use */ mutex_lock(&sbridge_edac_lock); @@ -1715,7 +2044,13 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id) } probed++; - rc = sbridge_get_all_devices(&num_mc); + if (pdev->device == PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA) { + rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_ibridge_table); + type = IVY_BRIDGE; + } else { + rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_sbridge_table); + type = SANDY_BRIDGE; + } if (unlikely(rc < 0)) goto fail0; mc = 0; @@ -1724,7 +2059,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id) edac_dbg(0, "Registering MC#%d (%d of %d)\n", mc, mc + 1, num_mc); sbridge_dev->mc = mc++; - rc = sbridge_register_mci(sbridge_dev); + rc = sbridge_register_mci(sbridge_dev, type); if (unlikely(rc < 0)) goto fail1; } @@ -1839,5 +2174,5 @@ MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>"); MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)"); -MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge memory controllers - " +MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - " SBRIDGE_REVISION); diff --git a/drivers/fmc/Kconfig b/drivers/fmc/Kconfig index c01cf45bc3d8..3a75f4256d08 100644 --- a/drivers/fmc/Kconfig +++ b/drivers/fmc/Kconfig @@ -46,6 +46,6 @@ config FMC_CHARDEV This driver matches every mezzanine device and allows user space to read and write registers using a char device. It can be used to write user-space drivers, or just get - aquainted with a mezzanine before writing its specific driver. + acquainted with a mezzanine before writing its specific driver. 
endif # FMC diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 7dd446150294..4e10b10d3ddd 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -13,6 +13,7 @@ #include <linux/acpi_gpio.h> #include <linux/idr.h> #include <linux/slab.h> +#include <linux/acpi.h> #define CREATE_TRACE_POINTS #include <trace/events/gpio.h> diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c index 0cfb60f54766..d18b88b755c3 100644 --- a/drivers/gpu/drm/drm_encoder_slave.c +++ b/drivers/gpu/drm/drm_encoder_slave.c @@ -67,12 +67,12 @@ int drm_i2c_encoder_init(struct drm_device *dev, goto fail; } - if (!client->driver) { + if (!client->dev.driver) { err = -ENODEV; goto fail_unregister; } - module = client->driver->driver.owner; + module = client->dev.driver->owner; if (!try_module_get(module)) { err = -ENODEV; goto fail_unregister; @@ -80,7 +80,7 @@ int drm_i2c_encoder_init(struct drm_device *dev, encoder->bus_priv = client; - encoder_drv = to_drm_i2c_encoder_driver(client->driver); + encoder_drv = to_drm_i2c_encoder_driver(to_i2c_driver(client->dev.driver)); err = encoder_drv->encoder_init(client, dev, encoder); if (err) @@ -111,7 +111,7 @@ void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder) { struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder); struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder); - struct module *module = client->driver->driver.owner; + struct module *module = client->dev.driver->owner; i2c_unregister_device(client); encoder->bus_priv = NULL; diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index d80d95289e10..64c34d5876ff 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -455,8 +455,8 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc) /* Dot clock in Hz: */ dotclock = (u64) crtc->hwmode.clock * 1000; - /* Fields of interlaced scanout modes are only halve a frame duration. - * Double the dotclock to get halve the frame-/line-/pixelduration. + /* Fields of interlaced scanout modes are only half a frame duration. + * Double the dotclock to get half the frame-/line-/pixelduration. 
*/ if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE) dotclock *= 2; diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index c200136a5d8e..f53d5246979c 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -338,7 +338,7 @@ err_idr: */ static void drm_unplug_minor(struct drm_minor *minor) { - if (!minor || !device_is_registered(minor->kdev)) + if (!minor || !minor->kdev) return; #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index 43959edd4291..dfff0907f70e 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c @@ -196,7 +196,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev) acpi_handle dhandle; int ret; - dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 1b2f41c3f191..6d69a9bad865 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -638,7 +638,7 @@ static void intel_didl_outputs(struct drm_device *dev) u32 temp; int i = 0; - handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); + handle = ACPI_HANDLE(&dev->pdev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) return; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c index e286e132c7e7..129120473f6c 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c @@ -116,7 +116,7 @@ mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version) acpi_handle handle; int ret; - handle = DEVICE_ACPI_HANDLE(&device->pdev->dev); + handle = ACPI_HANDLE(&device->pdev->dev); if (!handle) return false; diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c index 13b850076443..e44ed7b93c6d 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c @@ -41,7 +41,8 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c, if (!client) return false; - if (!client->driver || client->driver->detect(client, info)) { + if (!client->dev.driver || + to_i2c_driver(client->dev.driver)->detect(client, info)) { i2c_unregister_device(client); return false; } diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 07273a2ae62f..95c740454049 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -256,7 +256,7 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev) acpi_handle dhandle; int retval = 0; - dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; @@ -414,7 +414,7 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev) if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected) return false; - dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; @@ -448,7 +448,7 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) return NULL; } - handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); + handle = ACPI_HANDLE(&dev->pdev->dev); if (!handle) return NULL; diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c index 10f98c7742d8..98a9074b306b 100644 --- a/drivers/gpu/drm/radeon/radeon_acpi.c +++ b/drivers/gpu/drm/radeon/radeon_acpi.c @@ -369,7 +369,7 @@ int 
radeon_atif_handler(struct radeon_device *rdev, return NOTIFY_DONE; /* Check pending SBIOS requests */ - handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); + handle = ACPI_HANDLE(&rdev->pdev->dev); count = radeon_atif_get_sbios_requests(handle, &req); if (count <= 0) @@ -556,7 +556,7 @@ int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev) struct radeon_atcs *atcs = &rdev->atcs; /* Get the device handle */ - handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); + handle = ACPI_HANDLE(&rdev->pdev->dev); if (!handle) return -EINVAL; @@ -596,7 +596,7 @@ int radeon_acpi_pcie_performance_request(struct radeon_device *rdev, u32 retry = 3; /* Get the device handle */ - handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); + handle = ACPI_HANDLE(&rdev->pdev->dev); if (!handle) return -EINVAL; @@ -699,7 +699,7 @@ int radeon_acpi_init(struct radeon_device *rdev) int ret; /* Get the device handle */ - handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); + handle = ACPI_HANDLE(&rdev->pdev->dev); /* No need to proceed if we're sure that ATIF is not supported */ if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle) diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 6153ec18943a..9d302eaeea15 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -8,8 +8,7 @@ */ #include <linux/vga_switcheroo.h> #include <linux/slab.h> -#include <acpi/acpi.h> -#include <acpi/acpi_bus.h> +#include <linux/acpi.h> #include <linux/pci.h> #include "radeon_acpi.h" @@ -447,7 +446,7 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) acpi_handle dhandle, atpx_handle; acpi_status status; - dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; @@ -493,7 +492,7 @@ static int radeon_atpx_init(void) */ static int radeon_atpx_get_client_id(struct pci_dev *pdev) { - if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) + if (radeon_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev)) return VGA_SWITCHEROO_IGD; else return VGA_SWITCHEROO_DIS; diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index c155d6f3fa68..b3633d9a5317 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -185,7 +185,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) return false; while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { - dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) continue; diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index c91d547191dd..329fbb9b5976 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -242,6 +242,7 @@ config HID_HOLTEK - Tracer Sniper TRM-503 / NOVA Gaming Slider X200 / Zalman ZM-GM1 - SHARKOON DarkGlider Gaming mouse + - LEETGION Hellion Gaming Mouse config HOLTEK_FF bool "Holtek On Line Grip force feedback support" @@ -323,7 +324,7 @@ config HID_LCPOWER config HID_LENOVO_TPKBD tristate "Lenovo ThinkPad USB Keyboard with TrackPoint" - depends on USB_HID + depends on HID select NEW_LEDS select LEDS_CLASS ---help--- @@ -362,19 +363,20 @@ config LOGITECH_FF - Logitech WingMan Force 3D - Logitech Formula Force EX - Logitech WingMan Formula Force GP - - Logitech MOMO Force wheel and if you want to enable force feedback for them. Note: if you say N here, this device will still be supported, but without force feedback. 
config LOGIRUMBLEPAD2_FF - bool "Logitech RumblePad/Rumblepad 2 force feedback support" + bool "Logitech force feedback support (variant 2)" depends on HID_LOGITECH select INPUT_FF_MEMLESS help - Say Y here if you want to enable force feedback support for Logitech - RumblePad and Rumblepad 2 devices. + Say Y here if you want to enable force feedback support for: + - Logitech RumblePad + - Logitech Rumblepad 2 + - Logitech Formula Vibration Feedback Wheel config LOGIG940_FF bool "Logitech Flight System G940 force feedback support" @@ -437,6 +439,7 @@ config HID_MULTITOUCH - Chunghwa panels - CVTouch panels - Cypress TrueTouch panels + - Elan Microelectronics touch panels - Elo TouchSystems IntelliTouch Plus panels - GeneralTouch 'Sensing Win7-TwoFinger' panels - GoodTouch panels @@ -453,6 +456,7 @@ config HID_MULTITOUCH - Pixcir dual touch panels - Quanta panels - eGalax dual-touch panels, including the Joojoo and Wetab tablets + - SiS multitouch panels - Stantum multitouch panels - Touch International Panels - Unitec Panels @@ -614,6 +618,14 @@ config HID_SONY * Sony PS3 Blue-ray Disk Remote Control (Bluetooth) * Logitech Harmony adapter for Sony Playstation 3 (Bluetooth) +config SONY_FF + bool "Sony PS2/3 accessories force feedback support" + depends on HID_SONY + select INPUT_FF_MEMLESS + ---help--- + Say Y here if you have a Sony PS2/3 accessory and want to enable force + feedback support for it. + config HID_SPEEDLINK tristate "Speedlink VAD Cezanne mouse support" depends on HID diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index a959f4aecaf5..30e44318f87f 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -95,7 +95,7 @@ obj-$(CONFIG_HID_PRIMAX) += hid-primax.o obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o hid-roccat-common.o \ hid-roccat-arvo.o hid-roccat-isku.o hid-roccat-kone.o \ hid-roccat-koneplus.o hid-roccat-konepure.o hid-roccat-kovaplus.o \ - hid-roccat-lua.o hid-roccat-pyra.o hid-roccat-savu.o + hid-roccat-lua.o hid-roccat-pyra.o hid-roccat-ryos.o hid-roccat-savu.o obj-$(CONFIG_HID_SAITEK) += hid-saitek.o obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 881cf7b4f9a4..497558127bb3 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -46,6 +46,12 @@ module_param(iso_layout, uint, 0644); MODULE_PARM_DESC(iso_layout, "Enable/Disable hardcoded ISO-layout of the keyboard. " "(0 = disabled, [1] = enabled)"); +static unsigned int swap_opt_cmd; +module_param(swap_opt_cmd, uint, 0644); +MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\") keys. " + "(For people who want to keep Windows PC keyboard muscle memory. " + "[0] = as-is, Mac layout. 
1 = swapped, Windows layout.)"); + struct apple_sc { unsigned long quirks; unsigned int fn_on; @@ -150,6 +156,14 @@ static const struct apple_key_translation apple_iso_keyboard[] = { { } }; +static const struct apple_key_translation swapped_option_cmd_keys[] = { + { KEY_LEFTALT, KEY_LEFTMETA }, + { KEY_LEFTMETA, KEY_LEFTALT }, + { KEY_RIGHTALT, KEY_RIGHTMETA }, + { KEY_RIGHTMETA,KEY_RIGHTALT }, + { } +}; + static const struct apple_key_translation *apple_find_translation( const struct apple_key_translation *table, u16 from) { @@ -242,6 +256,14 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input, } } + if (swap_opt_cmd) { + trans = apple_find_translation(swapped_option_cmd_keys, usage->code); + if (trans) { + input_event(input, usage->type, trans->to, value); + return 1; + } + } + return 0; } diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c index 64ab94a55aa7..a594e478a1e2 100644 --- a/drivers/hid/hid-axff.c +++ b/drivers/hid/hid-axff.c @@ -95,7 +95,7 @@ static int axff_init(struct hid_device *hid) } } - if (field_count < 4) { + if (field_count < 4 && hid->product != 0xf705) { hid_err(hid, "not enough fields in the report: %d\n", field_count); return -ENODEV; @@ -180,6 +180,7 @@ static void ax_remove(struct hid_device *hdev) static const struct hid_device_id ax_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802), }, + { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705), }, { } }; MODULE_DEVICE_TABLE(hid, ax_devices); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index e80da62363bc..8c10f2742233 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1418,10 +1418,8 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) { ret = hdrv->raw_event(hid, report, data, size); - if (ret < 0) { - ret = ret < 0 ? 
ret : 0; + if (ret < 0) goto unlock; - } } ret = hid_report_raw_event(hid, type, data, size, interrupt); @@ -1605,6 +1603,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) }, { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) }, @@ -1716,6 +1715,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) }, { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, @@ -1754,6 +1754,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) }, @@ -1801,21 +1802,28 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, #if IS_ENABLED(CONFIG_HID_ROCCAT) - { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKUFX) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEXTD) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) }, #endif { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, 
USB_DEVICE_ID_SAITEK_PS1000) }, { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS817_TOUCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) }, @@ -2376,15 +2384,6 @@ bool hid_ignore(struct hid_device *hdev) hdev->type == HID_TYPE_USBNONE) return true; break; - case USB_VENDOR_ID_DWAV: - /* These are handled by usbtouchscreen. hdev->type is probably - * HID_TYPE_USBNONE, but we say !HID_TYPE_USBMOUSE to match - * usbtouchscreen. */ - if ((hdev->product == USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER || - hdev->product == USB_DEVICE_ID_DWAV_TOUCHCONTROLLER) && - hdev->type != HID_TYPE_USBMOUSE) - return true; - break; case USB_VENDOR_ID_VELLEMAN: /* These are not HID devices. They are handled by comedi. */ if ((hdev->product >= USB_DEVICE_ID_VELLEMAN_K8055_FIRST && diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c index f042a6cf8b18..4e49462870ab 100644 --- a/drivers/hid/hid-elo.c +++ b/drivers/hid/hid-elo.c @@ -181,7 +181,40 @@ fail: */ static bool elo_broken_firmware(struct usb_device *dev) { - return use_fw_quirk && le16_to_cpu(dev->descriptor.bcdDevice) == 0x10d; + struct usb_device *hub = dev->parent; + struct usb_device *child = NULL; + u16 fw_lvl = le16_to_cpu(dev->descriptor.bcdDevice); + u16 child_vid, child_pid; + int i; + + if (!use_fw_quirk) + return false; + if (fw_lvl != 0x10d) + return false; + + /* iterate sibling devices of the touch controller */ + usb_hub_for_each_child(hub, i, child) { + child_vid = le16_to_cpu(child->descriptor.idVendor); + child_pid = le16_to_cpu(child->descriptor.idProduct); + + /* + * If one of the devices below is present attached as a sibling of + * the touch controller then this is a newer IBM 4820 monitor that + * does not need the IBM-requested workaround if fw level is + * 0x010d - aka 'M'. + * No other HW can have this combination. 
+ */ + if (child_vid==0x04b3) { + switch (child_pid) { + case 0x4676: /* 4820 21x Video */ + case 0x4677: /* 4820 51x Video */ + case 0x4678: /* 4820 2Lx Video */ + case 0x4679: /* 4820 5Lx Video */ + return false; + } + } + } + return true; } static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id) diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c index e696566cde46..0caa676de622 100644 --- a/drivers/hid/hid-holtek-mouse.c +++ b/drivers/hid/hid-holtek-mouse.c @@ -28,6 +28,7 @@ * - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X200 * and Zalman ZM-GM1 * - USB ID 04d9:a081, sold as SHARKOON DarkGlider Gaming mouse + * - USB ID 04d9:a072, sold as LEETGION Hellion Gaming Mouse */ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, @@ -40,6 +41,7 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, * 0x2fff, so they don't exceed HID_MAX_USAGES */ switch (hdev->product) { case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067: + case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072: if (*rsize >= 122 && rdesc[115] == 0xff && rdesc[116] == 0x7f && rdesc[120] == 0xff && rdesc[121] == 0x7f) { hid_info(hdev, "Fixing up report descriptor\n"); @@ -66,6 +68,8 @@ static const struct hid_device_id holtek_mouse_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, + USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) }, + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, { } }; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index f0296a50be5f..76559629568c 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -332,6 +332,11 @@ #define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc #define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003 #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS 0x0100 +#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0101 0x0101 +#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0102 0x0102 +#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0106 0x0106 +#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a +#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100 #define USB_VENDOR_ID_GLAB 0x06c2 #define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038 @@ -448,8 +453,9 @@ #define USB_VENDOR_ID_HOLTEK_ALT 0x04d9 #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055 -#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067 #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A 0xa04a +#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067 +#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072 0xa072 #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081 #define USB_VENDOR_ID_IMATION 0x0718 @@ -571,6 +577,7 @@ #define USB_DEVICE_ID_DINOVO_EDGE 0xc714 #define USB_DEVICE_ID_DINOVO_MINI 0xc71f #define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2 0xca03 +#define USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL 0xca04 #define USB_VENDOR_ID_LUMIO 0x202e #define USB_DEVICE_ID_CRYSTALTOUCH 0x0006 @@ -726,6 +733,9 @@ #define USB_DEVICE_ID_ROCCAT_LUA 0x2c2e #define USB_DEVICE_ID_ROCCAT_PYRA_WIRED 0x2c24 #define USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS 0x2cf6 +#define USB_DEVICE_ID_ROCCAT_RYOS_MK 0x3138 +#define USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW 0x31ce +#define USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO 0x3232 #define USB_DEVICE_ID_ROCCAT_SAVU 0x2d5a #define USB_VENDOR_ID_SAITEK 0x06a3 @@ -745,6 +755,10 @@ #define USB_VENDOR_ID_SIGMATEL 0x066F #define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780 +#define USB_VENDOR_ID_SIS2_TOUCH 0x0457 +#define USB_DEVICE_ID_SIS9200_TOUCH 0x9200 
+#define USB_DEVICE_ID_SIS817_TOUCH 0x0817 + #define USB_VENDOR_ID_SKYCABLE 0x1223 #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c index 31cf29a6ba17..2d25b6cbbc05 100644 --- a/drivers/hid/hid-lenovo-tpkbd.c +++ b/drivers/hid/hid-lenovo-tpkbd.c @@ -14,11 +14,9 @@ #include <linux/module.h> #include <linux/sysfs.h> #include <linux/device.h> -#include <linux/usb.h> #include <linux/hid.h> #include <linux/input.h> #include <linux/leds.h> -#include "usbhid/usbhid.h" #include "hid-ids.h" @@ -41,10 +39,9 @@ static int tpkbd_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { - struct usbhid_device *uhdev; - - uhdev = (struct usbhid_device *) hdev->driver_data; - if (uhdev->ifnum == 1 && usage->hid == (HID_UP_BUTTON | 0x0010)) { + if (usage->hid == (HID_UP_BUTTON | 0x0010)) { + /* mark the device as pointer */ + hid_set_drvdata(hdev, (void *)1); map_key_clear(KEY_MICMUTE); return 1; } @@ -339,7 +336,7 @@ static int tpkbd_probe_tp(struct hid_device *hdev) struct tpkbd_data_pointer *data_pointer; size_t name_sz = strlen(dev_name(dev)) + 16; char *name_mute, *name_micmute; - int i, ret; + int i; /* Validate required reports. */ for (i = 0; i < 4; i++) { @@ -354,7 +351,9 @@ static int tpkbd_probe_tp(struct hid_device *hdev) hid_warn(hdev, "Could not create sysfs group\n"); } - data_pointer = kzalloc(sizeof(struct tpkbd_data_pointer), GFP_KERNEL); + data_pointer = devm_kzalloc(&hdev->dev, + sizeof(struct tpkbd_data_pointer), + GFP_KERNEL); if (data_pointer == NULL) { hid_err(hdev, "Could not allocate memory for driver data\n"); return -ENOMEM; @@ -364,20 +363,13 @@ static int tpkbd_probe_tp(struct hid_device *hdev) data_pointer->sensitivity = 0xa0; data_pointer->press_speed = 0x38; - name_mute = kzalloc(name_sz, GFP_KERNEL); - if (name_mute == NULL) { + name_mute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL); + name_micmute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL); + if (name_mute == NULL || name_micmute == NULL) { hid_err(hdev, "Could not allocate memory for led data\n"); - ret = -ENOMEM; - goto err; + return -ENOMEM; } snprintf(name_mute, name_sz, "%s:amber:mute", dev_name(dev)); - - name_micmute = kzalloc(name_sz, GFP_KERNEL); - if (name_micmute == NULL) { - hid_err(hdev, "Could not allocate memory for led data\n"); - ret = -ENOMEM; - goto err2; - } snprintf(name_micmute, name_sz, "%s:amber:micmute", dev_name(dev)); hid_set_drvdata(hdev, data_pointer); @@ -397,19 +389,12 @@ static int tpkbd_probe_tp(struct hid_device *hdev) tpkbd_features_set(hdev); return 0; - -err2: - kfree(name_mute); -err: - kfree(data_pointer); - return ret; } static int tpkbd_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; - struct usbhid_device *uhdev; ret = hid_parse(hdev); if (ret) { @@ -423,9 +408,8 @@ static int tpkbd_probe(struct hid_device *hdev, goto err; } - uhdev = (struct usbhid_device *) hdev->driver_data; - - if (uhdev->ifnum == 1) { + if (hid_get_drvdata(hdev)) { + hid_set_drvdata(hdev, NULL); ret = tpkbd_probe_tp(hdev); if (ret) goto err_hid; @@ -449,17 +433,11 @@ static void tpkbd_remove_tp(struct hid_device *hdev) led_classdev_unregister(&data_pointer->led_mute); hid_set_drvdata(hdev, NULL); - kfree(data_pointer->led_micmute.name); - kfree(data_pointer->led_mute.name); - kfree(data_pointer); } static void tpkbd_remove(struct hid_device *hdev) { - struct usbhid_device *uhdev; - - uhdev = (struct 
usbhid_device *) hdev->driver_data; - if (uhdev->ifnum == 1) + if (hid_get_drvdata(hdev)) tpkbd_remove_tp(hdev); hid_hw_stop(hdev); diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index 6f12ecd36c88..06eb45fa6331 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c @@ -45,7 +45,9 @@ /* Size of the original descriptors of the Driving Force (and Pro) wheels */ #define DF_RDESC_ORIG_SIZE 130 #define DFP_RDESC_ORIG_SIZE 97 +#define FV_RDESC_ORIG_SIZE 130 #define MOMO_RDESC_ORIG_SIZE 87 +#define MOMO2_RDESC_ORIG_SIZE 87 /* Fixed report descriptors for Logitech Driving Force (and Pro) * wheel controllers @@ -170,6 +172,73 @@ static __u8 dfp_rdesc_fixed[] = { 0xC0 /* End Collection */ }; +static __u8 fv_rdesc_fixed[] = { +0x05, 0x01, /* Usage Page (Desktop), */ +0x09, 0x04, /* Usage (Joystik), */ +0xA1, 0x01, /* Collection (Application), */ +0xA1, 0x02, /* Collection (Logical), */ +0x95, 0x01, /* Report Count (1), */ +0x75, 0x0A, /* Report Size (10), */ +0x15, 0x00, /* Logical Minimum (0), */ +0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ +0x35, 0x00, /* Physical Minimum (0), */ +0x46, 0xFF, 0x03, /* Physical Maximum (1023), */ +0x09, 0x30, /* Usage (X), */ +0x81, 0x02, /* Input (Variable), */ +0x95, 0x0C, /* Report Count (12), */ +0x75, 0x01, /* Report Size (1), */ +0x25, 0x01, /* Logical Maximum (1), */ +0x45, 0x01, /* Physical Maximum (1), */ +0x05, 0x09, /* Usage Page (Button), */ +0x19, 0x01, /* Usage Minimum (01h), */ +0x29, 0x0C, /* Usage Maximum (0Ch), */ +0x81, 0x02, /* Input (Variable), */ +0x95, 0x02, /* Report Count (2), */ +0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ +0x09, 0x01, /* Usage (01h), */ +0x81, 0x02, /* Input (Variable), */ +0x09, 0x02, /* Usage (02h), */ +0x26, 0xFF, 0x00, /* Logical Maximum (255), */ +0x46, 0xFF, 0x00, /* Physical Maximum (255), */ +0x95, 0x01, /* Report Count (1), */ +0x75, 0x08, /* Report Size (8), */ +0x81, 0x02, /* Input (Variable), */ +0x05, 0x01, /* Usage Page (Desktop), */ +0x25, 0x07, /* Logical Maximum (7), */ +0x46, 0x3B, 0x01, /* Physical Maximum (315), */ +0x75, 0x04, /* Report Size (4), */ +0x65, 0x14, /* Unit (Degrees), */ +0x09, 0x39, /* Usage (Hat Switch), */ +0x81, 0x42, /* Input (Variable, Null State), */ +0x75, 0x01, /* Report Size (1), */ +0x95, 0x04, /* Report Count (4), */ +0x65, 0x00, /* Unit, */ +0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ +0x09, 0x01, /* Usage (01h), */ +0x25, 0x01, /* Logical Maximum (1), */ +0x45, 0x01, /* Physical Maximum (1), */ +0x81, 0x02, /* Input (Variable), */ +0x05, 0x01, /* Usage Page (Desktop), */ +0x95, 0x01, /* Report Count (1), */ +0x75, 0x08, /* Report Size (8), */ +0x26, 0xFF, 0x00, /* Logical Maximum (255), */ +0x46, 0xFF, 0x00, /* Physical Maximum (255), */ +0x09, 0x31, /* Usage (Y), */ +0x81, 0x02, /* Input (Variable), */ +0x09, 0x32, /* Usage (Z), */ +0x81, 0x02, /* Input (Variable), */ +0xC0, /* End Collection, */ +0xA1, 0x02, /* Collection (Logical), */ +0x26, 0xFF, 0x00, /* Logical Maximum (255), */ +0x46, 0xFF, 0x00, /* Physical Maximum (255), */ +0x95, 0x07, /* Report Count (7), */ +0x75, 0x08, /* Report Size (8), */ +0x09, 0x03, /* Usage (03h), */ +0x91, 0x02, /* Output (Variable), */ +0xC0, /* End Collection, */ +0xC0 /* End Collection */ +}; + static __u8 momo_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystik), */ @@ -216,6 +285,54 @@ static __u8 momo_rdesc_fixed[] = { 0xC0 /* End Collection */ }; +static __u8 momo2_rdesc_fixed[] = { +0x05, 0x01, /* Usage Page (Desktop), */ +0x09, 0x04, /* Usage (Joystik), */ +0xA1, 0x01, /* 
Collection (Application), */ +0xA1, 0x02, /* Collection (Logical), */ +0x95, 0x01, /* Report Count (1), */ +0x75, 0x0A, /* Report Size (10), */ +0x15, 0x00, /* Logical Minimum (0), */ +0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ +0x35, 0x00, /* Physical Minimum (0), */ +0x46, 0xFF, 0x03, /* Physical Maximum (1023), */ +0x09, 0x30, /* Usage (X), */ +0x81, 0x02, /* Input (Variable), */ +0x95, 0x0A, /* Report Count (10), */ +0x75, 0x01, /* Report Size (1), */ +0x25, 0x01, /* Logical Maximum (1), */ +0x45, 0x01, /* Physical Maximum (1), */ +0x05, 0x09, /* Usage Page (Button), */ +0x19, 0x01, /* Usage Minimum (01h), */ +0x29, 0x0A, /* Usage Maximum (0Ah), */ +0x81, 0x02, /* Input (Variable), */ +0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ +0x09, 0x00, /* Usage (00h), */ +0x95, 0x04, /* Report Count (4), */ +0x81, 0x02, /* Input (Variable), */ +0x95, 0x01, /* Report Count (1), */ +0x75, 0x08, /* Report Size (8), */ +0x26, 0xFF, 0x00, /* Logical Maximum (255), */ +0x46, 0xFF, 0x00, /* Physical Maximum (255), */ +0x09, 0x01, /* Usage (01h), */ +0x81, 0x02, /* Input (Variable), */ +0x05, 0x01, /* Usage Page (Desktop), */ +0x09, 0x31, /* Usage (Y), */ +0x81, 0x02, /* Input (Variable), */ +0x09, 0x32, /* Usage (Z), */ +0x81, 0x02, /* Input (Variable), */ +0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ +0x09, 0x00, /* Usage (00h), */ +0x81, 0x02, /* Input (Variable), */ +0xC0, /* End Collection, */ +0xA1, 0x02, /* Collection (Logical), */ +0x09, 0x02, /* Usage (02h), */ +0x95, 0x07, /* Report Count (7), */ +0x91, 0x02, /* Output (Variable), */ +0xC0, /* End Collection, */ +0xC0 /* End Collection */ +}; + /* * Certain Logitech keyboards send in report #3 keys which are far * above the logical maximum described in descriptor. This extends @@ -275,6 +392,24 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, } break; + case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2: + if (*rsize == MOMO2_RDESC_ORIG_SIZE) { + hid_info(hdev, + "fixing up Logitech Momo Racing Force (Black) report descriptor\n"); + rdesc = momo2_rdesc_fixed; + *rsize = sizeof(momo2_rdesc_fixed); + } + break; + + case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL: + if (*rsize == FV_RDESC_ORIG_SIZE) { + hid_info(hdev, + "fixing up Logitech Formula Vibration report descriptor\n"); + rdesc = fv_rdesc_fixed; + *rsize = sizeof(fv_rdesc_fixed); + } + break; + case USB_DEVICE_ID_LOGITECH_DFP_WHEEL: if (*rsize == DFP_RDESC_ORIG_SIZE) { hid_info(hdev, @@ -492,6 +627,7 @@ static int lg_input_mapped(struct hid_device *hdev, struct hid_input *hi, case USB_DEVICE_ID_LOGITECH_G27_WHEEL: case USB_DEVICE_ID_LOGITECH_WII_WHEEL: case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2: + case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL: field->application = HID_GD_MULTIAXIS; break; default: @@ -639,6 +775,8 @@ static const struct hid_device_id lg_devices[] = { .driver_data = LG_NOGET | LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2), .driver_data = LG_FF4 }, + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL), + .driver_data = LG_FF2 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL), .driver_data = LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL), diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c index 1a42eaa6ca02..0e3fb1a7e421 100644 --- a/drivers/hid/hid-lg2ff.c +++ b/drivers/hid/hid-lg2ff.c @@ -95,7 +95,7 @@ int lg2ff_init(struct hid_device *hid) hid_hw_request(hid, report, HID_REQ_SET_REPORT); - hid_info(hid, "Force feedback 
for Logitech RumblePad/Rumblepad 2 by Anssi Hannula <anssi.hannula@gmail.com>\n"); + hid_info(hid, "Force feedback for Logitech variant 2 rumble devices by Anssi Hannula <anssi.hannula@gmail.com>\n"); return 0; } diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c index 8782fe1aaa07..befe0e336471 100644 --- a/drivers/hid/hid-lg4ff.c +++ b/drivers/hid/hid-lg4ff.c @@ -196,6 +196,21 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *e case FF_CONSTANT: x = effect->u.ramp.start_level + 0x80; /* 0x80 is no force */ CLAMP(x); + + if (x == 0x80) { + /* De-activate force in slot-1*/ + value[0] = 0x13; + value[1] = 0x00; + value[2] = 0x00; + value[3] = 0x00; + value[4] = 0x00; + value[5] = 0x00; + value[6] = 0x00; + + hid_hw_request(hid, report, HID_REQ_SET_REPORT); + return 0; + } + value[0] = 0x11; /* Slot 1 */ value[1] = 0x08; value[2] = x; @@ -218,12 +233,70 @@ static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitud struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); __s32 *value = report->field[0]->value; + __u32 expand_a, expand_b; + struct lg4ff_device_entry *entry; + struct lg_drv_data *drv_data; + + drv_data = hid_get_drvdata(hid); + if (!drv_data) { + hid_err(hid, "Private driver data not found!\n"); + return; + } + + entry = drv_data->device_props; + if (!entry) { + hid_err(hid, "Device properties not found!\n"); + return; + } + + /* De-activate Auto-Center */ + if (magnitude == 0) { + value[0] = 0xf5; + value[1] = 0x00; + value[2] = 0x00; + value[3] = 0x00; + value[4] = 0x00; + value[5] = 0x00; + value[6] = 0x00; + + hid_hw_request(hid, report, HID_REQ_SET_REPORT); + return; + } + + if (magnitude <= 0xaaaa) { + expand_a = 0x0c * magnitude; + expand_b = 0x80 * magnitude; + } else { + expand_a = (0x0c * 0xaaaa) + 0x06 * (magnitude - 0xaaaa); + expand_b = (0x80 * 0xaaaa) + 0xff * (magnitude - 0xaaaa); + } + + /* Adjust for non-MOMO wheels */ + switch (entry->product_id) { + case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL: + case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2: + break; + default: + expand_a = expand_a >> 1; + break; + } value[0] = 0xfe; value[1] = 0x0d; - value[2] = magnitude >> 13; - value[3] = magnitude >> 13; - value[4] = magnitude >> 8; + value[2] = expand_a / 0xaaaa; + value[3] = expand_a / 0xaaaa; + value[4] = expand_b / 0xaaaa; + value[5] = 0x00; + value[6] = 0x00; + + hid_hw_request(hid, report, HID_REQ_SET_REPORT); + + /* Activate Auto-Center */ + value[0] = 0x14; + value[1] = 0x00; + value[2] = 0x00; + value[3] = 0x00; + value[4] = 0x00; value[5] = 0x00; value[6] = 0x00; @@ -540,17 +613,6 @@ int lg4ff_init(struct hid_device *hid) if (error) return error; - /* Check if autocentering is available and - * set the centering force to zero by default */ - if (test_bit(FF_AUTOCENTER, dev->ffbit)) { - if (rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN) /* Formula Force EX expects different autocentering command */ - dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex; - else - dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default; - - dev->ff->set_autocenter(dev, 0); - } - /* Get private driver data */ drv_data = hid_get_drvdata(hid); if (!drv_data) { @@ -571,6 +633,17 @@ int lg4ff_init(struct hid_device *hid) entry->max_range = lg4ff_devices[i].max_range; entry->set_range = lg4ff_devices[i].set_range; + /* Check if autocentering is available and + * set the centering force to zero by default */ + if 
(test_bit(FF_AUTOCENTER, dev->ffbit)) { + if (rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN) /* Formula Force EX expects different autocentering command */ + dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex; + else + dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default; + + dev->ff->set_autocenter(dev, 0); + } + /* Create sysfs interface */ error = device_create_file(&hid->dev, &dev_attr_range); if (error) diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 2e5302462efb..a7947d8251a8 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -542,9 +542,9 @@ static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf, return 0; } -static void rdcat(char **rdesc, unsigned int *rsize, const char *data, unsigned int size) +static void rdcat(char *rdesc, unsigned int *rsize, const char *data, unsigned int size) { - memcpy(*rdesc + *rsize, data, size); + memcpy(rdesc + *rsize, data, size); *rsize += size; } @@ -567,31 +567,31 @@ static int logi_dj_ll_parse(struct hid_device *hid) if (djdev->reports_supported & STD_KEYBOARD) { dbg_hid("%s: sending a kbd descriptor, reports_supported: %x\n", __func__, djdev->reports_supported); - rdcat(&rdesc, &rsize, kbd_descriptor, sizeof(kbd_descriptor)); + rdcat(rdesc, &rsize, kbd_descriptor, sizeof(kbd_descriptor)); } if (djdev->reports_supported & STD_MOUSE) { dbg_hid("%s: sending a mouse descriptor, reports_supported: " "%x\n", __func__, djdev->reports_supported); - rdcat(&rdesc, &rsize, mse_descriptor, sizeof(mse_descriptor)); + rdcat(rdesc, &rsize, mse_descriptor, sizeof(mse_descriptor)); } if (djdev->reports_supported & MULTIMEDIA) { dbg_hid("%s: sending a multimedia report descriptor: %x\n", __func__, djdev->reports_supported); - rdcat(&rdesc, &rsize, consumer_descriptor, sizeof(consumer_descriptor)); + rdcat(rdesc, &rsize, consumer_descriptor, sizeof(consumer_descriptor)); } if (djdev->reports_supported & POWER_KEYS) { dbg_hid("%s: sending a power keys report descriptor: %x\n", __func__, djdev->reports_supported); - rdcat(&rdesc, &rsize, syscontrol_descriptor, sizeof(syscontrol_descriptor)); + rdcat(rdesc, &rsize, syscontrol_descriptor, sizeof(syscontrol_descriptor)); } if (djdev->reports_supported & MEDIA_CENTER) { dbg_hid("%s: sending a media center report descriptor: %x\n", __func__, djdev->reports_supported); - rdcat(&rdesc, &rsize, media_descriptor, sizeof(media_descriptor)); + rdcat(rdesc, &rsize, media_descriptor, sizeof(media_descriptor)); } if (djdev->reports_supported & KBD_LEDS) { diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 5e5fe1b8eebb..a2cedb8ae1c0 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -250,12 +250,12 @@ static struct mt_class mt_classes[] = { { .name = MT_CLS_GENERALTOUCH_TWOFINGERS, .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP | MT_QUIRK_VALID_IS_INRANGE | - MT_QUIRK_SLOT_IS_CONTACTNUMBER, + MT_QUIRK_SLOT_IS_CONTACTID, .maxcontacts = 2 }, { .name = MT_CLS_GENERALTOUCH_PWT_TENFINGERS, .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP | - MT_QUIRK_SLOT_IS_CONTACTNUMBER + MT_QUIRK_SLOT_IS_CONTACTID }, { .name = MT_CLS_FLATFROG, @@ -1173,6 +1173,21 @@ static const struct hid_device_id mt_devices[] = { { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS, MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS) }, + { .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS, + MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, + USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0101) }, + { .driver_data 
= MT_CLS_GENERALTOUCH_PWT_TENFINGERS, + MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, + USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0102) }, + { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS, + MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, + USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0106) }, + { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS, + MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, + USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A) }, + { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS, + MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, + USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100) }, /* Gametel game controller */ { .driver_data = MT_CLS_NSMU, @@ -1284,6 +1299,14 @@ static const struct hid_device_id mt_devices[] = { MT_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008) }, + /* SiS panels */ + { .driver_data = MT_CLS_DEFAULT, + HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH, + USB_DEVICE_ID_SIS9200_TOUCH) }, + { .driver_data = MT_CLS_DEFAULT, + HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH, + USB_DEVICE_ID_SIS817_TOUCH) }, + /* Stantum panels */ { .driver_data = MT_CLS_CONFIDENCE, MT_USB_DEVICE(USB_VENDOR_ID_STANTUM, diff --git a/drivers/hid/hid-roccat-common.c b/drivers/hid/hid-roccat-common.c index 74f704032627..02e28e9f4ea7 100644 --- a/drivers/hid/hid-roccat-common.c +++ b/drivers/hid/hid-roccat-common.c @@ -65,10 +65,11 @@ int roccat_common2_send(struct usb_device *usb_dev, uint report_id, EXPORT_SYMBOL_GPL(roccat_common2_send); enum roccat_common2_control_states { - ROCCAT_COMMON_CONTROL_STATUS_OVERLOAD = 0, + ROCCAT_COMMON_CONTROL_STATUS_CRITICAL = 0, ROCCAT_COMMON_CONTROL_STATUS_OK = 1, ROCCAT_COMMON_CONTROL_STATUS_INVALID = 2, - ROCCAT_COMMON_CONTROL_STATUS_WAIT = 3, + ROCCAT_COMMON_CONTROL_STATUS_BUSY = 3, + ROCCAT_COMMON_CONTROL_STATUS_CRITICAL_NEW = 4, }; static int roccat_common2_receive_control_status(struct usb_device *usb_dev) @@ -88,13 +89,12 @@ static int roccat_common2_receive_control_status(struct usb_device *usb_dev) switch (control.value) { case ROCCAT_COMMON_CONTROL_STATUS_OK: return 0; - case ROCCAT_COMMON_CONTROL_STATUS_WAIT: + case ROCCAT_COMMON_CONTROL_STATUS_BUSY: msleep(500); continue; case ROCCAT_COMMON_CONTROL_STATUS_INVALID: - - case ROCCAT_COMMON_CONTROL_STATUS_OVERLOAD: - /* seems to be critical - replug necessary */ + case ROCCAT_COMMON_CONTROL_STATUS_CRITICAL: + case ROCCAT_COMMON_CONTROL_STATUS_CRITICAL_NEW: return -EINVAL; default: dev_err(&usb_dev->dev, @@ -122,6 +122,59 @@ int roccat_common2_send_with_status(struct usb_device *usb_dev, } EXPORT_SYMBOL_GPL(roccat_common2_send_with_status); +int roccat_common2_device_init_struct(struct usb_device *usb_dev, + struct roccat_common2_device *dev) +{ + mutex_init(&dev->lock); + return 0; +} +EXPORT_SYMBOL_GPL(roccat_common2_device_init_struct); + +ssize_t roccat_common2_sysfs_read(struct file *fp, struct kobject *kobj, + char *buf, loff_t off, size_t count, + size_t real_size, uint command) +{ + struct device *dev = + container_of(kobj, struct device, kobj)->parent->parent; + struct roccat_common2_device *roccat_dev = hid_get_drvdata(dev_get_drvdata(dev)); + struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); + int retval; + + if (off >= real_size) + return 0; + + if (off != 0 || count != real_size) + return -EINVAL; + + mutex_lock(&roccat_dev->lock); + retval = roccat_common2_receive(usb_dev, command, buf, real_size); + mutex_unlock(&roccat_dev->lock); + + return retval ? 
retval : real_size; +} +EXPORT_SYMBOL_GPL(roccat_common2_sysfs_read); + +ssize_t roccat_common2_sysfs_write(struct file *fp, struct kobject *kobj, + void const *buf, loff_t off, size_t count, + size_t real_size, uint command) +{ + struct device *dev = + container_of(kobj, struct device, kobj)->parent->parent; + struct roccat_common2_device *roccat_dev = hid_get_drvdata(dev_get_drvdata(dev)); + struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); + int retval; + + if (off != 0 || count != real_size) + return -EINVAL; + + mutex_lock(&roccat_dev->lock); + retval = roccat_common2_send_with_status(usb_dev, command, buf, real_size); + mutex_unlock(&roccat_dev->lock); + + return retval ? retval : real_size; +} +EXPORT_SYMBOL_GPL(roccat_common2_sysfs_write); + MODULE_AUTHOR("Stefan Achatz"); MODULE_DESCRIPTION("USB Roccat common driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/hid/hid-roccat-common.h b/drivers/hid/hid-roccat-common.h index a97746a63b70..eaa56eb7d5d1 100644 --- a/drivers/hid/hid-roccat-common.h +++ b/drivers/hid/hid-roccat-common.h @@ -32,4 +32,66 @@ int roccat_common2_send(struct usb_device *usb_dev, uint report_id, int roccat_common2_send_with_status(struct usb_device *usb_dev, uint command, void const *buf, uint size); +struct roccat_common2_device { + int roccat_claimed; + int chrdev_minor; + struct mutex lock; +}; + +int roccat_common2_device_init_struct(struct usb_device *usb_dev, + struct roccat_common2_device *dev); +ssize_t roccat_common2_sysfs_read(struct file *fp, struct kobject *kobj, + char *buf, loff_t off, size_t count, + size_t real_size, uint command); +ssize_t roccat_common2_sysfs_write(struct file *fp, struct kobject *kobj, + void const *buf, loff_t off, size_t count, + size_t real_size, uint command); + +#define ROCCAT_COMMON2_SYSFS_W(thingy, COMMAND, SIZE) \ +static ssize_t roccat_common2_sysfs_write_ ## thingy(struct file *fp, \ + struct kobject *kobj, struct bin_attribute *attr, char *buf, \ + loff_t off, size_t count) \ +{ \ + return roccat_common2_sysfs_write(fp, kobj, buf, off, count, \ + SIZE, COMMAND); \ +} + +#define ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE) \ +static ssize_t roccat_common2_sysfs_read_ ## thingy(struct file *fp, \ + struct kobject *kobj, struct bin_attribute *attr, char *buf, \ + loff_t off, size_t count) \ +{ \ + return roccat_common2_sysfs_read(fp, kobj, buf, off, count, \ + SIZE, COMMAND); \ +} + +#define ROCCAT_COMMON2_SYSFS_RW(thingy, COMMAND, SIZE) \ +ROCCAT_COMMON2_SYSFS_W(thingy, COMMAND, SIZE) \ +ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE) + +#define ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(thingy, COMMAND, SIZE) \ +ROCCAT_COMMON2_SYSFS_RW(thingy, COMMAND, SIZE); \ +static struct bin_attribute bin_attr_ ## thingy = { \ + .attr = { .name = #thingy, .mode = 0660 }, \ + .size = SIZE, \ + .read = roccat_common2_sysfs_read_ ## thingy, \ + .write = roccat_common2_sysfs_write_ ## thingy \ +} + +#define ROCCAT_COMMON2_BIN_ATTRIBUTE_R(thingy, COMMAND, SIZE) \ +ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE); \ +static struct bin_attribute bin_attr_ ## thingy = { \ + .attr = { .name = #thingy, .mode = 0440 }, \ + .size = SIZE, \ + .read = roccat_common2_sysfs_read_ ## thingy, \ +} + +#define ROCCAT_COMMON2_BIN_ATTRIBUTE_W(thingy, COMMAND, SIZE) \ +ROCCAT_COMMON2_SYSFS_W(thingy, COMMAND, SIZE); \ +static struct bin_attribute bin_attr_ ## thingy = { \ + .attr = { .name = #thingy, .mode = 0220 }, \ + .size = SIZE, \ + .write = roccat_common2_sysfs_write_ ## thingy \ +} + #endif diff --git 
a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c index 99a605ebb665..07de2f9014c6 100644 --- a/drivers/hid/hid-roccat-konepure.c +++ b/drivers/hid/hid-roccat-konepure.c @@ -15,6 +15,7 @@ * Roccat KonePure is a smaller version of KoneXTD with less buttons and lights. */ +#include <linux/types.h> #include <linux/device.h> #include <linux/input.h> #include <linux/hid.h> @@ -23,128 +24,50 @@ #include <linux/hid-roccat.h> #include "hid-ids.h" #include "hid-roccat-common.h" -#include "hid-roccat-konepure.h" -static struct class *konepure_class; - -static ssize_t konepure_sysfs_read(struct file *fp, struct kobject *kobj, - char *buf, loff_t off, size_t count, - size_t real_size, uint command) -{ - struct device *dev = - container_of(kobj, struct device, kobj)->parent->parent; - struct konepure_device *konepure = hid_get_drvdata(dev_get_drvdata(dev)); - struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); - int retval; - - if (off >= real_size) - return 0; - - if (off != 0 || count != real_size) - return -EINVAL; - - mutex_lock(&konepure->konepure_lock); - retval = roccat_common2_receive(usb_dev, command, buf, real_size); - mutex_unlock(&konepure->konepure_lock); - - return retval ? retval : real_size; -} - -static ssize_t konepure_sysfs_write(struct file *fp, struct kobject *kobj, - void const *buf, loff_t off, size_t count, - size_t real_size, uint command) -{ - struct device *dev = - container_of(kobj, struct device, kobj)->parent->parent; - struct konepure_device *konepure = hid_get_drvdata(dev_get_drvdata(dev)); - struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); - int retval; - - if (off != 0 || count != real_size) - return -EINVAL; - - mutex_lock(&konepure->konepure_lock); - retval = roccat_common2_send_with_status(usb_dev, command, - (void *)buf, real_size); - mutex_unlock(&konepure->konepure_lock); - - return retval ? 
retval : real_size; -} - -#define KONEPURE_SYSFS_W(thingy, THINGY) \ -static ssize_t konepure_sysfs_write_ ## thingy(struct file *fp, \ - struct kobject *kobj, struct bin_attribute *attr, char *buf, \ - loff_t off, size_t count) \ -{ \ - return konepure_sysfs_write(fp, kobj, buf, off, count, \ - KONEPURE_SIZE_ ## THINGY, KONEPURE_COMMAND_ ## THINGY); \ -} - -#define KONEPURE_SYSFS_R(thingy, THINGY) \ -static ssize_t konepure_sysfs_read_ ## thingy(struct file *fp, \ - struct kobject *kobj, struct bin_attribute *attr, char *buf, \ - loff_t off, size_t count) \ -{ \ - return konepure_sysfs_read(fp, kobj, buf, off, count, \ - KONEPURE_SIZE_ ## THINGY, KONEPURE_COMMAND_ ## THINGY); \ -} +enum { + KONEPURE_MOUSE_REPORT_NUMBER_BUTTON = 3, +}; -#define KONEPURE_SYSFS_RW(thingy, THINGY) \ -KONEPURE_SYSFS_W(thingy, THINGY) \ -KONEPURE_SYSFS_R(thingy, THINGY) - -#define KONEPURE_BIN_ATTRIBUTE_RW(thingy, THINGY) \ -KONEPURE_SYSFS_RW(thingy, THINGY); \ -static struct bin_attribute bin_attr_##thingy = { \ - .attr = { .name = #thingy, .mode = 0660 }, \ - .size = KONEPURE_SIZE_ ## THINGY, \ - .read = konepure_sysfs_read_ ## thingy, \ - .write = konepure_sysfs_write_ ## thingy \ -} +struct konepure_mouse_report_button { + uint8_t report_number; /* always KONEPURE_MOUSE_REPORT_NUMBER_BUTTON */ + uint8_t zero; + uint8_t type; + uint8_t data1; + uint8_t data2; + uint8_t zero2; + uint8_t unknown[2]; +} __packed; -#define KONEPURE_BIN_ATTRIBUTE_R(thingy, THINGY) \ -KONEPURE_SYSFS_R(thingy, THINGY); \ -static struct bin_attribute bin_attr_##thingy = { \ - .attr = { .name = #thingy, .mode = 0440 }, \ - .size = KONEPURE_SIZE_ ## THINGY, \ - .read = konepure_sysfs_read_ ## thingy, \ -} - -#define KONEPURE_BIN_ATTRIBUTE_W(thingy, THINGY) \ -KONEPURE_SYSFS_W(thingy, THINGY); \ -static struct bin_attribute bin_attr_##thingy = { \ - .attr = { .name = #thingy, .mode = 0220 }, \ - .size = KONEPURE_SIZE_ ## THINGY, \ - .write = konepure_sysfs_write_ ## thingy \ -} +static struct class *konepure_class; -KONEPURE_BIN_ATTRIBUTE_RW(actual_profile, ACTUAL_PROFILE); -KONEPURE_BIN_ATTRIBUTE_RW(info, INFO); -KONEPURE_BIN_ATTRIBUTE_RW(sensor, SENSOR); -KONEPURE_BIN_ATTRIBUTE_RW(tcu, TCU); -KONEPURE_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS); -KONEPURE_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS); -KONEPURE_BIN_ATTRIBUTE_W(control, CONTROL); -KONEPURE_BIN_ATTRIBUTE_W(talk, TALK); -KONEPURE_BIN_ATTRIBUTE_W(macro, MACRO); -KONEPURE_BIN_ATTRIBUTE_R(tcu_image, TCU_IMAGE); - -static struct bin_attribute *konepure_bin_attributes[] = { +ROCCAT_COMMON2_BIN_ATTRIBUTE_W(control, 0x04, 0x03); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(actual_profile, 0x05, 0x03); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile_settings, 0x06, 0x1f); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile_buttons, 0x07, 0x3b); +ROCCAT_COMMON2_BIN_ATTRIBUTE_W(macro, 0x08, 0x0822); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(info, 0x09, 0x06); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(tcu, 0x0c, 0x04); +ROCCAT_COMMON2_BIN_ATTRIBUTE_R(tcu_image, 0x0c, 0x0404); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(sensor, 0x0f, 0x06); +ROCCAT_COMMON2_BIN_ATTRIBUTE_W(talk, 0x10, 0x10); + +static struct bin_attribute *konepure_bin_attrs[] = { &bin_attr_actual_profile, + &bin_attr_control, &bin_attr_info, + &bin_attr_talk, + &bin_attr_macro, &bin_attr_sensor, &bin_attr_tcu, + &bin_attr_tcu_image, &bin_attr_profile_settings, &bin_attr_profile_buttons, - &bin_attr_control, - &bin_attr_talk, - &bin_attr_macro, - &bin_attr_tcu_image, NULL, }; static const struct attribute_group konepure_group = { - .bin_attrs = 
konepure_bin_attributes, + .bin_attrs = konepure_bin_attrs, }; static const struct attribute_group *konepure_groups[] = { @@ -152,20 +75,11 @@ static const struct attribute_group *konepure_groups[] = { NULL, }; - -static int konepure_init_konepure_device_struct(struct usb_device *usb_dev, - struct konepure_device *konepure) -{ - mutex_init(&konepure->konepure_lock); - - return 0; -} - static int konepure_init_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct usb_device *usb_dev = interface_to_usbdev(intf); - struct konepure_device *konepure; + struct roccat_common2_device *konepure; int retval; if (intf->cur_altsetting->desc.bInterfaceProtocol @@ -181,9 +95,9 @@ static int konepure_init_specials(struct hid_device *hdev) } hid_set_drvdata(hdev, konepure); - retval = konepure_init_konepure_device_struct(usb_dev, konepure); + retval = roccat_common2_device_init_struct(usb_dev, konepure); if (retval) { - hid_err(hdev, "couldn't init struct konepure_device\n"); + hid_err(hdev, "couldn't init KonePure device\n"); goto exit_free; } @@ -205,7 +119,7 @@ exit_free: static void konepure_remove_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - struct konepure_device *konepure; + struct roccat_common2_device *konepure; if (intf->cur_altsetting->desc.bInterfaceProtocol != USB_INTERFACE_PROTOCOL_MOUSE) @@ -258,7 +172,7 @@ static int konepure_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - struct konepure_device *konepure = hid_get_drvdata(hdev); + struct roccat_common2_device *konepure = hid_get_drvdata(hdev); if (intf->cur_altsetting->desc.bInterfaceProtocol != USB_INTERFACE_PROTOCOL_MOUSE) diff --git a/drivers/hid/hid-roccat-konepure.h b/drivers/hid/hid-roccat-konepure.h deleted file mode 100644 index 2cd24e93dfd6..000000000000 --- a/drivers/hid/hid-roccat-konepure.h +++ /dev/null @@ -1,72 +0,0 @@ -#ifndef __HID_ROCCAT_KONEPURE_H -#define __HID_ROCCAT_KONEPURE_H - -/* - * Copyright (c) 2012 Stefan Achatz <erazor_de@users.sourceforge.net> - */ - -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
- */ - -#include <linux/types.h> - -enum { - KONEPURE_SIZE_ACTUAL_PROFILE = 0x03, - KONEPURE_SIZE_CONTROL = 0x03, - KONEPURE_SIZE_FIRMWARE_WRITE = 0x0402, - KONEPURE_SIZE_INFO = 0x06, - KONEPURE_SIZE_MACRO = 0x0822, - KONEPURE_SIZE_PROFILE_SETTINGS = 0x1f, - KONEPURE_SIZE_PROFILE_BUTTONS = 0x3b, - KONEPURE_SIZE_SENSOR = 0x06, - KONEPURE_SIZE_TALK = 0x10, - KONEPURE_SIZE_TCU = 0x04, - KONEPURE_SIZE_TCU_IMAGE = 0x0404, -}; - -enum konepure_control_requests { - KONEPURE_CONTROL_REQUEST_GENERAL = 0x80, - KONEPURE_CONTROL_REQUEST_BUTTONS = 0x90, -}; - -enum konepure_commands { - KONEPURE_COMMAND_CONTROL = 0x04, - KONEPURE_COMMAND_ACTUAL_PROFILE = 0x05, - KONEPURE_COMMAND_PROFILE_SETTINGS = 0x06, - KONEPURE_COMMAND_PROFILE_BUTTONS = 0x07, - KONEPURE_COMMAND_MACRO = 0x08, - KONEPURE_COMMAND_INFO = 0x09, - KONEPURE_COMMAND_TCU = 0x0c, - KONEPURE_COMMAND_TCU_IMAGE = 0x0c, - KONEPURE_COMMAND_E = 0x0e, - KONEPURE_COMMAND_SENSOR = 0x0f, - KONEPURE_COMMAND_TALK = 0x10, - KONEPURE_COMMAND_FIRMWARE_WRITE = 0x1b, - KONEPURE_COMMAND_FIRMWARE_WRITE_CONTROL = 0x1c, -}; - -enum { - KONEPURE_MOUSE_REPORT_NUMBER_BUTTON = 3, -}; - -struct konepure_mouse_report_button { - uint8_t report_number; /* always KONEPURE_MOUSE_REPORT_NUMBER_BUTTON */ - uint8_t zero; - uint8_t type; - uint8_t data1; - uint8_t data2; - uint8_t zero2; - uint8_t unknown[2]; -} __packed; - -struct konepure_device { - int roccat_claimed; - int chrdev_minor; - struct mutex konepure_lock; -}; - -#endif diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c index 0c8e1ef0b67d..966047711fbf 100644 --- a/drivers/hid/hid-roccat-kovaplus.c +++ b/drivers/hid/hid-roccat-kovaplus.c @@ -554,9 +554,13 @@ static void kovaplus_keep_values_up_to_date(struct kovaplus_device *kovaplus, break; case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI: kovaplus->actual_cpi = kovaplus_convert_event_cpi(button_report->data1); + break; case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY: kovaplus->actual_x_sensitivity = button_report->data1; kovaplus->actual_y_sensitivity = button_report->data2; + break; + default: + break; } } diff --git a/drivers/hid/hid-roccat-ryos.c b/drivers/hid/hid-roccat-ryos.c new file mode 100644 index 000000000000..47cc8f30ff6d --- /dev/null +++ b/drivers/hid/hid-roccat-ryos.c @@ -0,0 +1,241 @@ +/* + * Roccat Ryos driver for Linux + * + * Copyright (c) 2013 Stefan Achatz <erazor_de@users.sourceforge.net> + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ */ + +#include <linux/types.h> +#include <linux/device.h> +#include <linux/input.h> +#include <linux/hid.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/hid-roccat.h> +#include "hid-ids.h" +#include "hid-roccat-common.h" + +enum { + RYOS_REPORT_NUMBER_SPECIAL = 3, + RYOS_USB_INTERFACE_PROTOCOL = 0, +}; + +struct ryos_report_special { + uint8_t number; /* RYOS_REPORT_NUMBER_SPECIAL */ + uint8_t data[4]; +} __packed; + +static struct class *ryos_class; + +ROCCAT_COMMON2_BIN_ATTRIBUTE_W(control, 0x04, 0x03); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile, 0x05, 0x03); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_primary, 0x06, 0x7d); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_function, 0x07, 0x5f); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_macro, 0x08, 0x23); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_thumbster, 0x09, 0x17); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_extra, 0x0a, 0x08); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_easyzone, 0x0b, 0x126); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(key_mask, 0x0c, 0x06); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(light, 0x0d, 0x10); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(macro, 0x0e, 0x7d2); +ROCCAT_COMMON2_BIN_ATTRIBUTE_R(info, 0x0f, 0x08); +ROCCAT_COMMON2_BIN_ATTRIBUTE_W(reset, 0x11, 0x03); +ROCCAT_COMMON2_BIN_ATTRIBUTE_W(light_control, 0x13, 0x08); +ROCCAT_COMMON2_BIN_ATTRIBUTE_W(talk, 0x16, 0x10); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(stored_lights, 0x17, 0x0566); +ROCCAT_COMMON2_BIN_ATTRIBUTE_W(custom_lights, 0x18, 0x14); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(light_macro, 0x19, 0x07d2); + +static struct bin_attribute *ryos_bin_attrs[] = { + &bin_attr_control, + &bin_attr_profile, + &bin_attr_keys_primary, + &bin_attr_keys_function, + &bin_attr_keys_macro, + &bin_attr_keys_thumbster, + &bin_attr_keys_extra, + &bin_attr_keys_easyzone, + &bin_attr_key_mask, + &bin_attr_light, + &bin_attr_macro, + &bin_attr_info, + &bin_attr_reset, + &bin_attr_light_control, + &bin_attr_talk, + &bin_attr_stored_lights, + &bin_attr_custom_lights, + &bin_attr_light_macro, + NULL, +}; + +static const struct attribute_group ryos_group = { + .bin_attrs = ryos_bin_attrs, +}; + +static const struct attribute_group *ryos_groups[] = { + &ryos_group, + NULL, +}; + +static int ryos_init_specials(struct hid_device *hdev) +{ + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + struct usb_device *usb_dev = interface_to_usbdev(intf); + struct roccat_common2_device *ryos; + int retval; + + if (intf->cur_altsetting->desc.bInterfaceProtocol + != RYOS_USB_INTERFACE_PROTOCOL) { + hid_set_drvdata(hdev, NULL); + return 0; + } + + ryos = kzalloc(sizeof(*ryos), GFP_KERNEL); + if (!ryos) { + hid_err(hdev, "can't alloc device descriptor\n"); + return -ENOMEM; + } + hid_set_drvdata(hdev, ryos); + + retval = roccat_common2_device_init_struct(usb_dev, ryos); + if (retval) { + hid_err(hdev, "couldn't init Ryos device\n"); + goto exit_free; + } + + retval = roccat_connect(ryos_class, hdev, + sizeof(struct ryos_report_special)); + if (retval < 0) { + hid_err(hdev, "couldn't init char dev\n"); + } else { + ryos->chrdev_minor = retval; + ryos->roccat_claimed = 1; + } + + return 0; +exit_free: + kfree(ryos); + return retval; +} + +static void ryos_remove_specials(struct hid_device *hdev) +{ + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + struct roccat_common2_device *ryos; + + if (intf->cur_altsetting->desc.bInterfaceProtocol + != RYOS_USB_INTERFACE_PROTOCOL) + return; + + ryos = hid_get_drvdata(hdev); + if (ryos->roccat_claimed) + roccat_disconnect(ryos->chrdev_minor); + kfree(ryos); +} + +static int 
ryos_probe(struct hid_device *hdev, + const struct hid_device_id *id) +{ + int retval; + + retval = hid_parse(hdev); + if (retval) { + hid_err(hdev, "parse failed\n"); + goto exit; + } + + retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + if (retval) { + hid_err(hdev, "hw start failed\n"); + goto exit; + } + + retval = ryos_init_specials(hdev); + if (retval) { + hid_err(hdev, "couldn't install mouse\n"); + goto exit_stop; + } + + return 0; + +exit_stop: + hid_hw_stop(hdev); +exit: + return retval; +} + +static void ryos_remove(struct hid_device *hdev) +{ + ryos_remove_specials(hdev); + hid_hw_stop(hdev); +} + +static int ryos_raw_event(struct hid_device *hdev, + struct hid_report *report, u8 *data, int size) +{ + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + struct roccat_common2_device *ryos = hid_get_drvdata(hdev); + + if (intf->cur_altsetting->desc.bInterfaceProtocol + != RYOS_USB_INTERFACE_PROTOCOL) + return 0; + + if (data[0] != RYOS_REPORT_NUMBER_SPECIAL) + return 0; + + if (ryos != NULL && ryos->roccat_claimed) + roccat_report_event(ryos->chrdev_minor, data); + + return 0; +} + +static const struct hid_device_id ryos_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO) }, + { } +}; + +MODULE_DEVICE_TABLE(hid, ryos_devices); + +static struct hid_driver ryos_driver = { + .name = "ryos", + .id_table = ryos_devices, + .probe = ryos_probe, + .remove = ryos_remove, + .raw_event = ryos_raw_event +}; + +static int __init ryos_init(void) +{ + int retval; + + ryos_class = class_create(THIS_MODULE, "ryos"); + if (IS_ERR(ryos_class)) + return PTR_ERR(ryos_class); + ryos_class->dev_groups = ryos_groups; + + retval = hid_register_driver(&ryos_driver); + if (retval) + class_destroy(ryos_class); + return retval; +} + +static void __exit ryos_exit(void) +{ + hid_unregister_driver(&ryos_driver); + class_destroy(ryos_class); +} + +module_init(ryos_init); +module_exit(ryos_exit); + +MODULE_AUTHOR("Stefan Achatz"); +MODULE_DESCRIPTION("USB Roccat Ryos MK/Glow/Pro driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c index 0332267199d5..6dbf6e04dce7 100644 --- a/drivers/hid/hid-roccat-savu.c +++ b/drivers/hid/hid-roccat-savu.c @@ -27,98 +27,15 @@ static struct class *savu_class; -static ssize_t savu_sysfs_read(struct file *fp, struct kobject *kobj, - char *buf, loff_t off, size_t count, - size_t real_size, uint command) -{ - struct device *dev = - container_of(kobj, struct device, kobj)->parent->parent; - struct savu_device *savu = hid_get_drvdata(dev_get_drvdata(dev)); - struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); - int retval; - - if (off >= real_size) - return 0; - - if (off != 0 || count != real_size) - return -EINVAL; - - mutex_lock(&savu->savu_lock); - retval = roccat_common2_receive(usb_dev, command, buf, real_size); - mutex_unlock(&savu->savu_lock); - - return retval ? 
retval : real_size; -} - -static ssize_t savu_sysfs_write(struct file *fp, struct kobject *kobj, - void const *buf, loff_t off, size_t count, - size_t real_size, uint command) -{ - struct device *dev = - container_of(kobj, struct device, kobj)->parent->parent; - struct savu_device *savu = hid_get_drvdata(dev_get_drvdata(dev)); - struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); - int retval; - - if (off != 0 || count != real_size) - return -EINVAL; - - mutex_lock(&savu->savu_lock); - retval = roccat_common2_send_with_status(usb_dev, command, - (void *)buf, real_size); - mutex_unlock(&savu->savu_lock); - - return retval ? retval : real_size; -} - -#define SAVU_SYSFS_W(thingy, THINGY) \ -static ssize_t savu_sysfs_write_ ## thingy(struct file *fp, \ - struct kobject *kobj, struct bin_attribute *attr, char *buf, \ - loff_t off, size_t count) \ -{ \ - return savu_sysfs_write(fp, kobj, buf, off, count, \ - SAVU_SIZE_ ## THINGY, SAVU_COMMAND_ ## THINGY); \ -} - -#define SAVU_SYSFS_R(thingy, THINGY) \ -static ssize_t savu_sysfs_read_ ## thingy(struct file *fp, \ - struct kobject *kobj, struct bin_attribute *attr, char *buf, \ - loff_t off, size_t count) \ -{ \ - return savu_sysfs_read(fp, kobj, buf, off, count, \ - SAVU_SIZE_ ## THINGY, SAVU_COMMAND_ ## THINGY); \ -} - -#define SAVU_SYSFS_RW(thingy, THINGY) \ -SAVU_SYSFS_W(thingy, THINGY) \ -SAVU_SYSFS_R(thingy, THINGY) - -#define SAVU_BIN_ATTRIBUTE_RW(thingy, THINGY) \ -SAVU_SYSFS_RW(thingy, THINGY); \ -static struct bin_attribute bin_attr_##thingy = { \ - .attr = { .name = #thingy, .mode = 0660 }, \ - .size = SAVU_SIZE_ ## THINGY, \ - .read = savu_sysfs_read_ ## thingy, \ - .write = savu_sysfs_write_ ## thingy \ -} - -#define SAVU_BIN_ATTRIBUTE_W(thingy, THINGY) \ -SAVU_SYSFS_W(thingy, THINGY); \ -static struct bin_attribute bin_attr_##thingy = { \ - .attr = { .name = #thingy, .mode = 0220 }, \ - .size = SAVU_SIZE_ ## THINGY, \ - .write = savu_sysfs_write_ ## thingy \ -} - -SAVU_BIN_ATTRIBUTE_W(control, CONTROL); -SAVU_BIN_ATTRIBUTE_RW(profile, PROFILE); -SAVU_BIN_ATTRIBUTE_RW(general, GENERAL); -SAVU_BIN_ATTRIBUTE_RW(buttons, BUTTONS); -SAVU_BIN_ATTRIBUTE_RW(macro, MACRO); -SAVU_BIN_ATTRIBUTE_RW(info, INFO); -SAVU_BIN_ATTRIBUTE_RW(sensor, SENSOR); - -static struct bin_attribute *savu_bin_attributes[] = { +ROCCAT_COMMON2_BIN_ATTRIBUTE_W(control, 0x4, 0x03); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile, 0x5, 0x03); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(general, 0x6, 0x10); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(buttons, 0x7, 0x2f); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(macro, 0x8, 0x0823); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(info, 0x9, 0x08); +ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(sensor, 0xc, 0x04); + +static struct bin_attribute *savu_bin_attrs[] = { &bin_attr_control, &bin_attr_profile, &bin_attr_general, @@ -130,7 +47,7 @@ static struct bin_attribute *savu_bin_attributes[] = { }; static const struct attribute_group savu_group = { - .bin_attrs = savu_bin_attributes, + .bin_attrs = savu_bin_attrs, }; static const struct attribute_group *savu_groups[] = { @@ -138,19 +55,11 @@ static const struct attribute_group *savu_groups[] = { NULL, }; -static int savu_init_savu_device_struct(struct usb_device *usb_dev, - struct savu_device *savu) -{ - mutex_init(&savu->savu_lock); - - return 0; -} - static int savu_init_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct usb_device *usb_dev = interface_to_usbdev(intf); - struct savu_device *savu; + struct roccat_common2_device *savu; int retval; if 
(intf->cur_altsetting->desc.bInterfaceProtocol @@ -166,9 +75,9 @@ static int savu_init_specials(struct hid_device *hdev) } hid_set_drvdata(hdev, savu); - retval = savu_init_savu_device_struct(usb_dev, savu); + retval = roccat_common2_device_init_struct(usb_dev, savu); if (retval) { - hid_err(hdev, "couldn't init struct savu_device\n"); + hid_err(hdev, "couldn't init Savu device\n"); goto exit_free; } @@ -190,7 +99,7 @@ exit_free: static void savu_remove_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - struct savu_device *savu; + struct roccat_common2_device *savu; if (intf->cur_altsetting->desc.bInterfaceProtocol != USB_INTERFACE_PROTOCOL_MOUSE) @@ -239,7 +148,7 @@ static void savu_remove(struct hid_device *hdev) hid_hw_stop(hdev); } -static void savu_report_to_chrdev(struct savu_device const *savu, +static void savu_report_to_chrdev(struct roccat_common2_device const *savu, u8 const *data) { struct savu_roccat_report roccat_report; @@ -261,7 +170,7 @@ static int savu_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - struct savu_device *savu = hid_get_drvdata(hdev); + struct roccat_common2_device *savu = hid_get_drvdata(hdev); if (intf->cur_altsetting->desc.bInterfaceProtocol != USB_INTERFACE_PROTOCOL_MOUSE) diff --git a/drivers/hid/hid-roccat-savu.h b/drivers/hid/hid-roccat-savu.h index 9120ba72087f..d23217bd2b86 100644 --- a/drivers/hid/hid-roccat-savu.h +++ b/drivers/hid/hid-roccat-savu.h @@ -14,31 +14,6 @@ #include <linux/types.h> -enum { - SAVU_SIZE_CONTROL = 0x03, - SAVU_SIZE_PROFILE = 0x03, - SAVU_SIZE_GENERAL = 0x10, - SAVU_SIZE_BUTTONS = 0x2f, - SAVU_SIZE_MACRO = 0x0823, - SAVU_SIZE_INFO = 0x08, - SAVU_SIZE_SENSOR = 0x04, -}; - -enum savu_control_requests { - SAVU_CONTROL_REQUEST_GENERAL = 0x80, - SAVU_CONTROL_REQUEST_BUTTONS = 0x90, -}; - -enum savu_commands { - SAVU_COMMAND_CONTROL = 0x4, - SAVU_COMMAND_PROFILE = 0x5, - SAVU_COMMAND_GENERAL = 0x6, - SAVU_COMMAND_BUTTONS = 0x7, - SAVU_COMMAND_MACRO = 0x8, - SAVU_COMMAND_INFO = 0x9, - SAVU_COMMAND_SENSOR = 0xc, -}; - struct savu_mouse_report_special { uint8_t report_number; /* always 3 */ uint8_t zero; @@ -77,11 +52,4 @@ struct savu_roccat_report { uint8_t data[2]; } __packed; -struct savu_device { - int roccat_claimed; - int chrdev_minor; - - struct mutex savu_lock; -}; - #endif diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index 88fc5aefcd96..a184e1921c11 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c @@ -326,7 +326,8 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev, field->logical == attr_usage_id) { sensor_hub_fill_attr_info(info, i, report->id, field->unit, field->unit_exponent, - field->report_size); + field->report_size * + field->report_count); ret = 0; } else { for (j = 0; j < field->maxusage; ++j) { @@ -338,7 +339,8 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev, i, report->id, field->unit, field->unit_exponent, - field->report_size); + field->report_size * + field->report_count); ret = 0; break; } @@ -425,9 +427,10 @@ static int sensor_hub_raw_event(struct hid_device *hdev, hid_dbg(hdev, "%d collection_index:%x hid:%x sz:%x\n", i, report->field[i]->usage->collection_index, report->field[i]->usage->hid, - report->field[i]->report_size/8); - - sz = report->field[i]->report_size/8; + (report->field[i]->report_size * + report->field[i]->report_count)/8); 
+ sz = (report->field[i]->report_size * + report->field[i]->report_count)/8; if (pdata->pending.status && pdata->pending.attr_usage_id == report->field[i]->usage->hid) { hid_dbg(hdev, "data was pending ...\n"); diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index b18320db5f7d..da551d113762 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -419,21 +419,14 @@ static int sixaxis_usb_output_raw_report(struct hid_device *hid, __u8 *buf, */ static int sixaxis_set_operational_usb(struct hid_device *hdev) { - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - struct usb_device *dev = interface_to_usbdev(intf); - __u16 ifnum = intf->cur_altsetting->desc.bInterfaceNumber; int ret; char *buf = kmalloc(18, GFP_KERNEL); if (!buf) return -ENOMEM; - ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), - HID_REQ_GET_REPORT, - USB_DIR_IN | USB_TYPE_CLASS | - USB_RECIP_INTERFACE, - (3 << 8) | 0xf2, ifnum, buf, 17, - USB_CTRL_GET_TIMEOUT); + ret = hdev->hid_get_raw_report(hdev, 0xf2, buf, 17, HID_FEATURE_REPORT); + if (ret < 0) hid_err(hdev, "can't set operational mode\n"); @@ -621,6 +614,54 @@ static void buzz_remove(struct hid_device *hdev) drv_data->extra = NULL; } +#ifdef CONFIG_SONY_FF +static int sony_play_effect(struct input_dev *dev, void *data, + struct ff_effect *effect) +{ + unsigned char buf[] = { + 0x01, + 0x00, 0xff, 0x00, 0xff, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x03, + 0xff, 0x27, 0x10, 0x00, 0x32, + 0xff, 0x27, 0x10, 0x00, 0x32, + 0xff, 0x27, 0x10, 0x00, 0x32, + 0xff, 0x27, 0x10, 0x00, 0x32, + 0x00, 0x00, 0x00, 0x00, 0x00 + }; + __u8 left; + __u8 right; + struct hid_device *hid = input_get_drvdata(dev); + + if (effect->type != FF_RUMBLE) + return 0; + + left = effect->u.rumble.strong_magnitude / 256; + right = effect->u.rumble.weak_magnitude ? 1 : 0; + + buf[3] = right; + buf[5] = left; + + return hid->hid_output_raw_report(hid, buf, sizeof(buf), + HID_OUTPUT_REPORT); +} + +static int sony_init_ff(struct hid_device *hdev) +{ + struct hid_input *hidinput = list_entry(hdev->inputs.next, + struct hid_input, list); + struct input_dev *input_dev = hidinput->input; + + input_set_capability(input_dev, EV_FF, FF_RUMBLE); + return input_ff_create_memless(input_dev, NULL, sony_play_effect); +} + +#else +static int sony_init_ff(struct hid_device *hdev) +{ + return 0; +} +#endif + static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; @@ -670,6 +711,10 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) if (ret < 0) goto err_stop; + ret = sony_init_ff(hdev); + if (ret < 0) + goto err_stop; + return 0; err_stop: hid_hw_stop(hdev); diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c index 71adf9e60b13..6b61f01e01e7 100644 --- a/drivers/hid/hid-wiimote-modules.c +++ b/drivers/hid/hid-wiimote-modules.c @@ -1655,10 +1655,39 @@ static void wiimod_pro_in_ext(struct wiimote_data *wdata, const __u8 *ext) ly = (ext[4] & 0xff) | ((ext[5] & 0x0f) << 8); ry = (ext[6] & 0xff) | ((ext[7] & 0x0f) << 8); - input_report_abs(wdata->extension.input, ABS_X, lx - 0x800); - input_report_abs(wdata->extension.input, ABS_Y, ly - 0x800); - input_report_abs(wdata->extension.input, ABS_RX, rx - 0x800); - input_report_abs(wdata->extension.input, ABS_RY, ry - 0x800); + /* zero-point offsets */ + lx -= 0x800; + ly = 0x800 - ly; + rx -= 0x800; + ry = 0x800 - ry; + + /* Trivial automatic calibration. 
We don't know any calibration data + * in the EEPROM so we must use the first report to calibrate the + * null-position of the analog sticks. Users can retrigger calibration + * via sysfs, or set it explicitly. If data is off more than abs(500), + * we skip calibration as the sticks are likely to be moved already. */ + if (!(wdata->state.flags & WIIPROTO_FLAG_PRO_CALIB_DONE)) { + wdata->state.flags |= WIIPROTO_FLAG_PRO_CALIB_DONE; + if (abs(lx) < 500) + wdata->state.calib_pro_sticks[0] = -lx; + if (abs(ly) < 500) + wdata->state.calib_pro_sticks[1] = -ly; + if (abs(rx) < 500) + wdata->state.calib_pro_sticks[2] = -rx; + if (abs(ry) < 500) + wdata->state.calib_pro_sticks[3] = -ry; + } + + /* apply calibration data */ + lx += wdata->state.calib_pro_sticks[0]; + ly += wdata->state.calib_pro_sticks[1]; + rx += wdata->state.calib_pro_sticks[2]; + ry += wdata->state.calib_pro_sticks[3]; + + input_report_abs(wdata->extension.input, ABS_X, lx); + input_report_abs(wdata->extension.input, ABS_Y, ly); + input_report_abs(wdata->extension.input, ABS_RX, rx); + input_report_abs(wdata->extension.input, ABS_RY, ry); input_report_key(wdata->extension.input, wiimod_pro_map[WIIMOD_PRO_KEY_RIGHT], @@ -1766,12 +1795,70 @@ static int wiimod_pro_play(struct input_dev *dev, void *data, return 0; } +static ssize_t wiimod_pro_calib_show(struct device *dev, + struct device_attribute *attr, + char *out) +{ + struct wiimote_data *wdata = dev_to_wii(dev); + int r; + + r = 0; + r += sprintf(&out[r], "%+06hd:", wdata->state.calib_pro_sticks[0]); + r += sprintf(&out[r], "%+06hd ", wdata->state.calib_pro_sticks[1]); + r += sprintf(&out[r], "%+06hd:", wdata->state.calib_pro_sticks[2]); + r += sprintf(&out[r], "%+06hd\n", wdata->state.calib_pro_sticks[3]); + + return r; +} + +static ssize_t wiimod_pro_calib_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct wiimote_data *wdata = dev_to_wii(dev); + int r; + s16 x1, y1, x2, y2; + + if (!strncmp(buf, "scan\n", 5)) { + spin_lock_irq(&wdata->state.lock); + wdata->state.flags &= ~WIIPROTO_FLAG_PRO_CALIB_DONE; + spin_unlock_irq(&wdata->state.lock); + } else { + r = sscanf(buf, "%hd:%hd %hd:%hd", &x1, &y1, &x2, &y2); + if (r != 4) + return -EINVAL; + + spin_lock_irq(&wdata->state.lock); + wdata->state.flags |= WIIPROTO_FLAG_PRO_CALIB_DONE; + spin_unlock_irq(&wdata->state.lock); + + wdata->state.calib_pro_sticks[0] = x1; + wdata->state.calib_pro_sticks[1] = y1; + wdata->state.calib_pro_sticks[2] = x2; + wdata->state.calib_pro_sticks[3] = y2; + } + + return strnlen(buf, PAGE_SIZE); +} + +static DEVICE_ATTR(pro_calib, S_IRUGO|S_IWUSR|S_IWGRP, wiimod_pro_calib_show, + wiimod_pro_calib_store); + static int wiimod_pro_probe(const struct wiimod_ops *ops, struct wiimote_data *wdata) { int ret, i; + unsigned long flags; INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker); + wdata->state.calib_pro_sticks[0] = 0; + wdata->state.calib_pro_sticks[1] = 0; + wdata->state.calib_pro_sticks[2] = 0; + wdata->state.calib_pro_sticks[3] = 0; + + spin_lock_irqsave(&wdata->state.lock, flags); + wdata->state.flags &= ~WIIPROTO_FLAG_PRO_CALIB_DONE; + spin_unlock_irqrestore(&wdata->state.lock, flags); wdata->extension.input = input_allocate_device(); if (!wdata->extension.input) @@ -1786,6 +1873,13 @@ static int wiimod_pro_probe(const struct wiimod_ops *ops, goto err_free; } + ret = device_create_file(&wdata->hdev->dev, + &dev_attr_pro_calib); + if (ret) { + hid_err(wdata->hdev, "cannot create sysfs attribute\n"); + goto err_free; + } + 
wdata->extension.input->open = wiimod_pro_open; wdata->extension.input->close = wiimod_pro_close; wdata->extension.input->dev.parent = &wdata->hdev->dev; @@ -1806,20 +1900,23 @@ static int wiimod_pro_probe(const struct wiimod_ops *ops, set_bit(ABS_RX, wdata->extension.input->absbit); set_bit(ABS_RY, wdata->extension.input->absbit); input_set_abs_params(wdata->extension.input, - ABS_X, -0x800, 0x800, 2, 4); + ABS_X, -0x400, 0x400, 4, 100); input_set_abs_params(wdata->extension.input, - ABS_Y, -0x800, 0x800, 2, 4); + ABS_Y, -0x400, 0x400, 4, 100); input_set_abs_params(wdata->extension.input, - ABS_RX, -0x800, 0x800, 2, 4); + ABS_RX, -0x400, 0x400, 4, 100); input_set_abs_params(wdata->extension.input, - ABS_RY, -0x800, 0x800, 2, 4); + ABS_RY, -0x400, 0x400, 4, 100); ret = input_register_device(wdata->extension.input); if (ret) - goto err_free; + goto err_file; return 0; +err_file: + device_remove_file(&wdata->hdev->dev, + &dev_attr_pro_calib); err_free: input_free_device(wdata->extension.input); wdata->extension.input = NULL; @@ -1837,6 +1934,8 @@ static void wiimod_pro_remove(const struct wiimod_ops *ops, input_unregister_device(wdata->extension.input); wdata->extension.input = NULL; cancel_work_sync(&wdata->rumble_worker); + device_remove_file(&wdata->hdev->dev, + &dev_attr_pro_calib); spin_lock_irqsave(&wdata->state.lock, flags); wiiproto_req_rumble(wdata, 0); diff --git a/drivers/hid/hid-wiimote.h b/drivers/hid/hid-wiimote.h index cfa63b0825b0..10934aa129fb 100644 --- a/drivers/hid/hid-wiimote.h +++ b/drivers/hid/hid-wiimote.h @@ -46,6 +46,7 @@ #define WIIPROTO_FLAG_DRM_LOCKED 0x8000 #define WIIPROTO_FLAG_BUILTIN_MP 0x010000 #define WIIPROTO_FLAG_NO_MP 0x020000 +#define WIIPROTO_FLAG_PRO_CALIB_DONE 0x040000 #define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \ WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4) @@ -135,6 +136,7 @@ struct wiimote_state { /* calibration/cache data */ __u16 calib_bboard[4][3]; + __s16 calib_pro_sticks[4]; __u8 cache_rumble; }; diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index fd7ce374f812..5f7e55f4b7f0 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -455,10 +455,6 @@ static void i2c_hid_init_reports(struct hid_device *hid) } list_for_each_entry(report, - &hid->report_enum[HID_INPUT_REPORT].report_list, list) - i2c_hid_init_report(report, inbuf, ihid->bufsize); - - list_for_each_entry(report, &hid->report_enum[HID_FEATURE_REPORT].report_list, list) i2c_hid_init_report(report, inbuf, ihid->bufsize); @@ -1012,7 +1008,7 @@ static int i2c_hid_probe(struct i2c_client *client, hid->hid_get_raw_report = i2c_hid_get_raw_report; hid->hid_output_raw_report = i2c_hid_output_raw_report; hid->dev.parent = &client->dev; - ACPI_HANDLE_SET(&hid->dev, ACPI_HANDLE(&client->dev)); + ACPI_COMPANION_SET(&hid->dev, ACPI_COMPANION(&client->dev)); hid->bus = BUS_I2C; hid->version = le16_to_cpu(ihid->hdesc.bcdVersion); hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID); diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 3fca3be08337..0db9a67278ba 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -84,6 +84,8 @@ static const struct hid_blacklist { { USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH, 
HID_QUIRK_NOGET }, + { USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS817_TOUCH, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET }, diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index cdff74282955..4c4c1421bf28 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -60,6 +60,11 @@ * This driver also supports the G781 from GMT. This device is compatible * with the ADM1032. * + * This driver also supports TMP451 from Texas Instruments. This device is + * supported in both compatibility and extended mode. It's mostly compatible + * with ADT7461 except for local temperature low byte register and max + * conversion rate. + * * Since the LM90 was the first chipset supported by this driver, most * comments will refer to this chipset, but are actually general and * concern all supported chipsets, unless mentioned otherwise. @@ -89,6 +94,8 @@ #include <linux/err.h> #include <linux/mutex.h> #include <linux/sysfs.h> +#include <linux/interrupt.h> +#include <linux/regulator/consumer.h> /* * Addresses to scan @@ -110,7 +117,7 @@ static const unsigned short normal_i2c[] = { 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680, - max6646, w83l771, max6696, sa56004, g781 }; + max6646, w83l771, max6696, sa56004, g781, tmp451 }; /* * The LM90 registers @@ -167,6 +174,9 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680, #define LM90_DEF_CONVRATE_RVAL 6 /* Def conversion rate register value */ #define LM90_MAX_CONVRATE_MS 16000 /* Maximum conversion rate in ms */ +/* TMP451 registers */ +#define TMP451_REG_R_LOCAL_TEMPL 0x15 + /* * Device flags */ @@ -179,6 +189,23 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680, #define LM90_HAVE_TEMP3 (1 << 6) /* 3rd temperature sensor */ #define LM90_HAVE_BROKEN_ALERT (1 << 7) /* Broken alert */ +/* LM90 status */ +#define LM90_STATUS_LTHRM (1 << 0) /* local THERM limit tripped */ +#define LM90_STATUS_RTHRM (1 << 1) /* remote THERM limit tripped */ +#define LM90_STATUS_ROPEN (1 << 2) /* remote is an open circuit */ +#define LM90_STATUS_RLOW (1 << 3) /* remote low temp limit tripped */ +#define LM90_STATUS_RHIGH (1 << 4) /* remote high temp limit tripped */ +#define LM90_STATUS_LLOW (1 << 5) /* local low temp limit tripped */ +#define LM90_STATUS_LHIGH (1 << 6) /* local high temp limit tripped */ + +#define MAX6696_STATUS2_R2THRM (1 << 1) /* remote2 THERM limit tripped */ +#define MAX6696_STATUS2_R2OPEN (1 << 2) /* remote2 is an open circuit */ +#define MAX6696_STATUS2_R2LOW (1 << 3) /* remote2 low temp limit tripped */ +#define MAX6696_STATUS2_R2HIGH (1 << 4) /* remote2 high temp limit tripped */ +#define MAX6696_STATUS2_ROT2 (1 << 5) /* remote emergency limit tripped */ +#define MAX6696_STATUS2_R2OT2 (1 << 6) /* remote2 emergency limit tripped */ +#define MAX6696_STATUS2_LOT2 (1 << 7) /* local emergency limit tripped */ + /* * Driver data (common to all clients) */ @@ -205,6 +232,7 @@ static const struct i2c_device_id lm90_id[] = { { "nct1008", adt7461 }, { "w83l771", w83l771 }, { "sa56004", sa56004 }, + { "tmp451", tmp451 }, { } }; MODULE_DEVICE_TABLE(i2c, lm90_id); @@ -278,7 +306,7 @@ static const struct lm90_params lm90_params[] = { [max6696] = { .flags = LM90_HAVE_EMERGENCY | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3, - .alert_alarms = 0x187c, + 
.alert_alarms = 0x1c7c, .max_convrate = 6, .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, }, @@ -293,6 +321,43 @@ static const struct lm90_params lm90_params[] = { .max_convrate = 9, .reg_local_ext = SA56004_REG_R_LOCAL_TEMPL, }, + [tmp451] = { + .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT + | LM90_HAVE_BROKEN_ALERT, + .alert_alarms = 0x7c, + .max_convrate = 9, + .reg_local_ext = TMP451_REG_R_LOCAL_TEMPL, + } +}; + +/* + * TEMP8 register index + */ +enum lm90_temp8_reg_index { + LOCAL_LOW = 0, + LOCAL_HIGH, + LOCAL_CRIT, + REMOTE_CRIT, + LOCAL_EMERG, /* max6659 and max6695/96 */ + REMOTE_EMERG, /* max6659 and max6695/96 */ + REMOTE2_CRIT, /* max6695/96 only */ + REMOTE2_EMERG, /* max6695/96 only */ + TEMP8_REG_NUM +}; + +/* + * TEMP11 register index + */ +enum lm90_temp11_reg_index { + REMOTE_TEMP = 0, + REMOTE_LOW, + REMOTE_HIGH, + REMOTE_OFFSET, /* except max6646, max6657/58/59, and max6695/96 */ + LOCAL_TEMP, + REMOTE2_TEMP, /* max6695/96 only */ + REMOTE2_LOW, /* max6695/96 only */ + REMOTE2_HIGH, /* max6695/96 only */ + TEMP11_REG_NUM }; /* @@ -302,6 +367,7 @@ static const struct lm90_params lm90_params[] = { struct lm90_data { struct device *hwmon_dev; struct mutex update_lock; + struct regulator *regulator; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ int kind; @@ -317,25 +383,8 @@ struct lm90_data { u8 reg_local_ext; /* local extension register offset */ /* registers values */ - s8 temp8[8]; /* 0: local low limit - * 1: local high limit - * 2: local critical limit - * 3: remote critical limit - * 4: local emergency limit (max6659 and max6695/96) - * 5: remote emergency limit (max6659 and max6695/96) - * 6: remote 2 critical limit (max6695/96 only) - * 7: remote 2 emergency limit (max6695/96 only) - */ - s16 temp11[8]; /* 0: remote input - * 1: remote low limit - * 2: remote high limit - * 3: remote offset (except max6646, max6657/58/59, - * and max6695/96) - * 4: local input - * 5: remote 2 input (max6695/96 only) - * 6: remote 2 low limit (max6695/96 only) - * 7: remote 2 high limit (max6695/96 only) - */ + s8 temp8[TEMP8_REG_NUM]; + s16 temp11[TEMP11_REG_NUM]; u8 temp_hyst; u16 alarms; /* bitvector (upper 8 bits for max6695/96) */ }; @@ -477,37 +526,42 @@ static struct lm90_data *lm90_update_device(struct device *dev) u8 alarms; dev_dbg(&client->dev, "Updating lm90 data.\n"); - lm90_read_reg(client, LM90_REG_R_LOCAL_LOW, &data->temp8[0]); - lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH, &data->temp8[1]); - lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT, &data->temp8[2]); - lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT, &data->temp8[3]); + lm90_read_reg(client, LM90_REG_R_LOCAL_LOW, + &data->temp8[LOCAL_LOW]); + lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH, + &data->temp8[LOCAL_HIGH]); + lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT, + &data->temp8[LOCAL_CRIT]); + lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT, + &data->temp8[REMOTE_CRIT]); lm90_read_reg(client, LM90_REG_R_TCRIT_HYST, &data->temp_hyst); if (data->reg_local_ext) { lm90_read16(client, LM90_REG_R_LOCAL_TEMP, data->reg_local_ext, - &data->temp11[4]); + &data->temp11[LOCAL_TEMP]); } else { if (lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP, &h) == 0) - data->temp11[4] = h << 8; + data->temp11[LOCAL_TEMP] = h << 8; } lm90_read16(client, LM90_REG_R_REMOTE_TEMPH, - LM90_REG_R_REMOTE_TEMPL, &data->temp11[0]); + LM90_REG_R_REMOTE_TEMPL, + &data->temp11[REMOTE_TEMP]); if (lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH, &h) == 0) { - data->temp11[1] = h << 8; + 
data->temp11[REMOTE_LOW] = h << 8; if ((data->flags & LM90_HAVE_REM_LIMIT_EXT) && lm90_read_reg(client, LM90_REG_R_REMOTE_LOWL, &l) == 0) - data->temp11[1] |= l; + data->temp11[REMOTE_LOW] |= l; } if (lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH, &h) == 0) { - data->temp11[2] = h << 8; + data->temp11[REMOTE_HIGH] = h << 8; if ((data->flags & LM90_HAVE_REM_LIMIT_EXT) && lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHL, &l) == 0) - data->temp11[2] |= l; + data->temp11[REMOTE_HIGH] |= l; } if (data->flags & LM90_HAVE_OFFSET) { @@ -515,13 +569,13 @@ static struct lm90_data *lm90_update_device(struct device *dev) &h) == 0 && lm90_read_reg(client, LM90_REG_R_REMOTE_OFFSL, &l) == 0) - data->temp11[3] = (h << 8) | l; + data->temp11[REMOTE_OFFSET] = (h << 8) | l; } if (data->flags & LM90_HAVE_EMERGENCY) { lm90_read_reg(client, MAX6659_REG_R_LOCAL_EMERG, - &data->temp8[4]); + &data->temp8[LOCAL_EMERG]); lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG, - &data->temp8[5]); + &data->temp8[REMOTE_EMERG]); } lm90_read_reg(client, LM90_REG_R_STATUS, &alarms); data->alarms = alarms; /* save as 16 bit value */ @@ -529,15 +583,16 @@ static struct lm90_data *lm90_update_device(struct device *dev) if (data->kind == max6696) { lm90_select_remote_channel(client, data, 1); lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT, - &data->temp8[6]); + &data->temp8[REMOTE2_CRIT]); lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG, - &data->temp8[7]); + &data->temp8[REMOTE2_EMERG]); lm90_read16(client, LM90_REG_R_REMOTE_TEMPH, - LM90_REG_R_REMOTE_TEMPL, &data->temp11[5]); + LM90_REG_R_REMOTE_TEMPL, + &data->temp11[REMOTE2_TEMP]); if (!lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH, &h)) - data->temp11[6] = h << 8; + data->temp11[REMOTE2_LOW] = h << 8; if (!lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH, &h)) - data->temp11[7] = h << 8; + data->temp11[REMOTE2_HIGH] = h << 8; lm90_select_remote_channel(client, data, 0); if (!lm90_read_reg(client, MAX6696_REG_R_STATUS2, @@ -709,7 +764,7 @@ static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr, struct lm90_data *data = lm90_update_device(dev); int temp; - if (data->kind == adt7461) + if (data->kind == adt7461 || data->kind == tmp451) temp = temp_from_u8_adt7461(data, data->temp8[attr->index]); else if (data->kind == max6646) temp = temp_from_u8(data->temp8[attr->index]); @@ -726,7 +781,7 @@ static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr, static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { - static const u8 reg[8] = { + static const u8 reg[TEMP8_REG_NUM] = { LM90_REG_W_LOCAL_LOW, LM90_REG_W_LOCAL_HIGH, LM90_REG_W_LOCAL_CRIT, @@ -753,7 +808,7 @@ static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr, val -= 16000; mutex_lock(&data->update_lock); - if (data->kind == adt7461) + if (data->kind == adt7461 || data->kind == tmp451) data->temp8[nr] = temp_to_u8_adt7461(data, val); else if (data->kind == max6646) data->temp8[nr] = temp_to_u8(val); @@ -775,7 +830,7 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr, struct lm90_data *data = lm90_update_device(dev); int temp; - if (data->kind == adt7461) + if (data->kind == adt7461 || data->kind == tmp451) temp = temp_from_u16_adt7461(data, data->temp11[attr->index]); else if (data->kind == max6646) temp = temp_from_u16(data->temp11[attr->index]); @@ -821,7 +876,7 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr, val -= 16000; 
mutex_lock(&data->update_lock); - if (data->kind == adt7461) + if (data->kind == adt7461 || data->kind == tmp451) data->temp11[index] = temp_to_u16_adt7461(data, val); else if (data->kind == max6646) data->temp11[index] = temp_to_u8(val) << 8; @@ -850,7 +905,7 @@ static ssize_t show_temphyst(struct device *dev, struct lm90_data *data = lm90_update_device(dev); int temp; - if (data->kind == adt7461) + if (data->kind == adt7461 || data->kind == tmp451) temp = temp_from_u8_adt7461(data, data->temp8[attr->index]); else if (data->kind == max6646) temp = temp_from_u8(data->temp8[attr->index]); @@ -878,12 +933,12 @@ static ssize_t set_temphyst(struct device *dev, struct device_attribute *dummy, return err; mutex_lock(&data->update_lock); - if (data->kind == adt7461) - temp = temp_from_u8_adt7461(data, data->temp8[2]); + if (data->kind == adt7461 || data->kind == tmp451) + temp = temp_from_u8_adt7461(data, data->temp8[LOCAL_CRIT]); else if (data->kind == max6646) - temp = temp_from_u8(data->temp8[2]); + temp = temp_from_u8(data->temp8[LOCAL_CRIT]); else - temp = temp_from_s8(data->temp8[2]); + temp = temp_from_s8(data->temp8[LOCAL_CRIT]); data->temp_hyst = hyst_to_reg(temp - val); i2c_smbus_write_byte_data(client, LM90_REG_W_TCRIT_HYST, @@ -937,25 +992,28 @@ static ssize_t set_update_interval(struct device *dev, return count; } -static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp11, NULL, 0, 4); -static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp11, NULL, 0, 0); +static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp11, NULL, + 0, LOCAL_TEMP); +static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp11, NULL, + 0, REMOTE_TEMP); static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp8, - set_temp8, 0); + set_temp8, LOCAL_LOW); static SENSOR_DEVICE_ATTR_2(temp2_min, S_IWUSR | S_IRUGO, show_temp11, - set_temp11, 0, 1); + set_temp11, 0, REMOTE_LOW); static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp8, - set_temp8, 1); + set_temp8, LOCAL_HIGH); static SENSOR_DEVICE_ATTR_2(temp2_max, S_IWUSR | S_IRUGO, show_temp11, - set_temp11, 1, 2); + set_temp11, 1, REMOTE_HIGH); static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp8, - set_temp8, 2); + set_temp8, LOCAL_CRIT); static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp8, - set_temp8, 3); + set_temp8, REMOTE_CRIT); static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temphyst, - set_temphyst, 2); -static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, show_temphyst, NULL, 3); + set_temphyst, LOCAL_CRIT); +static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, show_temphyst, NULL, + REMOTE_CRIT); static SENSOR_DEVICE_ATTR_2(temp2_offset, S_IWUSR | S_IRUGO, show_temp11, - set_temp11, 2, 3); + set_temp11, 2, REMOTE_OFFSET); /* Individual alarm files */ static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0); @@ -1003,13 +1061,13 @@ static const struct attribute_group lm90_group = { * Additional attributes for devices with emergency sensors */ static SENSOR_DEVICE_ATTR(temp1_emergency, S_IWUSR | S_IRUGO, show_temp8, - set_temp8, 4); + set_temp8, LOCAL_EMERG); static SENSOR_DEVICE_ATTR(temp2_emergency, S_IWUSR | S_IRUGO, show_temp8, - set_temp8, 5); + set_temp8, REMOTE_EMERG); static SENSOR_DEVICE_ATTR(temp1_emergency_hyst, S_IRUGO, show_temphyst, - NULL, 4); + NULL, LOCAL_EMERG); static SENSOR_DEVICE_ATTR(temp2_emergency_hyst, S_IRUGO, show_temphyst, - NULL, 5); + NULL, REMOTE_EMERG); static struct attribute *lm90_emergency_attributes[] = { 
&sensor_dev_attr_temp1_emergency.dev_attr.attr, @@ -1039,18 +1097,20 @@ static const struct attribute_group lm90_emergency_alarm_group = { /* * Additional attributes for devices with 3 temperature sensors */ -static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp11, NULL, 0, 5); +static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp11, NULL, + 0, REMOTE2_TEMP); static SENSOR_DEVICE_ATTR_2(temp3_min, S_IWUSR | S_IRUGO, show_temp11, - set_temp11, 3, 6); + set_temp11, 3, REMOTE2_LOW); static SENSOR_DEVICE_ATTR_2(temp3_max, S_IWUSR | S_IRUGO, show_temp11, - set_temp11, 4, 7); + set_temp11, 4, REMOTE2_HIGH); static SENSOR_DEVICE_ATTR(temp3_crit, S_IWUSR | S_IRUGO, show_temp8, - set_temp8, 6); -static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, show_temphyst, NULL, 6); + set_temp8, REMOTE2_CRIT); +static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, show_temphyst, NULL, + REMOTE2_CRIT); static SENSOR_DEVICE_ATTR(temp3_emergency, S_IWUSR | S_IRUGO, show_temp8, - set_temp8, 7); + set_temp8, REMOTE2_EMERG); static SENSOR_DEVICE_ATTR(temp3_emergency_hyst, S_IRUGO, show_temphyst, - NULL, 7); + NULL, REMOTE2_EMERG); static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 9); static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 10); @@ -1306,6 +1366,19 @@ static int lm90_detect(struct i2c_client *client, && (config1 & 0x3F) == 0x00 && convrate <= 0x08) name = "g781"; + } else + if (address == 0x4C + && man_id == 0x55) { /* Texas Instruments */ + int local_ext; + + local_ext = i2c_smbus_read_byte_data(client, + TMP451_REG_R_LOCAL_TEMPL); + + if (chip_id == 0x00 /* TMP451 */ + && (config1 & 0x1B) == 0x00 + && convrate <= 0x09 + && (local_ext & 0x0F) == 0x00) + name = "tmp451"; } if (!name) { /* identification failed */ @@ -1367,7 +1440,7 @@ static void lm90_init_client(struct i2c_client *client) data->config_orig = config; /* Check Temperature Range Select */ - if (data->kind == adt7461) { + if (data->kind == adt7461 || data->kind == tmp451) { if (config & 0x04) data->flags |= LM90_FLAG_ADT7461_EXT; } @@ -1391,14 +1464,74 @@ static void lm90_init_client(struct i2c_client *client) i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config); } +static bool lm90_is_tripped(struct i2c_client *client, u16 *status) +{ + struct lm90_data *data = i2c_get_clientdata(client); + u8 st, st2 = 0; + + lm90_read_reg(client, LM90_REG_R_STATUS, &st); + + if (data->kind == max6696) + lm90_read_reg(client, MAX6696_REG_R_STATUS2, &st2); + + *status = st | (st2 << 8); + + if ((st & 0x7f) == 0 && (st2 & 0xfe) == 0) + return false; + + if ((st & (LM90_STATUS_LLOW | LM90_STATUS_LHIGH | LM90_STATUS_LTHRM)) || + (st2 & MAX6696_STATUS2_LOT2)) + dev_warn(&client->dev, + "temp%d out of range, please check!\n", 1); + if ((st & (LM90_STATUS_RLOW | LM90_STATUS_RHIGH | LM90_STATUS_RTHRM)) || + (st2 & MAX6696_STATUS2_ROT2)) + dev_warn(&client->dev, + "temp%d out of range, please check!\n", 2); + if (st & LM90_STATUS_ROPEN) + dev_warn(&client->dev, + "temp%d diode open, please check!\n", 2); + if (st2 & (MAX6696_STATUS2_R2LOW | MAX6696_STATUS2_R2HIGH | + MAX6696_STATUS2_R2THRM | MAX6696_STATUS2_R2OT2)) + dev_warn(&client->dev, + "temp%d out of range, please check!\n", 3); + if (st2 & MAX6696_STATUS2_R2OPEN) + dev_warn(&client->dev, + "temp%d diode open, please check!\n", 3); + + return true; +} + +static irqreturn_t lm90_irq_thread(int irq, void *dev_id) +{ + struct i2c_client *client = dev_id; + u16 status; + + if (lm90_is_tripped(client, &status)) + return IRQ_HANDLED; + else + return IRQ_NONE; 
+} + static int lm90_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct device *dev = &client->dev; struct i2c_adapter *adapter = to_i2c_adapter(dev->parent); struct lm90_data *data; + struct regulator *regulator; int err; + regulator = devm_regulator_get(dev, "vcc"); + if (IS_ERR(regulator)) + return PTR_ERR(regulator); + + err = regulator_enable(regulator); + if (err < 0) { + dev_err(&client->dev, + "Failed to enable regulator: %d\n", err); + return err; + } + data = devm_kzalloc(&client->dev, sizeof(struct lm90_data), GFP_KERNEL); if (!data) return -ENOMEM; @@ -1406,6 +1539,8 @@ static int lm90_probe(struct i2c_client *client, i2c_set_clientdata(client, data); mutex_init(&data->update_lock); + data->regulator = regulator; + /* Set the device type */ data->kind = id->driver_data; if (data->kind == adm1032) { @@ -1467,12 +1602,26 @@ static int lm90_probe(struct i2c_client *client, goto exit_remove_files; } + if (client->irq) { + dev_dbg(dev, "IRQ: %d\n", client->irq); + err = devm_request_threaded_irq(dev, client->irq, + NULL, lm90_irq_thread, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + "lm90", client); + if (err < 0) { + dev_err(dev, "cannot request IRQ %d\n", client->irq); + goto exit_remove_files; + } + } + return 0; exit_remove_files: lm90_remove_files(client, data); exit_restore: lm90_restore_conf(client, data); + regulator_disable(data->regulator); + return err; } @@ -1483,49 +1632,33 @@ static int lm90_remove(struct i2c_client *client) hwmon_device_unregister(data->hwmon_dev); lm90_remove_files(client, data); lm90_restore_conf(client, data); + regulator_disable(data->regulator); return 0; } static void lm90_alert(struct i2c_client *client, unsigned int flag) { - struct lm90_data *data = i2c_get_clientdata(client); - u8 config, alarms, alarms2 = 0; - - lm90_read_reg(client, LM90_REG_R_STATUS, &alarms); - - if (data->kind == max6696) - lm90_read_reg(client, MAX6696_REG_R_STATUS2, &alarms2); - - if ((alarms & 0x7f) == 0 && (alarms2 & 0xfe) == 0) { - dev_info(&client->dev, "Everything OK\n"); - } else { - if (alarms & 0x61) - dev_warn(&client->dev, - "temp%d out of range, please check!\n", 1); - if (alarms & 0x1a) - dev_warn(&client->dev, - "temp%d out of range, please check!\n", 2); - if (alarms & 0x04) - dev_warn(&client->dev, - "temp%d diode open, please check!\n", 2); - - if (alarms2 & 0x18) - dev_warn(&client->dev, - "temp%d out of range, please check!\n", 3); + u16 alarms; + if (lm90_is_tripped(client, &alarms)) { /* * Disable ALERT# output, because these chips don't implement * SMBus alert correctly; they should only hold the alert line * low briefly. */ + struct lm90_data *data = i2c_get_clientdata(client); + if ((data->flags & LM90_HAVE_BROKEN_ALERT) && (alarms & data->alert_alarms)) { + u8 config; dev_dbg(&client->dev, "Disabling ALERT#\n"); lm90_read_reg(client, LM90_REG_R_CONFIG1, &config); i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config | 0x80); } + } else { + dev_info(&client->dev, "Everything OK\n"); } } diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index cdcbd8368ed3..3b26129f6055 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -109,6 +109,7 @@ config I2C_I801 Avoton (SOC) Wellsburg (PCH) Coleto Creek (PCH) + Wildcat Point-LP (PCH) This driver can also be built as a module. If so, the module will be called i2c-i801. @@ -345,6 +346,16 @@ config I2C_BCM2835 This support is also available as a module. If so, the module will be called i2c-bcm2835. 
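For readers following the lm90 hunks just above: the probe-side additions amount to enabling a supply regulator and requesting a threaded ALERT interrupt (threaded because reading the status register over SMBus may sleep). A minimal sketch of that pattern follows; the foo_* names and FOO_REG_STATUS are hypothetical placeholders, while the "vcc" supply label mirrors the driver. The real lm90 logic is in the hunks above, not here.

/*
 * Sketch only: regulator + threaded-IRQ probe pattern.
 * foo_* and FOO_REG_STATUS are made-up names for illustration.
 */
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/regulator/consumer.h>

#define FOO_REG_STATUS	0x02	/* placeholder status register */

static irqreturn_t foo_irq_thread(int irq, void *dev_id)
{
	struct i2c_client *client = dev_id;
	int status = i2c_smbus_read_byte_data(client, FOO_REG_STATUS);

	if (status <= 0)
		return IRQ_NONE;	/* read error or nothing pending */

	dev_warn(&client->dev, "alarm status %#x\n", status);
	return IRQ_HANDLED;
}

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct regulator *reg = devm_regulator_get(&client->dev, "vcc");
	int err;

	if (IS_ERR(reg))
		return PTR_ERR(reg);

	err = regulator_enable(reg);
	if (err < 0)
		return err;

	/* A real driver would disable the regulator on later failures. */
	if (client->irq)
		err = devm_request_threaded_irq(&client->dev, client->irq,
						NULL, foo_irq_thread,
						IRQF_TRIGGER_LOW | IRQF_ONESHOT,
						"foo", client);
	return err;
}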
+config I2C_BCM_KONA + tristate "BCM Kona I2C adapter" + depends on ARCH_BCM_MOBILE + default y + help + If you say yes to this option, support will be included for the + I2C interface on the Broadcom Kona family of processors. + + If you do not need the Kona I2C interface, say N. + config I2C_BLACKFIN_TWI tristate "Blackfin TWI I2C support" depends on BLACKFIN @@ -436,6 +447,13 @@ config I2C_EG20T ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series. ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH. +config I2C_EXYNOS5 + tristate "Exynos5 high-speed I2C driver" + depends on ARCH_EXYNOS5 && OF + help + Say Y here to include support for the high-speed I2C controller in the + Exynos5 based Samsung SoCs. + config I2C_GPIO tristate "GPIO-based bitbanging I2C" depends on GPIOLIB @@ -665,7 +683,7 @@ config I2C_SH7760 config I2C_SH_MOBILE tristate "SuperH Mobile I2C Controller" - depends on SUPERH || ARCH_SHMOBILE + depends on SUPERH || ARM || COMPILE_TEST help If you say yes to this option, support will be included for the built-in I2C interface on the Renesas SH-Mobile processor. @@ -695,6 +713,16 @@ config I2C_SIRF This driver can also be built as a module. If so, the module will be called i2c-sirf. +config I2C_ST + tristate "STMicroelectronics SSC I2C support" + depends on ARCH_STI + help + Enable this option to add support for STMicroelectronics SoCs + hardware SSC (Synchronous Serial Controller) as an I2C controller. + + This driver can also be built as a module. If so, the module + will be called i2c-st. + config I2C_STU300 tristate "ST Microelectronics DDC I2C interface" depends on MACH_U300 @@ -768,7 +796,7 @@ config I2C_XLR config I2C_RCAR tristate "Renesas R-Car I2C Controller" - depends on ARCH_SHMOBILE && I2C + depends on ARM || COMPILE_TEST help If you say yes to this option, support will be included for the R-Car I2C controller.
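Each of the new adapters these Kconfig entries enable (i2c-bcm-kona, i2c-exynos5, i2c-st) is hooked into the build by the Makefile hunk that follows and registers itself with the usual platform-driver boilerplate. A condensed skeleton of that shape is sketched below under hypothetical names; none of the foo_* identifiers or the "vendor,foo-i2c" compatible belong to the real drivers in this series.

/*
 * Hypothetical skeleton of an OF-probed I2C bus driver, for orientation.
 * The real register setup and adapter registration are in the new
 * drivers added later in this diff.
 */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int foo_i2c_probe(struct platform_device *pdev)
{
	/* map registers, get clocks, set up and add the i2c_adapter ... */
	return 0;
}

static int foo_i2c_remove(struct platform_device *pdev)
{
	/* delete the i2c_adapter, release resources ... */
	return 0;
}

static const struct of_device_id foo_i2c_of_match[] = {
	{ .compatible = "vendor,foo-i2c" },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_i2c_of_match);

static struct platform_driver foo_i2c_driver = {
	.driver = {
		.name = "foo-i2c",
		.owner = THIS_MODULE,
		.of_match_table = foo_i2c_of_match,
	},
	.probe = foo_i2c_probe,
	.remove = foo_i2c_remove,
};
module_platform_driver(foo_i2c_driver);

MODULE_LICENSE("GPL v2");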
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index d00997f3eb3b..c73eb0ea788e 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -42,6 +42,7 @@ i2c-designware-platform-objs := i2c-designware-platdrv.o obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o i2c-designware-pci-objs := i2c-designware-pcidrv.o obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o +obj-$(CONFIG_I2C_EXYNOS5) += i2c-exynos5.o obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o @@ -68,6 +69,7 @@ obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o obj-$(CONFIG_I2C_SIRF) += i2c-sirf.o +obj-$(CONFIG_I2C_ST) += i2c-st.o obj-$(CONFIG_I2C_STU300) += i2c-stu300.o obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o @@ -87,6 +89,7 @@ obj-$(CONFIG_I2C_VIPERBOARD) += i2c-viperboard.o # Other I2C/SMBus bus drivers obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o +obj-$(CONFIG_I2C_BCM_KONA) += i2c-bcm-kona.o obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c new file mode 100644 index 000000000000..036cf03aeb61 --- /dev/null +++ b/drivers/i2c/busses/i2c-bcm-kona.c @@ -0,0 +1,909 @@ +/* + * Copyright (C) 2013 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/slab.h> + +/* Hardware register offsets and field defintions */ +#define CS_OFFSET 0x00000020 +#define CS_ACK_SHIFT 3 +#define CS_ACK_MASK 0x00000008 +#define CS_ACK_CMD_GEN_START 0x00000000 +#define CS_ACK_CMD_GEN_RESTART 0x00000001 +#define CS_CMD_SHIFT 1 +#define CS_CMD_CMD_NO_ACTION 0x00000000 +#define CS_CMD_CMD_START_RESTART 0x00000001 +#define CS_CMD_CMD_STOP 0x00000002 +#define CS_EN_SHIFT 0 +#define CS_EN_CMD_ENABLE_BSC 0x00000001 + +#define TIM_OFFSET 0x00000024 +#define TIM_PRESCALE_SHIFT 6 +#define TIM_P_SHIFT 3 +#define TIM_NO_DIV_SHIFT 2 +#define TIM_DIV_SHIFT 0 + +#define DAT_OFFSET 0x00000028 + +#define TOUT_OFFSET 0x0000002c + +#define TXFCR_OFFSET 0x0000003c +#define TXFCR_FIFO_FLUSH_MASK 0x00000080 +#define TXFCR_FIFO_EN_MASK 0x00000040 + +#define IER_OFFSET 0x00000044 +#define IER_READ_COMPLETE_INT_MASK 0x00000010 +#define IER_I2C_INT_EN_MASK 0x00000008 +#define IER_FIFO_INT_EN_MASK 0x00000002 +#define IER_NOACK_EN_MASK 0x00000001 + +#define ISR_OFFSET 0x00000048 +#define ISR_RESERVED_MASK 0xffffff60 +#define ISR_CMDBUSY_MASK 0x00000080 +#define ISR_READ_COMPLETE_MASK 0x00000010 +#define ISR_SES_DONE_MASK 0x00000008 +#define ISR_ERR_MASK 0x00000004 +#define ISR_TXFIFOEMPTY_MASK 0x00000002 +#define ISR_NOACK_MASK 0x00000001 + +#define CLKEN_OFFSET 0x0000004C +#define CLKEN_AUTOSENSE_OFF_MASK 0x00000080 +#define CLKEN_M_SHIFT 4 +#define CLKEN_N_SHIFT 1 +#define CLKEN_CLKEN_MASK 0x00000001 + +#define FIFO_STATUS_OFFSET 0x00000054 +#define FIFO_STATUS_RXFIFO_EMPTY_MASK 0x00000004 +#define FIFO_STATUS_TXFIFO_EMPTY_MASK 0x00000010 + +#define HSTIM_OFFSET 0x00000058 +#define HSTIM_HS_MODE_MASK 0x00008000 +#define HSTIM_HS_HOLD_SHIFT 10 +#define HSTIM_HS_HIGH_PHASE_SHIFT 5 +#define HSTIM_HS_SETUP_SHIFT 0 + +#define PADCTL_OFFSET 0x0000005c +#define PADCTL_PAD_OUT_EN_MASK 0x00000004 + +#define RXFCR_OFFSET 0x00000068 +#define RXFCR_NACK_EN_SHIFT 7 +#define RXFCR_READ_COUNT_SHIFT 0 +#define RXFIFORDOUT_OFFSET 0x0000006c + +/* Locally used constants */ +#define MAX_RX_FIFO_SIZE 64U /* bytes */ +#define MAX_TX_FIFO_SIZE 64U /* bytes */ + +#define STD_EXT_CLK_FREQ 13000000UL +#define HS_EXT_CLK_FREQ 104000000UL + +#define MASTERCODE 0x08 /* Mastercodes are 0000_1xxxb */ + +#define I2C_TIMEOUT 100 /* msecs */ + +/* Operations that can be commanded to the controller */ +enum bcm_kona_cmd_t { + BCM_CMD_NOACTION = 0, + BCM_CMD_START, + BCM_CMD_RESTART, + BCM_CMD_STOP, +}; + +enum bus_speed_index { + BCM_SPD_100K = 0, + BCM_SPD_400K, + BCM_SPD_1MHZ, +}; + +enum hs_bus_speed_index { + BCM_SPD_3P4MHZ = 0, +}; + +/* Internal divider settings for standard mode, fast mode and fast mode plus */ +struct bus_speed_cfg { + uint8_t time_m; /* Number of cycles for setup time */ + uint8_t time_n; /* Number of cycles for hold time */ + uint8_t prescale; /* Prescale divider */ + uint8_t time_p; /* Timing coefficient */ + uint8_t no_div; /* Disable clock divider */ + uint8_t time_div; /* Post-prescale divider */ +}; + +/* Internal divider settings for high-speed mode */ +struct hs_bus_speed_cfg { + uint8_t hs_hold; /* Number of clock cycles SCL stays low until + the end of bit period */ + uint8_t hs_high_phase; /* Number of clock cycles SCL stays high + before it falls */ + uint8_t hs_setup; /* Number of clock 
cycles SCL stays low + before it rises */ + uint8_t prescale; /* Prescale divider */ + uint8_t time_p; /* Timing coefficient */ + uint8_t no_div; /* Disable clock divider */ + uint8_t time_div; /* Post-prescale divider */ +}; + +static const struct bus_speed_cfg std_cfg_table[] = { + [BCM_SPD_100K] = {0x01, 0x01, 0x03, 0x06, 0x00, 0x02}, + [BCM_SPD_400K] = {0x05, 0x01, 0x03, 0x05, 0x01, 0x02}, + [BCM_SPD_1MHZ] = {0x01, 0x01, 0x03, 0x01, 0x01, 0x03}, +}; + +static const struct hs_bus_speed_cfg hs_cfg_table[] = { + [BCM_SPD_3P4MHZ] = {0x01, 0x08, 0x14, 0x00, 0x06, 0x01, 0x00}, +}; + +struct bcm_kona_i2c_dev { + struct device *device; + + void __iomem *base; + int irq; + struct clk *external_clk; + + struct i2c_adapter adapter; + + struct completion done; + + const struct bus_speed_cfg *std_cfg; + const struct hs_bus_speed_cfg *hs_cfg; +}; + +static void bcm_kona_i2c_send_cmd_to_ctrl(struct bcm_kona_i2c_dev *dev, + enum bcm_kona_cmd_t cmd) +{ + dev_dbg(dev->device, "%s, %d\n", __func__, cmd); + + switch (cmd) { + case BCM_CMD_NOACTION: + writel((CS_CMD_CMD_NO_ACTION << CS_CMD_SHIFT) | + (CS_EN_CMD_ENABLE_BSC << CS_EN_SHIFT), + dev->base + CS_OFFSET); + break; + + case BCM_CMD_START: + writel((CS_ACK_CMD_GEN_START << CS_ACK_SHIFT) | + (CS_CMD_CMD_START_RESTART << CS_CMD_SHIFT) | + (CS_EN_CMD_ENABLE_BSC << CS_EN_SHIFT), + dev->base + CS_OFFSET); + break; + + case BCM_CMD_RESTART: + writel((CS_ACK_CMD_GEN_RESTART << CS_ACK_SHIFT) | + (CS_CMD_CMD_START_RESTART << CS_CMD_SHIFT) | + (CS_EN_CMD_ENABLE_BSC << CS_EN_SHIFT), + dev->base + CS_OFFSET); + break; + + case BCM_CMD_STOP: + writel((CS_CMD_CMD_STOP << CS_CMD_SHIFT) | + (CS_EN_CMD_ENABLE_BSC << CS_EN_SHIFT), + dev->base + CS_OFFSET); + break; + + default: + dev_err(dev->device, "Unknown command %d\n", cmd); + } +} + +static void bcm_kona_i2c_enable_clock(struct bcm_kona_i2c_dev *dev) +{ + writel(readl(dev->base + CLKEN_OFFSET) | CLKEN_CLKEN_MASK, + dev->base + CLKEN_OFFSET); +} + +static void bcm_kona_i2c_disable_clock(struct bcm_kona_i2c_dev *dev) +{ + writel(readl(dev->base + CLKEN_OFFSET) & ~CLKEN_CLKEN_MASK, + dev->base + CLKEN_OFFSET); +} + +static irqreturn_t bcm_kona_i2c_isr(int irq, void *devid) +{ + struct bcm_kona_i2c_dev *dev = devid; + uint32_t status = readl(dev->base + ISR_OFFSET); + + if ((status & ~ISR_RESERVED_MASK) == 0) + return IRQ_NONE; + + /* Must flush the TX FIFO when NAK detected */ + if (status & ISR_NOACK_MASK) + writel(TXFCR_FIFO_FLUSH_MASK | TXFCR_FIFO_EN_MASK, + dev->base + TXFCR_OFFSET); + + writel(status & ~ISR_RESERVED_MASK, dev->base + ISR_OFFSET); + complete_all(&dev->done); + + return IRQ_HANDLED; +} + +/* Wait for ISR_CMDBUSY_MASK to go low before writing to CS, DAT, or RCD */ +static int bcm_kona_i2c_wait_if_busy(struct bcm_kona_i2c_dev *dev) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(I2C_TIMEOUT); + + while (readl(dev->base + ISR_OFFSET) & ISR_CMDBUSY_MASK) + if (time_after(jiffies, timeout)) { + dev_err(dev->device, "CMDBUSY timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +/* Send command to I2C bus */ +static int bcm_kona_send_i2c_cmd(struct bcm_kona_i2c_dev *dev, + enum bcm_kona_cmd_t cmd) +{ + int rc; + unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT); + + /* Make sure the hardware is ready */ + rc = bcm_kona_i2c_wait_if_busy(dev); + if (rc < 0) + return rc; + + /* Unmask the session done interrupt */ + writel(IER_I2C_INT_EN_MASK, dev->base + IER_OFFSET); + + /* Mark as incomplete before sending the command */ + reinit_completion(&dev->done); + + /* Send the command */ + 
bcm_kona_i2c_send_cmd_to_ctrl(dev, cmd); + + /* Wait for transaction to finish or timeout */ + time_left = wait_for_completion_timeout(&dev->done, time_left); + + /* Mask all interrupts */ + writel(0, dev->base + IER_OFFSET); + + if (!time_left) { + dev_err(dev->device, "controller timed out\n"); + rc = -ETIMEDOUT; + } + + /* Clear command */ + bcm_kona_i2c_send_cmd_to_ctrl(dev, BCM_CMD_NOACTION); + + return rc; +} + +/* Read a single RX FIFO worth of data from the i2c bus */ +static int bcm_kona_i2c_read_fifo_single(struct bcm_kona_i2c_dev *dev, + uint8_t *buf, unsigned int len, + unsigned int last_byte_nak) +{ + unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT); + + /* Mark as incomplete before starting the RX FIFO */ + reinit_completion(&dev->done); + + /* Unmask the read complete interrupt */ + writel(IER_READ_COMPLETE_INT_MASK, dev->base + IER_OFFSET); + + /* Start the RX FIFO */ + writel((last_byte_nak << RXFCR_NACK_EN_SHIFT) | + (len << RXFCR_READ_COUNT_SHIFT), + dev->base + RXFCR_OFFSET); + + /* Wait for FIFO read to complete */ + time_left = wait_for_completion_timeout(&dev->done, time_left); + + /* Mask all interrupts */ + writel(0, dev->base + IER_OFFSET); + + if (!time_left) { + dev_err(dev->device, "RX FIFO time out\n"); + return -EREMOTEIO; + } + + /* Read data from FIFO */ + for (; len > 0; len--, buf++) + *buf = readl(dev->base + RXFIFORDOUT_OFFSET); + + return 0; +} + +/* Read any amount of data using the RX FIFO from the i2c bus */ +static int bcm_kona_i2c_read_fifo(struct bcm_kona_i2c_dev *dev, + struct i2c_msg *msg) +{ + unsigned int bytes_to_read = MAX_RX_FIFO_SIZE; + unsigned int last_byte_nak = 0; + unsigned int bytes_read = 0; + int rc; + + uint8_t *tmp_buf = msg->buf; + + while (bytes_read < msg->len) { + if (msg->len - bytes_read <= MAX_RX_FIFO_SIZE) { + last_byte_nak = 1; /* NAK last byte of transfer */ + bytes_to_read = msg->len - bytes_read; + } + + rc = bcm_kona_i2c_read_fifo_single(dev, tmp_buf, bytes_to_read, + last_byte_nak); + if (rc < 0) + return -EREMOTEIO; + + bytes_read += bytes_to_read; + tmp_buf += bytes_to_read; + } + + return 0; +} + +/* Write a single byte of data to the i2c bus */ +static int bcm_kona_i2c_write_byte(struct bcm_kona_i2c_dev *dev, uint8_t data, + unsigned int nak_expected) +{ + int rc; + unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT); + unsigned int nak_received; + + /* Make sure the hardware is ready */ + rc = bcm_kona_i2c_wait_if_busy(dev); + if (rc < 0) + return rc; + + /* Clear pending session done interrupt */ + writel(ISR_SES_DONE_MASK, dev->base + ISR_OFFSET); + + /* Unmask the session done interrupt */ + writel(IER_I2C_INT_EN_MASK, dev->base + IER_OFFSET); + + /* Mark as incomplete before sending the data */ + reinit_completion(&dev->done); + + /* Send one byte of data */ + writel(data, dev->base + DAT_OFFSET); + + /* Wait for byte to be written */ + time_left = wait_for_completion_timeout(&dev->done, time_left); + + /* Mask all interrupts */ + writel(0, dev->base + IER_OFFSET); + + if (!time_left) { + dev_dbg(dev->device, "controller timed out\n"); + return -ETIMEDOUT; + } + + nak_received = readl(dev->base + CS_OFFSET) & CS_ACK_MASK ? 
1 : 0; + + if (nak_received ^ nak_expected) { + dev_dbg(dev->device, "unexpected NAK/ACK\n"); + return -EREMOTEIO; + } + + return 0; +} + +/* Write a single TX FIFO worth of data to the i2c bus */ +static int bcm_kona_i2c_write_fifo_single(struct bcm_kona_i2c_dev *dev, + uint8_t *buf, unsigned int len) +{ + int k; + unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT); + unsigned int fifo_status; + + /* Mark as incomplete before sending data to the TX FIFO */ + reinit_completion(&dev->done); + + /* Unmask the fifo empty and nak interrupt */ + writel(IER_FIFO_INT_EN_MASK | IER_NOACK_EN_MASK, + dev->base + IER_OFFSET); + + /* Disable IRQ to load a FIFO worth of data without interruption */ + disable_irq(dev->irq); + + /* Write data into FIFO */ + for (k = 0; k < len; k++) + writel(buf[k], (dev->base + DAT_OFFSET)); + + /* Enable IRQ now that data has been loaded */ + enable_irq(dev->irq); + + /* Wait for FIFO to empty */ + do { + time_left = wait_for_completion_timeout(&dev->done, time_left); + fifo_status = readl(dev->base + FIFO_STATUS_OFFSET); + } while (time_left && !(fifo_status & FIFO_STATUS_TXFIFO_EMPTY_MASK)); + + /* Mask all interrupts */ + writel(0, dev->base + IER_OFFSET); + + /* Check if there was a NAK */ + if (readl(dev->base + CS_OFFSET) & CS_ACK_MASK) { + dev_err(dev->device, "unexpected NAK\n"); + return -EREMOTEIO; + } + + /* Check if a timeout occured */ + if (!time_left) { + dev_err(dev->device, "completion timed out\n"); + return -EREMOTEIO; + } + + return 0; +} + + +/* Write any amount of data using TX FIFO to the i2c bus */ +static int bcm_kona_i2c_write_fifo(struct bcm_kona_i2c_dev *dev, + struct i2c_msg *msg) +{ + unsigned int bytes_to_write = MAX_TX_FIFO_SIZE; + unsigned int bytes_written = 0; + int rc; + + uint8_t *tmp_buf = msg->buf; + + while (bytes_written < msg->len) { + if (msg->len - bytes_written <= MAX_TX_FIFO_SIZE) + bytes_to_write = msg->len - bytes_written; + + rc = bcm_kona_i2c_write_fifo_single(dev, tmp_buf, + bytes_to_write); + if (rc < 0) + return -EREMOTEIO; + + bytes_written += bytes_to_write; + tmp_buf += bytes_to_write; + } + + return 0; +} + +/* Send i2c address */ +static int bcm_kona_i2c_do_addr(struct bcm_kona_i2c_dev *dev, + struct i2c_msg *msg) +{ + unsigned char addr; + + if (msg->flags & I2C_M_TEN) { + /* First byte is 11110XX0 where XX is upper 2 bits */ + addr = 0xF0 | ((msg->addr & 0x300) >> 7); + if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0) + return -EREMOTEIO; + + /* Second byte is the remaining 8 bits */ + addr = msg->addr & 0xFF; + if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0) + return -EREMOTEIO; + + if (msg->flags & I2C_M_RD) { + /* For read, send restart command */ + if (bcm_kona_send_i2c_cmd(dev, BCM_CMD_RESTART) < 0) + return -EREMOTEIO; + + /* Then re-send the first byte with the read bit set */ + addr = 0xF0 | ((msg->addr & 0x300) >> 7) | 0x01; + if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0) + return -EREMOTEIO; + } + } else { + addr = msg->addr << 1; + + if (msg->flags & I2C_M_RD) + addr |= 1; + + if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0) + return -EREMOTEIO; + } + + return 0; +} + +static void bcm_kona_i2c_enable_autosense(struct bcm_kona_i2c_dev *dev) +{ + writel(readl(dev->base + CLKEN_OFFSET) & ~CLKEN_AUTOSENSE_OFF_MASK, + dev->base + CLKEN_OFFSET); +} + +static void bcm_kona_i2c_config_timing(struct bcm_kona_i2c_dev *dev) +{ + writel(readl(dev->base + HSTIM_OFFSET) & ~HSTIM_HS_MODE_MASK, + dev->base + HSTIM_OFFSET); + + writel((dev->std_cfg->prescale << TIM_PRESCALE_SHIFT) | + (dev->std_cfg->time_p << 
TIM_P_SHIFT) | + (dev->std_cfg->no_div << TIM_NO_DIV_SHIFT) | + (dev->std_cfg->time_div << TIM_DIV_SHIFT), + dev->base + TIM_OFFSET); + + writel((dev->std_cfg->time_m << CLKEN_M_SHIFT) | + (dev->std_cfg->time_n << CLKEN_N_SHIFT) | + CLKEN_CLKEN_MASK, + dev->base + CLKEN_OFFSET); +} + +static void bcm_kona_i2c_config_timing_hs(struct bcm_kona_i2c_dev *dev) +{ + writel((dev->hs_cfg->prescale << TIM_PRESCALE_SHIFT) | + (dev->hs_cfg->time_p << TIM_P_SHIFT) | + (dev->hs_cfg->no_div << TIM_NO_DIV_SHIFT) | + (dev->hs_cfg->time_div << TIM_DIV_SHIFT), + dev->base + TIM_OFFSET); + + writel((dev->hs_cfg->hs_hold << HSTIM_HS_HOLD_SHIFT) | + (dev->hs_cfg->hs_high_phase << HSTIM_HS_HIGH_PHASE_SHIFT) | + (dev->hs_cfg->hs_setup << HSTIM_HS_SETUP_SHIFT), + dev->base + HSTIM_OFFSET); + + writel(readl(dev->base + HSTIM_OFFSET) | HSTIM_HS_MODE_MASK, + dev->base + HSTIM_OFFSET); +} + +static int bcm_kona_i2c_switch_to_hs(struct bcm_kona_i2c_dev *dev) +{ + int rc; + + /* Send mastercode at standard speed */ + rc = bcm_kona_i2c_write_byte(dev, MASTERCODE, 1); + if (rc < 0) { + pr_err("High speed handshake failed\n"); + return rc; + } + + /* Configure external clock to higher frequency */ + rc = clk_set_rate(dev->external_clk, HS_EXT_CLK_FREQ); + if (rc) { + dev_err(dev->device, "%s: clk_set_rate returned %d\n", + __func__, rc); + return rc; + } + + /* Reconfigure internal dividers */ + bcm_kona_i2c_config_timing_hs(dev); + + /* Send a restart command */ + rc = bcm_kona_send_i2c_cmd(dev, BCM_CMD_RESTART); + if (rc < 0) + dev_err(dev->device, "High speed restart command failed\n"); + + return rc; +} + +static int bcm_kona_i2c_switch_to_std(struct bcm_kona_i2c_dev *dev) +{ + int rc; + + /* Reconfigure internal dividers */ + bcm_kona_i2c_config_timing(dev); + + /* Configure external clock to lower frequency */ + rc = clk_set_rate(dev->external_clk, STD_EXT_CLK_FREQ); + if (rc) { + dev_err(dev->device, "%s: clk_set_rate returned %d\n", + __func__, rc); + } + + return rc; +} + +/* Master transfer function */ +static int bcm_kona_i2c_xfer(struct i2c_adapter *adapter, + struct i2c_msg msgs[], int num) +{ + struct bcm_kona_i2c_dev *dev = i2c_get_adapdata(adapter); + struct i2c_msg *pmsg; + int rc = 0; + int i; + + rc = clk_prepare_enable(dev->external_clk); + if (rc) { + dev_err(dev->device, "%s: peri clock enable failed. 
err %d\n", + __func__, rc); + return rc; + } + + /* Enable pad output */ + writel(0, dev->base + PADCTL_OFFSET); + + /* Enable internal clocks */ + bcm_kona_i2c_enable_clock(dev); + + /* Send start command */ + rc = bcm_kona_send_i2c_cmd(dev, BCM_CMD_START); + if (rc < 0) { + dev_err(dev->device, "Start command failed rc = %d\n", rc); + goto xfer_disable_pad; + } + + /* Switch to high speed if applicable */ + if (dev->hs_cfg) { + rc = bcm_kona_i2c_switch_to_hs(dev); + if (rc < 0) + goto xfer_send_stop; + } + + /* Loop through all messages */ + for (i = 0; i < num; i++) { + pmsg = &msgs[i]; + + /* Send restart for subsequent messages */ + if ((i != 0) && ((pmsg->flags & I2C_M_NOSTART) == 0)) { + rc = bcm_kona_send_i2c_cmd(dev, BCM_CMD_RESTART); + if (rc < 0) { + dev_err(dev->device, + "restart cmd failed rc = %d\n", rc); + goto xfer_send_stop; + } + } + + /* Send slave address */ + if (!(pmsg->flags & I2C_M_NOSTART)) { + rc = bcm_kona_i2c_do_addr(dev, pmsg); + if (rc < 0) { + dev_err(dev->device, + "NAK from addr %2.2x msg#%d rc = %d\n", + pmsg->addr, i, rc); + goto xfer_send_stop; + } + } + + /* Perform data transfer */ + if (pmsg->flags & I2C_M_RD) { + rc = bcm_kona_i2c_read_fifo(dev, pmsg); + if (rc < 0) { + dev_err(dev->device, "read failure\n"); + goto xfer_send_stop; + } + } else { + rc = bcm_kona_i2c_write_fifo(dev, pmsg); + if (rc < 0) { + dev_err(dev->device, "write failure"); + goto xfer_send_stop; + } + } + } + + rc = num; + +xfer_send_stop: + /* Send a STOP command */ + bcm_kona_send_i2c_cmd(dev, BCM_CMD_STOP); + + /* Return from high speed if applicable */ + if (dev->hs_cfg) { + int hs_rc = bcm_kona_i2c_switch_to_std(dev); + + if (hs_rc) + rc = hs_rc; + } + +xfer_disable_pad: + /* Disable pad output */ + writel(PADCTL_PAD_OUT_EN_MASK, dev->base + PADCTL_OFFSET); + + /* Stop internal clock */ + bcm_kona_i2c_disable_clock(dev); + + clk_disable_unprepare(dev->external_clk); + + return rc; +} + +static uint32_t bcm_kona_i2c_functionality(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR | + I2C_FUNC_NOSTART; +} + +static const struct i2c_algorithm bcm_algo = { + .master_xfer = bcm_kona_i2c_xfer, + .functionality = bcm_kona_i2c_functionality, +}; + +static int bcm_kona_i2c_assign_bus_speed(struct bcm_kona_i2c_dev *dev) +{ + unsigned int bus_speed; + int ret = of_property_read_u32(dev->device->of_node, "clock-frequency", + &bus_speed); + if (ret < 0) { + dev_err(dev->device, "missing clock-frequency property\n"); + return -ENODEV; + } + + switch (bus_speed) { + case 100000: + dev->std_cfg = &std_cfg_table[BCM_SPD_100K]; + break; + case 400000: + dev->std_cfg = &std_cfg_table[BCM_SPD_400K]; + break; + case 1000000: + dev->std_cfg = &std_cfg_table[BCM_SPD_1MHZ]; + break; + case 3400000: + /* Send mastercode at 100k */ + dev->std_cfg = &std_cfg_table[BCM_SPD_100K]; + dev->hs_cfg = &hs_cfg_table[BCM_SPD_3P4MHZ]; + break; + default: + pr_err("%d hz bus speed not supported\n", bus_speed); + pr_err("Valid speeds are 100khz, 400khz, 1mhz, and 3.4mhz\n"); + return -EINVAL; + } + + return 0; +} + +static int bcm_kona_i2c_probe(struct platform_device *pdev) +{ + int rc = 0; + struct bcm_kona_i2c_dev *dev; + struct i2c_adapter *adap; + struct resource *iomem; + + /* Allocate memory for private data structure */ + dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + platform_set_drvdata(pdev, dev); + dev->device = &pdev->dev; + init_completion(&dev->done); + + /* Map hardware registers */ + iomem = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); + dev->base = devm_ioremap_resource(dev->device, iomem); + if (IS_ERR(dev->base)) + return -ENOMEM; + + /* Get and enable external clock */ + dev->external_clk = devm_clk_get(dev->device, NULL); + if (IS_ERR(dev->external_clk)) { + dev_err(dev->device, "couldn't get clock\n"); + return -ENODEV; + } + + rc = clk_set_rate(dev->external_clk, STD_EXT_CLK_FREQ); + if (rc) { + dev_err(dev->device, "%s: clk_set_rate returned %d\n", + __func__, rc); + return rc; + } + + rc = clk_prepare_enable(dev->external_clk); + if (rc) { + dev_err(dev->device, "couldn't enable clock\n"); + return rc; + } + + /* Parse bus speed */ + rc = bcm_kona_i2c_assign_bus_speed(dev); + if (rc) + goto probe_disable_clk; + + /* Enable internal clocks */ + bcm_kona_i2c_enable_clock(dev); + + /* Configure internal dividers */ + bcm_kona_i2c_config_timing(dev); + + /* Disable timeout */ + writel(0, dev->base + TOUT_OFFSET); + + /* Enable autosense */ + bcm_kona_i2c_enable_autosense(dev); + + /* Enable TX FIFO */ + writel(TXFCR_FIFO_FLUSH_MASK | TXFCR_FIFO_EN_MASK, + dev->base + TXFCR_OFFSET); + + /* Mask all interrupts */ + writel(0, dev->base + IER_OFFSET); + + /* Clear all pending interrupts */ + writel(ISR_CMDBUSY_MASK | + ISR_READ_COMPLETE_MASK | + ISR_SES_DONE_MASK | + ISR_ERR_MASK | + ISR_TXFIFOEMPTY_MASK | + ISR_NOACK_MASK, + dev->base + ISR_OFFSET); + + /* Get the interrupt number */ + dev->irq = platform_get_irq(pdev, 0); + if (dev->irq < 0) { + dev_err(dev->device, "no irq resource\n"); + rc = -ENODEV; + goto probe_disable_clk; + } + + /* register the ISR handler */ + rc = devm_request_irq(&pdev->dev, dev->irq, bcm_kona_i2c_isr, + IRQF_SHARED, pdev->name, dev); + if (rc) { + dev_err(dev->device, "failed to request irq %i\n", dev->irq); + goto probe_disable_clk; + } + + /* Enable the controller but leave it idle */ + bcm_kona_i2c_send_cmd_to_ctrl(dev, BCM_CMD_NOACTION); + + /* Disable pad output */ + writel(PADCTL_PAD_OUT_EN_MASK, dev->base + PADCTL_OFFSET); + + /* Disable internal clock */ + bcm_kona_i2c_disable_clock(dev); + + /* Disable external clock */ + clk_disable_unprepare(dev->external_clk); + + /* Add the i2c adapter */ + adap = &dev->adapter; + i2c_set_adapdata(adap, dev); + adap->owner = THIS_MODULE; + strlcpy(adap->name, "Broadcom I2C adapter", sizeof(adap->name)); + adap->algo = &bcm_algo; + adap->dev.parent = &pdev->dev; + adap->dev.of_node = pdev->dev.of_node; + + rc = i2c_add_adapter(adap); + if (rc) { + dev_err(dev->device, "failed to add adapter\n"); + return rc; + } + + dev_info(dev->device, "device registered successfully\n"); + + return 0; + +probe_disable_clk: + bcm_kona_i2c_disable_clock(dev); + clk_disable_unprepare(dev->external_clk); + + return rc; +} + +static int bcm_kona_i2c_remove(struct platform_device *pdev) +{ + struct bcm_kona_i2c_dev *dev = platform_get_drvdata(pdev); + + i2c_del_adapter(&dev->adapter); + + return 0; +} + +static const struct of_device_id bcm_kona_i2c_of_match[] = { + {.compatible = "brcm,kona-i2c",}, + {}, +}; +MODULE_DEVICE_TABLE(of, kona_i2c_of_match); + +static struct platform_driver bcm_kona_i2c_driver = { + .driver = { + .name = "bcm-kona-i2c", + .owner = THIS_MODULE, + .of_match_table = bcm_kona_i2c_of_match, + }, + .probe = bcm_kona_i2c_probe, + .remove = bcm_kona_i2c_remove, +}; +module_platform_driver(bcm_kona_i2c_driver); + +MODULE_AUTHOR("Tim Kryger <tkryger@broadcom.com>"); +MODULE_DESCRIPTION("Broadcom Kona I2C Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-bfin-twi.c 
b/drivers/i2c/busses/i2c-bfin-twi.c index 35a473ba3d81..3b9bd9a3f2b0 100644 --- a/drivers/i2c/busses/i2c-bfin-twi.c +++ b/drivers/i2c/busses/i2c-bfin-twi.c @@ -675,7 +675,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev) p_adap->retries = 3; rc = peripheral_request_list( - (unsigned short *)dev_get_platdata(&pdev->dev), + dev_get_platdata(&pdev->dev), "i2c-bfin-twi"); if (rc) { dev_err(&pdev->dev, "Can't setup pin mux!\n"); @@ -723,7 +723,7 @@ out_error_add_adapter: free_irq(iface->irq, iface); out_error_req_irq: out_error_no_irq: - peripheral_free_list((unsigned short *)dev_get_platdata(&pdev->dev)); + peripheral_free_list(dev_get_platdata(&pdev->dev)); out_error_pin_mux: iounmap(iface->regs_base); out_error_ioremap: @@ -739,7 +739,7 @@ static int i2c_bfin_twi_remove(struct platform_device *pdev) i2c_del_adapter(&(iface->adap)); free_irq(iface->irq, iface); - peripheral_free_list((unsigned short *)dev_get_platdata(&pdev->dev)); + peripheral_free_list(dev_get_platdata(&pdev->dev)); iounmap(iface->regs_base); kfree(iface); diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c index 2d46f13adfdf..ce7ffba2b020 100644 --- a/drivers/i2c/busses/i2c-cbus-gpio.c +++ b/drivers/i2c/busses/i2c-cbus-gpio.c @@ -246,6 +246,7 @@ static int cbus_i2c_probe(struct platform_device *pdev) adapter->owner = THIS_MODULE; adapter->class = I2C_CLASS_HWMON; adapter->dev.parent = &pdev->dev; + adapter->dev.of_node = pdev->dev.of_node; adapter->nr = pdev->id; adapter->timeout = HZ; adapter->algo = &cbus_i2c_algo; @@ -289,6 +290,7 @@ static struct platform_driver cbus_i2c_driver = { .driver = { .owner = THIS_MODULE, .name = "i2c-cbus-gpio", + .of_match_table = of_match_ptr(i2c_cbus_dt_ids), }, }; module_platform_driver(cbus_i2c_driver); diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index 960dec61c64e..ff05d9fef4a8 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -795,7 +795,7 @@ static struct platform_driver davinci_i2c_driver = { .name = "i2c_davinci", .owner = THIS_MODULE, .pm = davinci_i2c_pm_ops, - .of_match_table = of_match_ptr(davinci_i2c_of_match), + .of_match_table = davinci_i2c_of_match, }, }; diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 0aa01136f8d9..d0bdac0498ce 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -103,6 +103,8 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev) static const struct acpi_device_id dw_i2c_acpi_match[] = { { "INT33C2", 0 }, { "INT33C3", 0 }, + { "INT3432", 0 }, + { "INT3433", 0 }, { "80860F41", 0 }, { } }; diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c index 0f3752967c4b..ff15ae90aaf5 100644 --- a/drivers/i2c/busses/i2c-eg20t.c +++ b/drivers/i2c/busses/i2c-eg20t.c @@ -312,24 +312,6 @@ static void pch_i2c_start(struct i2c_algo_pch_data *adap) } /** - * pch_i2c_getack() - to confirm ACK/NACK - * @adap: Pointer to struct i2c_algo_pch_data. - */ -static s32 pch_i2c_getack(struct i2c_algo_pch_data *adap) -{ - u32 reg_val; - void __iomem *p = adap->pch_base_address; - reg_val = ioread32(p + PCH_I2CSR) & PCH_GETACK; - - if (reg_val != 0) { - pch_err(adap, "return%d\n", -EPROTO); - return -EPROTO; - } - - return 0; -} - -/** * pch_i2c_stop() - generate stop condition in normal mode. * @adap: Pointer to struct i2c_algo_pch_data. 
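The pch_i2c_getack() helper removed above is open-coded at its call site in the next hunk, and the address-NAK path now returns -ENXIO, the customary I2C fault code for a device that does not answer its address, instead of -EIO. A minimal sketch of the equivalent check, using only definitions already present in i2c-eg20t.c; the function name is hypothetical and the code is not part of the patch:

        /*
         * Illustrative only: the ACK test formerly done by pch_i2c_getack(),
         * expressed as a tiny helper. PCH_I2CSR and PCH_GETACK are the
         * existing register and bit definitions of this driver.
         */
        static int pch_i2c_check_addr_ack(struct i2c_algo_pch_data *adap)
        {
                void __iomem *p = adap->pch_base_address;

                if (ioread32(p + PCH_I2CSR) & PCH_GETACK)
                        return -ENXIO;  /* slave address not acknowledged */

                return 0;
        }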
*/ @@ -344,6 +326,7 @@ static void pch_i2c_stop(struct i2c_algo_pch_data *adap) static int pch_i2c_wait_for_check_xfer(struct i2c_algo_pch_data *adap) { long ret; + void __iomem *p = adap->pch_base_address; ret = wait_event_timeout(pch_event, (adap->pch_event_flag != 0), msecs_to_jiffies(1000)); @@ -366,10 +349,9 @@ static int pch_i2c_wait_for_check_xfer(struct i2c_algo_pch_data *adap) adap->pch_event_flag = 0; - if (pch_i2c_getack(adap)) { - pch_dbg(adap, "Receive NACK for slave address" - "setting\n"); - return -EIO; + if (ioread32(p + PCH_I2CSR) & PCH_GETACK) { + pch_dbg(adap, "Receive NACK for slave address setting\n"); + return -ENXIO; } return 0; diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c new file mode 100644 index 000000000000..c1ef228095b5 --- /dev/null +++ b/drivers/i2c/busses/i2c-exynos5.c @@ -0,0 +1,769 @@ +/** + * i2c-exynos5.c - Samsung Exynos5 I2C Controller Driver + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <linux/kernel.h> +#include <linux/module.h> + +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/time.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/spinlock.h> + +/* + * HSI2C controller from Samsung supports 2 modes of operation + * 1. Auto mode: Where in master automatically controls the whole transaction + * 2. Manual mode: Software controls the transaction by issuing commands + * START, READ, WRITE, STOP, RESTART in I2C_MANUAL_CMD register. 
+ * + * Operation mode can be selected by setting AUTO_MODE bit in I2C_CONF register + * + * Special bits are available for both modes of operation to set commands + * and for checking transfer status + */ + +/* Register Map */ +#define HSI2C_CTL 0x00 +#define HSI2C_FIFO_CTL 0x04 +#define HSI2C_TRAILIG_CTL 0x08 +#define HSI2C_CLK_CTL 0x0C +#define HSI2C_CLK_SLOT 0x10 +#define HSI2C_INT_ENABLE 0x20 +#define HSI2C_INT_STATUS 0x24 +#define HSI2C_ERR_STATUS 0x2C +#define HSI2C_FIFO_STATUS 0x30 +#define HSI2C_TX_DATA 0x34 +#define HSI2C_RX_DATA 0x38 +#define HSI2C_CONF 0x40 +#define HSI2C_AUTO_CONF 0x44 +#define HSI2C_TIMEOUT 0x48 +#define HSI2C_MANUAL_CMD 0x4C +#define HSI2C_TRANS_STATUS 0x50 +#define HSI2C_TIMING_HS1 0x54 +#define HSI2C_TIMING_HS2 0x58 +#define HSI2C_TIMING_HS3 0x5C +#define HSI2C_TIMING_FS1 0x60 +#define HSI2C_TIMING_FS2 0x64 +#define HSI2C_TIMING_FS3 0x68 +#define HSI2C_TIMING_SLA 0x6C +#define HSI2C_ADDR 0x70 + +/* I2C_CTL Register bits */ +#define HSI2C_FUNC_MODE_I2C (1u << 0) +#define HSI2C_MASTER (1u << 3) +#define HSI2C_RXCHON (1u << 6) +#define HSI2C_TXCHON (1u << 7) +#define HSI2C_SW_RST (1u << 31) + +/* I2C_FIFO_CTL Register bits */ +#define HSI2C_RXFIFO_EN (1u << 0) +#define HSI2C_TXFIFO_EN (1u << 1) +#define HSI2C_RXFIFO_TRIGGER_LEVEL(x) ((x) << 4) +#define HSI2C_TXFIFO_TRIGGER_LEVEL(x) ((x) << 16) + +/* As per user manual FIFO max depth is 64bytes */ +#define HSI2C_FIFO_MAX 0x40 +/* default trigger levels for Tx and Rx FIFOs */ +#define HSI2C_DEF_TXFIFO_LVL (HSI2C_FIFO_MAX - 0x30) +#define HSI2C_DEF_RXFIFO_LVL (HSI2C_FIFO_MAX - 0x10) + +/* I2C_TRAILING_CTL Register bits */ +#define HSI2C_TRAILING_COUNT (0xf) + +/* I2C_INT_EN Register bits */ +#define HSI2C_INT_TX_ALMOSTEMPTY_EN (1u << 0) +#define HSI2C_INT_RX_ALMOSTFULL_EN (1u << 1) +#define HSI2C_INT_TRAILING_EN (1u << 6) +#define HSI2C_INT_I2C_EN (1u << 9) + +/* I2C_INT_STAT Register bits */ +#define HSI2C_INT_TX_ALMOSTEMPTY (1u << 0) +#define HSI2C_INT_RX_ALMOSTFULL (1u << 1) +#define HSI2C_INT_TX_UNDERRUN (1u << 2) +#define HSI2C_INT_TX_OVERRUN (1u << 3) +#define HSI2C_INT_RX_UNDERRUN (1u << 4) +#define HSI2C_INT_RX_OVERRUN (1u << 5) +#define HSI2C_INT_TRAILING (1u << 6) +#define HSI2C_INT_I2C (1u << 9) + +/* I2C_FIFO_STAT Register bits */ +#define HSI2C_RX_FIFO_EMPTY (1u << 24) +#define HSI2C_RX_FIFO_FULL (1u << 23) +#define HSI2C_RX_FIFO_LVL(x) ((x >> 16) & 0x7f) +#define HSI2C_TX_FIFO_EMPTY (1u << 8) +#define HSI2C_TX_FIFO_FULL (1u << 7) +#define HSI2C_TX_FIFO_LVL(x) ((x >> 0) & 0x7f) + +/* I2C_CONF Register bits */ +#define HSI2C_AUTO_MODE (1u << 31) +#define HSI2C_10BIT_ADDR_MODE (1u << 30) +#define HSI2C_HS_MODE (1u << 29) + +/* I2C_AUTO_CONF Register bits */ +#define HSI2C_READ_WRITE (1u << 16) +#define HSI2C_STOP_AFTER_TRANS (1u << 17) +#define HSI2C_MASTER_RUN (1u << 31) + +/* I2C_TIMEOUT Register bits */ +#define HSI2C_TIMEOUT_EN (1u << 31) +#define HSI2C_TIMEOUT_MASK 0xff + +/* I2C_TRANS_STATUS register bits */ +#define HSI2C_MASTER_BUSY (1u << 17) +#define HSI2C_SLAVE_BUSY (1u << 16) +#define HSI2C_TIMEOUT_AUTO (1u << 4) +#define HSI2C_NO_DEV (1u << 3) +#define HSI2C_NO_DEV_ACK (1u << 2) +#define HSI2C_TRANS_ABORT (1u << 1) +#define HSI2C_TRANS_DONE (1u << 0) + +/* I2C_ADDR register bits */ +#define HSI2C_SLV_ADDR_SLV(x) ((x & 0x3ff) << 0) +#define HSI2C_SLV_ADDR_MAS(x) ((x & 0x3ff) << 10) +#define HSI2C_MASTER_ID(x) ((x & 0xff) << 24) +#define MASTER_ID(x) ((x & 0x7) + 0x08) + +/* + * Controller operating frequency, timing values for operation + * are calculated against this frequency + */ 
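The bit helpers above are easier to follow with a concrete use. A minimal, illustrative sketch (not part of the patch) that composes the ADDR register and decodes FIFO occupancy; the 7-bit client address 0x50 and adapter number 0 are made-up values:

        /*
         * Illustrative sketch: composing I2C_ADDR and reading FIFO levels
         * with the macros defined above.
         */
        static void hsi2c_macro_example(void __iomem *regs)
        {
                u32 fifo_status, addr;

                /* master transmits to 7-bit address 0x50; master ID derives from adapter nr 0 */
                addr = HSI2C_SLV_ADDR_MAS(0x50) | HSI2C_MASTER_ID(MASTER_ID(0));
                writel(addr, regs + HSI2C_ADDR);

                /* TX/RX occupancy is a 7-bit count packed into FIFO_STATUS */
                fifo_status = readl(regs + HSI2C_FIFO_STATUS);
                pr_info("tx level %u, rx level %u\n",
                        HSI2C_TX_FIFO_LVL(fifo_status),
                        HSI2C_RX_FIFO_LVL(fifo_status));
        }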
+#define HSI2C_HS_TX_CLOCK 1000000 +#define HSI2C_FS_TX_CLOCK 100000 +#define HSI2C_HIGH_SPD 1 +#define HSI2C_FAST_SPD 0 + +#define EXYNOS5_I2C_TIMEOUT (msecs_to_jiffies(1000)) + +struct exynos5_i2c { + struct i2c_adapter adap; + unsigned int suspended:1; + + struct i2c_msg *msg; + struct completion msg_complete; + unsigned int msg_ptr; + + unsigned int irq; + + void __iomem *regs; + struct clk *clk; + struct device *dev; + int state; + + spinlock_t lock; /* IRQ synchronization */ + + /* + * Since the TRANS_DONE bit is cleared on read, and we may read it + * either during an IRQ or after a transaction, keep track of its + * state here. + */ + int trans_done; + + /* Controller operating frequency */ + unsigned int fs_clock; + unsigned int hs_clock; + + /* + * HSI2C Controller can operate in + * 1. High speed upto 3.4Mbps + * 2. Fast speed upto 1Mbps + */ + int speed_mode; +}; + +static const struct of_device_id exynos5_i2c_match[] = { + { .compatible = "samsung,exynos5-hsi2c" }, + {}, +}; +MODULE_DEVICE_TABLE(of, exynos5_i2c_match); + +static void exynos5_i2c_clr_pend_irq(struct exynos5_i2c *i2c) +{ + writel(readl(i2c->regs + HSI2C_INT_STATUS), + i2c->regs + HSI2C_INT_STATUS); +} + +/* + * exynos5_i2c_set_timing: updates the registers with appropriate + * timing values calculated + * + * Returns 0 on success, -EINVAL if the cycle length cannot + * be calculated. + */ +static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, int mode) +{ + u32 i2c_timing_s1; + u32 i2c_timing_s2; + u32 i2c_timing_s3; + u32 i2c_timing_sla; + unsigned int t_start_su, t_start_hd; + unsigned int t_stop_su; + unsigned int t_data_su, t_data_hd; + unsigned int t_scl_l, t_scl_h; + unsigned int t_sr_release; + unsigned int t_ftl_cycle; + unsigned int clkin = clk_get_rate(i2c->clk); + unsigned int div, utemp0 = 0, utemp1 = 0, clk_cycle; + unsigned int op_clk = (mode == HSI2C_HIGH_SPD) ? + i2c->hs_clock : i2c->fs_clock; + + /* + * FPCLK / FI2C = + * (CLK_DIV + 1) * (TSCLK_L + TSCLK_H + 2) + 8 + 2 * FLT_CYCLE + * utemp0 = (CLK_DIV + 1) * (TSCLK_L + TSCLK_H + 2) + * utemp1 = (TSCLK_L + TSCLK_H + 2) + */ + t_ftl_cycle = (readl(i2c->regs + HSI2C_CONF) >> 16) & 0x7; + utemp0 = (clkin / op_clk) - 8 - 2 * t_ftl_cycle; + + /* CLK_DIV max is 256 */ + for (div = 0; div < 256; div++) { + utemp1 = utemp0 / (div + 1); + + /* + * SCL_L and SCL_H each has max value of 255 + * Hence, For the clk_cycle to the have right value + * utemp1 has to be less then 512 and more than 4. 
+ */ + if ((utemp1 < 512) && (utemp1 > 4)) { + clk_cycle = utemp1 - 2; + break; + } else if (div == 255) { + dev_warn(i2c->dev, "Failed to calculate divisor"); + return -EINVAL; + } + } + + t_scl_l = clk_cycle / 2; + t_scl_h = clk_cycle / 2; + t_start_su = t_scl_l; + t_start_hd = t_scl_l; + t_stop_su = t_scl_l; + t_data_su = t_scl_l / 2; + t_data_hd = t_scl_l / 2; + t_sr_release = clk_cycle; + + i2c_timing_s1 = t_start_su << 24 | t_start_hd << 16 | t_stop_su << 8; + i2c_timing_s2 = t_data_su << 24 | t_scl_l << 8 | t_scl_h << 0; + i2c_timing_s3 = div << 16 | t_sr_release << 0; + i2c_timing_sla = t_data_hd << 0; + + dev_dbg(i2c->dev, "tSTART_SU: %X, tSTART_HD: %X, tSTOP_SU: %X\n", + t_start_su, t_start_hd, t_stop_su); + dev_dbg(i2c->dev, "tDATA_SU: %X, tSCL_L: %X, tSCL_H: %X\n", + t_data_su, t_scl_l, t_scl_h); + dev_dbg(i2c->dev, "nClkDiv: %X, tSR_RELEASE: %X\n", + div, t_sr_release); + dev_dbg(i2c->dev, "tDATA_HD: %X\n", t_data_hd); + + if (mode == HSI2C_HIGH_SPD) { + writel(i2c_timing_s1, i2c->regs + HSI2C_TIMING_HS1); + writel(i2c_timing_s2, i2c->regs + HSI2C_TIMING_HS2); + writel(i2c_timing_s3, i2c->regs + HSI2C_TIMING_HS3); + } else { + writel(i2c_timing_s1, i2c->regs + HSI2C_TIMING_FS1); + writel(i2c_timing_s2, i2c->regs + HSI2C_TIMING_FS2); + writel(i2c_timing_s3, i2c->regs + HSI2C_TIMING_FS3); + } + writel(i2c_timing_sla, i2c->regs + HSI2C_TIMING_SLA); + + return 0; +} + +static int exynos5_hsi2c_clock_setup(struct exynos5_i2c *i2c) +{ + /* + * Configure the Fast speed timing values + * Even the High Speed mode initially starts with Fast mode + */ + if (exynos5_i2c_set_timing(i2c, HSI2C_FAST_SPD)) { + dev_err(i2c->dev, "HSI2C FS Clock set up failed\n"); + return -EINVAL; + } + + /* configure the High speed timing values */ + if (i2c->speed_mode == HSI2C_HIGH_SPD) { + if (exynos5_i2c_set_timing(i2c, HSI2C_HIGH_SPD)) { + dev_err(i2c->dev, "HSI2C HS Clock set up failed\n"); + return -EINVAL; + } + } + + return 0; +} + +/* + * exynos5_i2c_init: configures the controller for I2C functionality + * Programs I2C controller for Master mode operation + */ +static void exynos5_i2c_init(struct exynos5_i2c *i2c) +{ + u32 i2c_conf = readl(i2c->regs + HSI2C_CONF); + u32 i2c_timeout = readl(i2c->regs + HSI2C_TIMEOUT); + + /* Clear to disable Timeout */ + i2c_timeout &= ~HSI2C_TIMEOUT_EN; + writel(i2c_timeout, i2c->regs + HSI2C_TIMEOUT); + + writel((HSI2C_FUNC_MODE_I2C | HSI2C_MASTER), + i2c->regs + HSI2C_CTL); + writel(HSI2C_TRAILING_COUNT, i2c->regs + HSI2C_TRAILIG_CTL); + + if (i2c->speed_mode == HSI2C_HIGH_SPD) { + writel(HSI2C_MASTER_ID(MASTER_ID(i2c->adap.nr)), + i2c->regs + HSI2C_ADDR); + i2c_conf |= HSI2C_HS_MODE; + } + + writel(i2c_conf | HSI2C_AUTO_MODE, i2c->regs + HSI2C_CONF); +} + +static void exynos5_i2c_reset(struct exynos5_i2c *i2c) +{ + u32 i2c_ctl; + + /* Set and clear the bit for reset */ + i2c_ctl = readl(i2c->regs + HSI2C_CTL); + i2c_ctl |= HSI2C_SW_RST; + writel(i2c_ctl, i2c->regs + HSI2C_CTL); + + i2c_ctl = readl(i2c->regs + HSI2C_CTL); + i2c_ctl &= ~HSI2C_SW_RST; + writel(i2c_ctl, i2c->regs + HSI2C_CTL); + + /* We don't expect calculations to fail during the run */ + exynos5_hsi2c_clock_setup(i2c); + /* Initialize the configure registers */ + exynos5_i2c_init(i2c); +} + +/* + * exynos5_i2c_irq: top level IRQ servicing routine + * + * INT_STATUS registers gives the interrupt details. Further, + * FIFO_STATUS or TRANS_STATUS registers are to be check for detailed + * state of the bus. 
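Stepping back to the divisor search in exynos5_i2c_set_timing() above, a worked example may help. The numbers are assumptions chosen purely for illustration (66 MHz input clock, 100 kHz bus, zero filter cycles), not values taken from hardware:

        /* Standalone arithmetic sketch of the CLK_DIV search loop above. */
        #include <stdio.h>

        int main(void)
        {
                unsigned int clkin = 66000000, op_clk = 100000, t_ftl_cycle = 0;
                unsigned int utemp0 = (clkin / op_clk) - 8 - 2 * t_ftl_cycle;   /* 652 */
                unsigned int div, utemp1, clk_cycle = 0;

                for (div = 0; div < 256; div++) {
                        utemp1 = utemp0 / (div + 1);
                        if (utemp1 < 512 && utemp1 > 4) {
                                clk_cycle = utemp1 - 2;
                                break;
                        }
                }

                /* div = 1, clk_cycle = 324, so SCL low/high are 162 clocks each */
                printf("div=%u clk_cycle=%u scl_l=%u scl_h=%u\n",
                       div, clk_cycle, clk_cycle / 2, clk_cycle / 2);
                return 0;
        }

Plugging the result back into the formula gives (1 + 1) * (162 + 162 + 2) + 8 = 660 input clocks per SCL cycle, i.e. the requested 100 kHz.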
+ */ +static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id) +{ + struct exynos5_i2c *i2c = dev_id; + u32 fifo_level, int_status, fifo_status, trans_status; + unsigned char byte; + int len = 0; + + i2c->state = -EINVAL; + + spin_lock(&i2c->lock); + + int_status = readl(i2c->regs + HSI2C_INT_STATUS); + writel(int_status, i2c->regs + HSI2C_INT_STATUS); + fifo_status = readl(i2c->regs + HSI2C_FIFO_STATUS); + + /* handle interrupt related to the transfer status */ + if (int_status & HSI2C_INT_I2C) { + trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS); + if (trans_status & HSI2C_NO_DEV_ACK) { + dev_dbg(i2c->dev, "No ACK from device\n"); + i2c->state = -ENXIO; + goto stop; + } else if (trans_status & HSI2C_NO_DEV) { + dev_dbg(i2c->dev, "No device\n"); + i2c->state = -ENXIO; + goto stop; + } else if (trans_status & HSI2C_TRANS_ABORT) { + dev_dbg(i2c->dev, "Deal with arbitration lose\n"); + i2c->state = -EAGAIN; + goto stop; + } else if (trans_status & HSI2C_TIMEOUT_AUTO) { + dev_dbg(i2c->dev, "Accessing device timed out\n"); + i2c->state = -EAGAIN; + goto stop; + } else if (trans_status & HSI2C_TRANS_DONE) { + i2c->trans_done = 1; + i2c->state = 0; + } + } + + if ((i2c->msg->flags & I2C_M_RD) && (int_status & + (HSI2C_INT_TRAILING | HSI2C_INT_RX_ALMOSTFULL))) { + fifo_status = readl(i2c->regs + HSI2C_FIFO_STATUS); + fifo_level = HSI2C_RX_FIFO_LVL(fifo_status); + len = min(fifo_level, i2c->msg->len - i2c->msg_ptr); + + while (len > 0) { + byte = (unsigned char) + readl(i2c->regs + HSI2C_RX_DATA); + i2c->msg->buf[i2c->msg_ptr++] = byte; + len--; + } + i2c->state = 0; + } else if (int_status & HSI2C_INT_TX_ALMOSTEMPTY) { + fifo_status = readl(i2c->regs + HSI2C_FIFO_STATUS); + fifo_level = HSI2C_TX_FIFO_LVL(fifo_status); + + len = HSI2C_FIFO_MAX - fifo_level; + if (len > (i2c->msg->len - i2c->msg_ptr)) + len = i2c->msg->len - i2c->msg_ptr; + + while (len > 0) { + byte = i2c->msg->buf[i2c->msg_ptr++]; + writel(byte, i2c->regs + HSI2C_TX_DATA); + len--; + } + i2c->state = 0; + } + + stop: + if ((i2c->trans_done && (i2c->msg->len == i2c->msg_ptr)) || + (i2c->state < 0)) { + writel(0, i2c->regs + HSI2C_INT_ENABLE); + exynos5_i2c_clr_pend_irq(i2c); + complete(&i2c->msg_complete); + } + + spin_unlock(&i2c->lock); + + return IRQ_HANDLED; +} + +/* + * exynos5_i2c_wait_bus_idle + * + * Wait for the bus to go idle, indicated by the MASTER_BUSY bit being + * cleared. + * + * Returns -EBUSY if the bus cannot be bought to idle + */ +static int exynos5_i2c_wait_bus_idle(struct exynos5_i2c *i2c) +{ + unsigned long stop_time; + u32 trans_status; + + /* wait for 100 milli seconds for the bus to be idle */ + stop_time = jiffies + msecs_to_jiffies(100) + 1; + do { + trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS); + if (!(trans_status & HSI2C_MASTER_BUSY)) + return 0; + + usleep_range(50, 200); + } while (time_before(jiffies, stop_time)); + + return -EBUSY; +} + +/* + * exynos5_i2c_message_start: Configures the bus and starts the xfer + * i2c: struct exynos5_i2c pointer for the current bus + * stop: Enables stop after transfer if set. Set for last transfer of + * in the list of messages. + * + * Configures the bus for read/write function + * Sets chip address to talk to, message length to be sent. + * Enables appropriate interrupts and sends start xfer command. 
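Before the body of exynos5_i2c_message_start() below, a hedged sketch of how the AUTO_CONF word it writes is assembled: the transfer length sits in the low bits, with the READ_WRITE, STOP_AFTER_TRANS and MASTER_RUN flags defined at the top of the file OR'd in. The helper name is hypothetical and the code is illustrative only:

        /* Illustrative shape of the I2C_AUTO_CONF value built by the start routine below. */
        static u32 hsi2c_build_auto_conf(struct i2c_msg *msg, int stop)
        {
                u32 auto_conf = msg->len;                       /* bytes to transfer */

                if (msg->flags & I2C_M_RD)
                        auto_conf |= HSI2C_READ_WRITE;          /* read transfer */
                if (stop)
                        auto_conf |= HSI2C_STOP_AFTER_TRANS;    /* issue STOP at the end */

                return auto_conf | HSI2C_MASTER_RUN;            /* kick off the transfer */
        }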
+ */ +static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop) +{ + u32 i2c_ctl; + u32 int_en = HSI2C_INT_I2C_EN; + u32 i2c_auto_conf = 0; + u32 fifo_ctl; + unsigned long flags; + + i2c_ctl = readl(i2c->regs + HSI2C_CTL); + i2c_ctl &= ~(HSI2C_TXCHON | HSI2C_RXCHON); + fifo_ctl = HSI2C_RXFIFO_EN | HSI2C_TXFIFO_EN; + + if (i2c->msg->flags & I2C_M_RD) { + i2c_ctl |= HSI2C_RXCHON; + + i2c_auto_conf = HSI2C_READ_WRITE; + + fifo_ctl |= HSI2C_RXFIFO_TRIGGER_LEVEL(HSI2C_DEF_TXFIFO_LVL); + int_en |= (HSI2C_INT_RX_ALMOSTFULL_EN | + HSI2C_INT_TRAILING_EN); + } else { + i2c_ctl |= HSI2C_TXCHON; + + fifo_ctl |= HSI2C_TXFIFO_TRIGGER_LEVEL(HSI2C_DEF_RXFIFO_LVL); + int_en |= HSI2C_INT_TX_ALMOSTEMPTY_EN; + } + + writel(HSI2C_SLV_ADDR_MAS(i2c->msg->addr), i2c->regs + HSI2C_ADDR); + + writel(fifo_ctl, i2c->regs + HSI2C_FIFO_CTL); + writel(i2c_ctl, i2c->regs + HSI2C_CTL); + + + /* + * Enable interrupts before starting the transfer so that we don't + * miss any INT_I2C interrupts. + */ + spin_lock_irqsave(&i2c->lock, flags); + writel(int_en, i2c->regs + HSI2C_INT_ENABLE); + + if (stop == 1) + i2c_auto_conf |= HSI2C_STOP_AFTER_TRANS; + i2c_auto_conf |= i2c->msg->len; + i2c_auto_conf |= HSI2C_MASTER_RUN; + writel(i2c_auto_conf, i2c->regs + HSI2C_AUTO_CONF); + spin_unlock_irqrestore(&i2c->lock, flags); +} + +static int exynos5_i2c_xfer_msg(struct exynos5_i2c *i2c, + struct i2c_msg *msgs, int stop) +{ + unsigned long timeout; + int ret; + + i2c->msg = msgs; + i2c->msg_ptr = 0; + i2c->trans_done = 0; + + reinit_completion(&i2c->msg_complete); + + exynos5_i2c_message_start(i2c, stop); + + timeout = wait_for_completion_timeout(&i2c->msg_complete, + EXYNOS5_I2C_TIMEOUT); + if (timeout == 0) + ret = -ETIMEDOUT; + else + ret = i2c->state; + + /* + * If this is the last message to be transfered (stop == 1) + * Then check if the bus can be brought back to idle. + */ + if (ret == 0 && stop) + ret = exynos5_i2c_wait_bus_idle(i2c); + + if (ret < 0) { + exynos5_i2c_reset(i2c); + if (ret == -ETIMEDOUT) + dev_warn(i2c->dev, "%s timeout\n", + (msgs->flags & I2C_M_RD) ? 
"rx" : "tx"); + } + + /* Return the state as in interrupt routine */ + return ret; +} + +static int exynos5_i2c_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num) +{ + struct exynos5_i2c *i2c = (struct exynos5_i2c *)adap->algo_data; + int i = 0, ret = 0, stop = 0; + + if (i2c->suspended) { + dev_err(i2c->dev, "HS-I2C is not initialzed.\n"); + return -EIO; + } + + clk_prepare_enable(i2c->clk); + + for (i = 0; i < num; i++, msgs++) { + stop = (i == num - 1); + + ret = exynos5_i2c_xfer_msg(i2c, msgs, stop); + + if (ret < 0) + goto out; + } + + if (i == num) { + ret = num; + } else { + /* Only one message, cannot access the device */ + if (i == 1) + ret = -EREMOTEIO; + else + ret = i; + + dev_warn(i2c->dev, "xfer message failed\n"); + } + + out: + clk_disable_unprepare(i2c->clk); + return ret; +} + +static u32 exynos5_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); +} + +static const struct i2c_algorithm exynos5_i2c_algorithm = { + .master_xfer = exynos5_i2c_xfer, + .functionality = exynos5_i2c_func, +}; + +static int exynos5_i2c_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct exynos5_i2c *i2c; + struct resource *mem; + unsigned int op_clock; + int ret; + + i2c = devm_kzalloc(&pdev->dev, sizeof(struct exynos5_i2c), GFP_KERNEL); + if (!i2c) { + dev_err(&pdev->dev, "no memory for state\n"); + return -ENOMEM; + } + + if (of_property_read_u32(np, "clock-frequency", &op_clock)) { + i2c->speed_mode = HSI2C_FAST_SPD; + i2c->fs_clock = HSI2C_FS_TX_CLOCK; + } else { + if (op_clock >= HSI2C_HS_TX_CLOCK) { + i2c->speed_mode = HSI2C_HIGH_SPD; + i2c->fs_clock = HSI2C_FS_TX_CLOCK; + i2c->hs_clock = op_clock; + } else { + i2c->speed_mode = HSI2C_FAST_SPD; + i2c->fs_clock = op_clock; + } + } + + strlcpy(i2c->adap.name, "exynos5-i2c", sizeof(i2c->adap.name)); + i2c->adap.owner = THIS_MODULE; + i2c->adap.algo = &exynos5_i2c_algorithm; + i2c->adap.retries = 3; + + i2c->dev = &pdev->dev; + i2c->clk = devm_clk_get(&pdev->dev, "hsi2c"); + if (IS_ERR(i2c->clk)) { + dev_err(&pdev->dev, "cannot get clock\n"); + return -ENOENT; + } + + clk_prepare_enable(i2c->clk); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + i2c->regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(i2c->regs)) { + ret = PTR_ERR(i2c->regs); + goto err_clk; + } + + i2c->adap.dev.of_node = np; + i2c->adap.algo_data = i2c; + i2c->adap.dev.parent = &pdev->dev; + + /* Clear pending interrupts from u-boot or misc causes */ + exynos5_i2c_clr_pend_irq(i2c); + + spin_lock_init(&i2c->lock); + init_completion(&i2c->msg_complete); + + i2c->irq = ret = platform_get_irq(pdev, 0); + if (ret <= 0) { + dev_err(&pdev->dev, "cannot find HS-I2C IRQ\n"); + ret = -EINVAL; + goto err_clk; + } + + ret = devm_request_irq(&pdev->dev, i2c->irq, exynos5_i2c_irq, + IRQF_NO_SUSPEND | IRQF_ONESHOT, + dev_name(&pdev->dev), i2c); + + if (ret != 0) { + dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", i2c->irq); + goto err_clk; + } + + ret = exynos5_hsi2c_clock_setup(i2c); + if (ret) + goto err_clk; + + exynos5_i2c_init(i2c); + + ret = i2c_add_adapter(&i2c->adap); + if (ret < 0) { + dev_err(&pdev->dev, "failed to add bus to i2c core\n"); + goto err_clk; + } + + platform_set_drvdata(pdev, i2c); + + err_clk: + clk_disable_unprepare(i2c->clk); + return ret; +} + +static int exynos5_i2c_remove(struct platform_device *pdev) +{ + struct exynos5_i2c *i2c = platform_get_drvdata(pdev); + + i2c_del_adapter(&i2c->adap); + + return 0; +} + +static int 
exynos5_i2c_suspend_noirq(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct exynos5_i2c *i2c = platform_get_drvdata(pdev); + + i2c->suspended = 1; + + return 0; +} + +static int exynos5_i2c_resume_noirq(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct exynos5_i2c *i2c = platform_get_drvdata(pdev); + int ret = 0; + + clk_prepare_enable(i2c->clk); + + ret = exynos5_hsi2c_clock_setup(i2c); + if (ret) { + clk_disable_unprepare(i2c->clk); + return ret; + } + + exynos5_i2c_init(i2c); + clk_disable_unprepare(i2c->clk); + i2c->suspended = 0; + + return 0; +} + +static SIMPLE_DEV_PM_OPS(exynos5_i2c_dev_pm_ops, exynos5_i2c_suspend_noirq, + exynos5_i2c_resume_noirq); + +static struct platform_driver exynos5_i2c_driver = { + .probe = exynos5_i2c_probe, + .remove = exynos5_i2c_remove, + .driver = { + .owner = THIS_MODULE, + .name = "exynos5-hsi2c", + .pm = &exynos5_i2c_dev_pm_ops, + .of_match_table = exynos5_i2c_match, + }, +}; + +module_platform_driver(exynos5_i2c_driver); + +MODULE_DESCRIPTION("Exynos5 HS-I2C Bus driver"); +MODULE_AUTHOR("Naveen Krishna Chatradhi, <ch.naveen@samsung.com>"); +MODULE_AUTHOR("Taekgyun Ko, <taeggyun.ko@samsung.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c index bfa02c6c2dda..d9f7e186a4c7 100644 --- a/drivers/i2c/busses/i2c-gpio.c +++ b/drivers/i2c/busses/i2c-gpio.c @@ -15,6 +15,7 @@ #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/gpio.h> +#include <linux/of.h> #include <linux/of_gpio.h> struct i2c_gpio_private_data { diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 4296d1721272..737e29866887 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -59,6 +59,7 @@ Wellsburg (PCH) MS 0x8d7e 32 hard yes yes yes Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes Coleto Creek (PCH) 0x23b0 32 hard yes yes yes + Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes Features supported by this driver: Software PEC no @@ -177,6 +178,7 @@ #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1 0x8d7e #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2 0x8d7f #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22 +#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS 0x9ca2 struct i801_mux_config { char *gpio_chip; @@ -819,6 +821,7 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) }, { 0, } }; diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index d3e9cc3153a9..8be7e42aa4de 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -911,7 +911,7 @@ static struct platform_driver mv64xxx_i2c_driver = { .driver = { .owner = THIS_MODULE, .name = MV64XXX_I2C_CTLR_NAME, - .of_match_table = of_match_ptr(mv64xxx_i2c_of_match_table), + .of_match_table = mv64xxx_i2c_of_match_table, }, }; diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index 3aedd86a6468..0cde4e6ab2b2 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c @@ -1,6 +1,7 @@ /* * Freescale MXS I2C bus driver * + * Copyright (C) 2012-2013 Marek Vasut <marex@denx.de> * Copyright (C) 2011-2012 Wolfram Sang, Pengutronix e.K. 
* * based on a (non-working) driver which was: @@ -34,10 +35,12 @@ #define MXS_I2C_CTRL0 (0x00) #define MXS_I2C_CTRL0_SET (0x04) +#define MXS_I2C_CTRL0_CLR (0x08) #define MXS_I2C_CTRL0_SFTRST 0x80000000 #define MXS_I2C_CTRL0_RUN 0x20000000 #define MXS_I2C_CTRL0_SEND_NAK_ON_LAST 0x02000000 +#define MXS_I2C_CTRL0_PIO_MODE 0x01000000 #define MXS_I2C_CTRL0_RETAIN_CLOCK 0x00200000 #define MXS_I2C_CTRL0_POST_SEND_STOP 0x00100000 #define MXS_I2C_CTRL0_PRE_SEND_START 0x00080000 @@ -64,13 +67,13 @@ #define MXS_I2C_CTRL1_SLAVE_IRQ 0x01 #define MXS_I2C_STAT (0x50) +#define MXS_I2C_STAT_GOT_A_NAK 0x10000000 #define MXS_I2C_STAT_BUS_BUSY 0x00000800 #define MXS_I2C_STAT_CLK_GEN_BUSY 0x00000400 -#define MXS_I2C_DATA (0xa0) +#define MXS_I2C_DATA(i2c) ((i2c->dev_type == MXS_I2C_V1) ? 0x60 : 0xa0) -#define MXS_I2C_DEBUG0 (0xb0) -#define MXS_I2C_DEBUG0_CLR (0xb8) +#define MXS_I2C_DEBUG0_CLR(i2c) ((i2c->dev_type == MXS_I2C_V1) ? 0x78 : 0xb8) #define MXS_I2C_DEBUG0_DMAREQ 0x80000000 @@ -95,10 +98,17 @@ #define MXS_CMD_I2C_READ (MXS_I2C_CTRL0_SEND_NAK_ON_LAST | \ MXS_I2C_CTRL0_MASTER_MODE) +enum mxs_i2c_devtype { + MXS_I2C_UNKNOWN = 0, + MXS_I2C_V1, + MXS_I2C_V2, +}; + /** * struct mxs_i2c_dev - per device, private MXS-I2C data * * @dev: driver model device node + * @dev_type: distinguish i.MX23/i.MX28 features * @regs: IO registers pointer * @cmd_complete: completion object for transaction wait * @cmd_err: error code for last transaction @@ -106,6 +116,7 @@ */ struct mxs_i2c_dev { struct device *dev; + enum mxs_i2c_devtype dev_type; void __iomem *regs; struct completion cmd_complete; int cmd_err; @@ -291,48 +302,11 @@ write_init_pio_fail: return -EINVAL; } -static int mxs_i2c_pio_wait_dmareq(struct mxs_i2c_dev *i2c) +static int mxs_i2c_pio_wait_xfer_end(struct mxs_i2c_dev *i2c) { unsigned long timeout = jiffies + msecs_to_jiffies(1000); - while (!(readl(i2c->regs + MXS_I2C_DEBUG0) & - MXS_I2C_DEBUG0_DMAREQ)) { - if (time_after(jiffies, timeout)) - return -ETIMEDOUT; - cond_resched(); - } - - return 0; -} - -static int mxs_i2c_pio_wait_cplt(struct mxs_i2c_dev *i2c, int last) -{ - unsigned long timeout = jiffies + msecs_to_jiffies(1000); - - /* - * We do not use interrupts in the PIO mode. Due to the - * maximum transfer length being 8 bytes in PIO mode, the - * overhead of interrupt would be too large and this would - * neglect the gain from using the PIO mode. - */ - - while (!(readl(i2c->regs + MXS_I2C_CTRL1) & - MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ)) { - if (time_after(jiffies, timeout)) - return -ETIMEDOUT; - cond_resched(); - } - - writel(MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ, - i2c->regs + MXS_I2C_CTRL1_CLR); - - /* - * When ending a transfer with a stop, we have to wait for the bus to - * go idle before we report the transfer as completed. Otherwise the - * start of the next transfer may race with the end of the current one. - */ - while (last && (readl(i2c->regs + MXS_I2C_STAT) & - (MXS_I2C_STAT_BUS_BUSY | MXS_I2C_STAT_CLK_GEN_BUSY))) { + while (readl(i2c->regs + MXS_I2C_CTRL0) & MXS_I2C_CTRL0_RUN) { if (time_after(jiffies, timeout)) return -ETIMEDOUT; cond_resched(); @@ -370,106 +344,215 @@ static void mxs_i2c_pio_trigger_cmd(struct mxs_i2c_dev *i2c, u32 cmd) writel(reg, i2c->regs + MXS_I2C_CTRL0); } +/* + * Start WRITE transaction on the I2C bus. By studying i.MX23 datasheet, + * CTRL0::PIO_MODE bit description clarifies the order in which the registers + * must be written during PIO mode operation. First, the CTRL0 register has + * to be programmed with all the necessary bits but the RUN bit. 
Then the + * payload has to be written into the DATA register. Finally, the transmission + * is executed by setting the RUN bit in CTRL0. + */ +static void mxs_i2c_pio_trigger_write_cmd(struct mxs_i2c_dev *i2c, u32 cmd, + u32 data) +{ + writel(cmd, i2c->regs + MXS_I2C_CTRL0); + + if (i2c->dev_type == MXS_I2C_V1) + writel(MXS_I2C_CTRL0_PIO_MODE, i2c->regs + MXS_I2C_CTRL0_SET); + + writel(data, i2c->regs + MXS_I2C_DATA(i2c)); + writel(MXS_I2C_CTRL0_RUN, i2c->regs + MXS_I2C_CTRL0_SET); +} + static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, uint32_t flags) { struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap); uint32_t addr_data = msg->addr << 1; uint32_t data = 0; - int i, shifts_left, ret; + int i, ret, xlen = 0, xmit = 0; + uint32_t start; /* Mute IRQs coming from this block. */ writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_CLR); + /* + * MX23 idea: + * - Enable CTRL0::PIO_MODE (1 << 24) + * - Enable CTRL1::ACK_MODE (1 << 27) + * + * WARNING! The MX23 is broken in some way, even if it claims + * to support PIO, when we try to transfer any amount of data + * that is not aligned to 4 bytes, the DMA engine will have + * bits in DEBUG1::DMA_BYTES_ENABLES still set even after the + * transfer. This in turn will mess up the next transfer as + * the block it emit one byte write onto the bus terminated + * with a NAK+STOP. A possible workaround is to reset the IP + * block after every PIO transmission, which might just work. + * + * NOTE: The CTRL0::PIO_MODE description is important, since + * it outlines how the PIO mode is really supposed to work. + */ if (msg->flags & I2C_M_RD) { + /* + * PIO READ transfer: + * + * This transfer MUST be limited to 4 bytes maximum. It is not + * possible to transfer more than four bytes via PIO, since we + * can not in any way make sure we can read the data from the + * DATA register fast enough. Besides, the RX FIFO is only four + * bytes deep, thus we can only really read up to four bytes at + * time. Finally, there is no bit indicating us that new data + * arrived at the FIFO and can thus be fetched from the DATA + * register. + */ + BUG_ON(msg->len > 4); + addr_data |= I2C_SMBUS_READ; /* SELECT command. */ - mxs_i2c_pio_trigger_cmd(i2c, MXS_CMD_I2C_SELECT); - - ret = mxs_i2c_pio_wait_dmareq(i2c); - if (ret) - return ret; - - writel(addr_data, i2c->regs + MXS_I2C_DATA); - writel(MXS_I2C_DEBUG0_DMAREQ, i2c->regs + MXS_I2C_DEBUG0_CLR); + mxs_i2c_pio_trigger_write_cmd(i2c, MXS_CMD_I2C_SELECT, + addr_data); - ret = mxs_i2c_pio_wait_cplt(i2c, 0); - if (ret) - return ret; - - if (mxs_i2c_pio_check_error_state(i2c)) + ret = mxs_i2c_pio_wait_xfer_end(i2c); + if (ret) { + dev_err(i2c->dev, + "PIO: Failed to send SELECT command!\n"); goto cleanup; + } /* READ command. */ mxs_i2c_pio_trigger_cmd(i2c, MXS_CMD_I2C_READ | flags | MXS_I2C_CTRL0_XFER_COUNT(msg->len)); + ret = mxs_i2c_pio_wait_xfer_end(i2c); + if (ret) { + dev_err(i2c->dev, + "PIO: Failed to send SELECT command!\n"); + goto cleanup; + } + + data = readl(i2c->regs + MXS_I2C_DATA(i2c)); for (i = 0; i < msg->len; i++) { - if ((i & 3) == 0) { - ret = mxs_i2c_pio_wait_dmareq(i2c); - if (ret) - return ret; - data = readl(i2c->regs + MXS_I2C_DATA); - writel(MXS_I2C_DEBUG0_DMAREQ, - i2c->regs + MXS_I2C_DEBUG0_CLR); - } msg->buf[i] = data & 0xff; data >>= 8; } } else { + /* + * PIO WRITE transfer: + * + * The code below implements clock stretching to circumvent + * the possibility of kernel not being able to supply data + * fast enough. 
It is possible to transfer arbitrary amount + * of data using PIO write. + */ addr_data |= I2C_SMBUS_WRITE; - /* WRITE command. */ - mxs_i2c_pio_trigger_cmd(i2c, - MXS_CMD_I2C_WRITE | flags | - MXS_I2C_CTRL0_XFER_COUNT(msg->len + 1)); - /* * The LSB of data buffer is the first byte blasted across * the bus. Higher order bytes follow. Thus the following * filling schematic. */ + data = addr_data << 24; + + /* Start the transfer with START condition. */ + start = MXS_I2C_CTRL0_PRE_SEND_START; + + /* If the transfer is long, use clock stretching. */ + if (msg->len > 3) + start |= MXS_I2C_CTRL0_RETAIN_CLOCK; + for (i = 0; i < msg->len; i++) { data >>= 8; data |= (msg->buf[i] << 24); - if ((i & 3) == 2) { - ret = mxs_i2c_pio_wait_dmareq(i2c); - if (ret) - return ret; - writel(data, i2c->regs + MXS_I2C_DATA); - writel(MXS_I2C_DEBUG0_DMAREQ, - i2c->regs + MXS_I2C_DEBUG0_CLR); + + xmit = 0; + + /* This is the last transfer of the message. */ + if (i + 1 == msg->len) { + /* Add optional STOP flag. */ + start |= flags; + /* Remove RETAIN_CLOCK bit. */ + start &= ~MXS_I2C_CTRL0_RETAIN_CLOCK; + xmit = 1; } - } - shifts_left = 24 - (i & 3) * 8; - if (shifts_left) { - data >>= shifts_left; - ret = mxs_i2c_pio_wait_dmareq(i2c); - if (ret) - return ret; - writel(data, i2c->regs + MXS_I2C_DATA); + /* Four bytes are ready in the "data" variable. */ + if ((i & 3) == 2) + xmit = 1; + + /* Nothing interesting happened, continue stuffing. */ + if (!xmit) + continue; + + /* + * Compute the size of the transfer and shift the + * data accordingly. + * + * i = (4k + 0) .... xlen = 2 + * i = (4k + 1) .... xlen = 3 + * i = (4k + 2) .... xlen = 4 + * i = (4k + 3) .... xlen = 1 + */ + + if ((i % 4) == 3) + xlen = 1; + else + xlen = (i % 4) + 2; + + data >>= (4 - xlen) * 8; + + dev_dbg(i2c->dev, + "PIO: len=%i pos=%i total=%i [W%s%s%s]\n", + xlen, i, msg->len, + start & MXS_I2C_CTRL0_PRE_SEND_START ? "S" : "", + start & MXS_I2C_CTRL0_POST_SEND_STOP ? "E" : "", + start & MXS_I2C_CTRL0_RETAIN_CLOCK ? "C" : ""); + writel(MXS_I2C_DEBUG0_DMAREQ, - i2c->regs + MXS_I2C_DEBUG0_CLR); + i2c->regs + MXS_I2C_DEBUG0_CLR(i2c)); + + mxs_i2c_pio_trigger_write_cmd(i2c, + start | MXS_I2C_CTRL0_MASTER_MODE | + MXS_I2C_CTRL0_DIRECTION | + MXS_I2C_CTRL0_XFER_COUNT(xlen), data); + + /* The START condition is sent only once. */ + start &= ~MXS_I2C_CTRL0_PRE_SEND_START; + + /* Wait for the end of the transfer. */ + ret = mxs_i2c_pio_wait_xfer_end(i2c); + if (ret) { + dev_err(i2c->dev, + "PIO: Failed to finish WRITE cmd!\n"); + break; + } + + /* Check NAK here. */ + ret = readl(i2c->regs + MXS_I2C_STAT) & + MXS_I2C_STAT_GOT_A_NAK; + if (ret) { + ret = -ENXIO; + goto cleanup; + } } } - ret = mxs_i2c_pio_wait_cplt(i2c, flags & MXS_I2C_CTRL0_POST_SEND_STOP); - if (ret) - return ret; - /* make sure we capture any occurred error into cmd_err */ - mxs_i2c_pio_check_error_state(i2c); + ret = mxs_i2c_pio_check_error_state(i2c); cleanup: /* Clear any dangling IRQs and re-enable interrupts. */ writel(MXS_I2C_IRQ_MASK, i2c->regs + MXS_I2C_CTRL1_CLR); writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET); - return 0; + /* Clear the PIO_MODE on i.MX23 */ + if (i2c->dev_type == MXS_I2C_V1) + writel(MXS_I2C_CTRL0_PIO_MODE, i2c->regs + MXS_I2C_CTRL0_CLR); + + return ret; } /* @@ -479,8 +562,9 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) { struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap); - int ret, err; + int ret; int flags; + int use_pio = 0; flags = stop ? 
MXS_I2C_CTRL0_POST_SEND_STOP : 0; @@ -491,19 +575,21 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, return -EINVAL; /* - * The current boundary to select between PIO/DMA transfer method - * is set to 8 bytes, transfers shorter than 8 bytes are transfered - * using PIO mode while longer transfers use DMA. The 8 byte border is - * based on this empirical measurement and a lot of previous frobbing. + * The MX28 I2C IP block can only do PIO READ for transfer of to up + * 4 bytes of length. The write transfer is not limited as it can use + * clock stretching to avoid FIFO underruns. */ + if ((msg->flags & I2C_M_RD) && (msg->len <= 4)) + use_pio = 1; + if (!(msg->flags & I2C_M_RD) && (msg->len < 7)) + use_pio = 1; + i2c->cmd_err = 0; - if (0) { /* disable PIO mode until a proper fix is made */ + if (use_pio) { ret = mxs_i2c_pio_setup_xfer(adap, msg, flags); - if (ret) { - err = mxs_i2c_reset(i2c); - if (err) - return err; - } + /* No need to reset the block if NAK was received. */ + if (ret && (ret != -ENXIO)) + mxs_i2c_reset(i2c); } else { reinit_completion(&i2c->cmd_complete); ret = mxs_i2c_dma_setup_xfer(adap, msg, flags); @@ -514,9 +600,11 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, msecs_to_jiffies(1000)); if (ret == 0) goto timeout; + + ret = i2c->cmd_err; } - if (i2c->cmd_err == -ENXIO) { + if (ret == -ENXIO) { /* * If the transfer fails with a NAK from the slave the * controller halts until it gets told to return to idle state. @@ -525,7 +613,19 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, i2c->regs + MXS_I2C_CTRL1_SET); } - ret = i2c->cmd_err; + /* + * WARNING! + * The i.MX23 is strange. After each and every operation, it's I2C IP + * block must be reset, otherwise the IP block will misbehave. This can + * be observed on the bus by the block sending out one single byte onto + * the bus. In case such an error happens, bit 27 will be set in the + * DEBUG0 register. This bit is not documented in the i.MX23 datasheet + * and is marked as "TBD" instead. To reset this bit to a correct state, + * reset the whole block. Since the block reset does not take long, do + * reset the block after every transfer to play safe. 
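The PIO-versus-DMA decision added in this hunk reduces to two length thresholds; a compact, illustrative restatement (the helper name is made up, the limits match the code above):

        /* Short transfers go through PIO, everything else through DMA. */
        static bool mxs_i2c_use_pio(struct i2c_msg *msg)
        {
                if (msg->flags & I2C_M_RD)
                        return msg->len <= 4;   /* PIO read limited by the 4-byte RX FIFO */
                return msg->len < 7;            /* PIO write relies on clock stretching */
        }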
+ */ + if (i2c->dev_type == MXS_I2C_V1) + mxs_i2c_reset(i2c); dev_dbg(i2c->dev, "Done with err=%d\n", ret); @@ -680,8 +780,28 @@ static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c) return 0; } +static struct platform_device_id mxs_i2c_devtype[] = { + { + .name = "imx23-i2c", + .driver_data = MXS_I2C_V1, + }, { + .name = "imx28-i2c", + .driver_data = MXS_I2C_V2, + }, { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, mxs_i2c_devtype); + +static const struct of_device_id mxs_i2c_dt_ids[] = { + { .compatible = "fsl,imx23-i2c", .data = &mxs_i2c_devtype[0], }, + { .compatible = "fsl,imx28-i2c", .data = &mxs_i2c_devtype[1], }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mxs_i2c_dt_ids); + static int mxs_i2c_probe(struct platform_device *pdev) { + const struct of_device_id *of_id = + of_match_device(mxs_i2c_dt_ids, &pdev->dev); struct device *dev = &pdev->dev; struct mxs_i2c_dev *i2c; struct i2c_adapter *adap; @@ -693,6 +813,11 @@ static int mxs_i2c_probe(struct platform_device *pdev) if (!i2c) return -ENOMEM; + if (of_id) { + const struct platform_device_id *device_id = of_id->data; + i2c->dev_type = device_id->driver_data; + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); @@ -768,12 +893,6 @@ static int mxs_i2c_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id mxs_i2c_dt_ids[] = { - { .compatible = "fsl,imx28-i2c", }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, mxs_i2c_dt_ids); - static struct platform_driver mxs_i2c_driver = { .driver = { .name = DRIVER_NAME, @@ -796,6 +915,7 @@ static void __exit mxs_i2c_exit(void) } module_exit(mxs_i2c_exit); +MODULE_AUTHOR("Marek Vasut <marex@denx.de>"); MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>"); MODULE_DESCRIPTION("MXS I2C Bus Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index 1a9ea25f2314..c9a352f0a9a5 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -23,6 +23,7 @@ #include <linux/err.h> #include <linux/clk.h> #include <linux/slab.h> +#include <linux/of.h> #define I2C_PNX_TIMEOUT_DEFAULT 10 /* msec */ #define I2C_PNX_SPEED_KHZ_DEFAULT 100 diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index d2fe11da5e82..2c2fd7c2b116 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -33,6 +33,7 @@ #include <linux/i2c/i2c-rcar.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> @@ -102,8 +103,8 @@ enum { #define ID_NACK (1 << 4) enum rcar_i2c_type { - I2C_RCAR_H1, - I2C_RCAR_H2, + I2C_RCAR_GEN1, + I2C_RCAR_GEN2, }; struct rcar_i2c_priv { @@ -226,22 +227,23 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv, u32 bus_speed, struct device *dev) { - struct clk *clkp = clk_get(NULL, "peripheral_clk"); + struct clk *clkp = clk_get(dev, NULL); u32 scgd, cdf; u32 round, ick; u32 scl; u32 cdf_width; + unsigned long rate; - if (!clkp) { - dev_err(dev, "there is no peripheral_clk\n"); - return -EIO; + if (IS_ERR(clkp)) { + dev_err(dev, "couldn't get clock\n"); + return PTR_ERR(clkp); } switch (priv->devtype) { - case I2C_RCAR_H1: + case I2C_RCAR_GEN1: cdf_width = 2; break; - case I2C_RCAR_H2: + case I2C_RCAR_GEN2: cdf_width = 3; break; default: @@ -264,15 +266,14 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv, * clkp : peripheral_clk * F[] : integer up-valuation */ - for (cdf = 
0; cdf < (1 << cdf_width); cdf++) { - ick = clk_get_rate(clkp) / (1 + cdf); - if (ick < 20000000) - goto ick_find; + rate = clk_get_rate(clkp); + cdf = rate / 20000000; + if (cdf >= 1 << cdf_width) { + dev_err(dev, "Input clock %lu too high\n", rate); + return -EIO; } - dev_err(dev, "there is no best CDF\n"); - return -EIO; + ick = rate / (cdf + 1); -ick_find: /* * it is impossible to calculate large scale * number on u32. separate it @@ -290,6 +291,12 @@ ick_find: * * Calculation result (= SCL) should be less than * bus_speed for hardware safety + * + * We could use something along the lines of + * div = ick / (bus_speed + 1) + 1; + * scgd = (div - 20 - round + 7) / 8; + * scl = ick / (20 + (scgd * 8) + round); + * (not fully verified) but that would get pretty involved */ for (scgd = 0; scgd < 0x40; scgd++) { scl = ick / (20 + (scgd * 8) + round); @@ -306,7 +313,7 @@ scgd_find: /* * keep icccr value */ - priv->icccr = (scgd << (cdf_width) | cdf); + priv->icccr = scgd << cdf_width | cdf; return 0; } @@ -632,6 +639,15 @@ static const struct i2c_algorithm rcar_i2c_algo = { .functionality = rcar_i2c_func, }; +static const struct of_device_id rcar_i2c_dt_ids[] = { + { .compatible = "renesas,i2c-rcar", .data = (void *)I2C_RCAR_GEN1 }, + { .compatible = "renesas,i2c-r8a7778", .data = (void *)I2C_RCAR_GEN1 }, + { .compatible = "renesas,i2c-r8a7779", .data = (void *)I2C_RCAR_GEN1 }, + { .compatible = "renesas,i2c-r8a7790", .data = (void *)I2C_RCAR_GEN2 }, + {}, +}; +MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids); + static int rcar_i2c_probe(struct platform_device *pdev) { struct i2c_rcar_platform_data *pdata = dev_get_platdata(&pdev->dev); @@ -649,10 +665,15 @@ static int rcar_i2c_probe(struct platform_device *pdev) } bus_speed = 100000; /* default 100 kHz */ - if (pdata && pdata->bus_speed) + ret = of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed); + if (ret < 0 && pdata && pdata->bus_speed) bus_speed = pdata->bus_speed; - priv->devtype = platform_get_device_id(pdev)->driver_data; + if (pdev->dev.of_node) + priv->devtype = (long)of_match_device(rcar_i2c_dt_ids, + dev)->data; + else + priv->devtype = platform_get_device_id(pdev)->driver_data; ret = rcar_i2c_clock_calculate(priv, bus_speed, dev); if (ret < 0) @@ -673,6 +694,7 @@ static int rcar_i2c_probe(struct platform_device *pdev) adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; adap->retries = 3; adap->dev.parent = dev; + adap->dev.of_node = dev->of_node; i2c_set_adapdata(adap, priv); strlcpy(adap->name, pdev->name, sizeof(adap->name)); @@ -709,9 +731,9 @@ static int rcar_i2c_remove(struct platform_device *pdev) } static struct platform_device_id rcar_i2c_id_table[] = { - { "i2c-rcar", I2C_RCAR_H1 }, - { "i2c-rcar_h1", I2C_RCAR_H1 }, - { "i2c-rcar_h2", I2C_RCAR_H2 }, + { "i2c-rcar", I2C_RCAR_GEN1 }, + { "i2c-rcar_gen1", I2C_RCAR_GEN1 }, + { "i2c-rcar_gen2", I2C_RCAR_GEN2 }, {}, }; MODULE_DEVICE_TABLE(platform, rcar_i2c_id_table); @@ -720,6 +742,7 @@ static struct platform_driver rcar_i2c_driver = { .driver = { .name = "i2c-rcar", .owner = THIS_MODULE, + .of_match_table = rcar_i2c_dt_ids, }, .probe = rcar_i2c_probe, .remove = rcar_i2c_remove, diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 3747b9bf67d6..bf8fb94ebc5d 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -36,6 +36,7 @@ #include <linux/cpufreq.h> #include <linux/slab.h> #include <linux/io.h> +#include <linux/of.h> #include <linux/of_gpio.h> #include <linux/pinctrl/consumer.h> diff --git 
a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c index c447e8d40b78..599235514138 100644 --- a/drivers/i2c/busses/i2c-scmi.c +++ b/drivers/i2c/busses/i2c-scmi.c @@ -223,7 +223,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, goto out; obj = pkg->package.elements + 1; - if (obj == NULL || obj->type != ACPI_TYPE_INTEGER) { + if (obj->type != ACPI_TYPE_INTEGER) { ACPI_ERROR((AE_INFO, "Invalid argument type")); result = -EIO; goto out; @@ -235,7 +235,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: case I2C_SMBUS_WORD_DATA: - if (obj == NULL || obj->type != ACPI_TYPE_INTEGER) { + if (obj->type != ACPI_TYPE_INTEGER) { ACPI_ERROR((AE_INFO, "Invalid argument type")); result = -EIO; goto out; @@ -246,7 +246,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, data->byte = obj->integer.value; break; case I2C_SMBUS_BLOCK_DATA: - if (obj == NULL || obj->type != ACPI_TYPE_BUFFER) { + if (obj->type != ACPI_TYPE_BUFFER) { ACPI_ERROR((AE_INFO, "Invalid argument type")); result = -EIO; goto out; diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 55110ddbed1f..1d79585ba4b3 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -235,7 +235,7 @@ static void sh_mobile_i2c_init(struct sh_mobile_i2c_data *pd) int offset; /* Get clock rate after clock is enabled */ - clk_enable(pd->clk); + clk_prepare_enable(pd->clk); i2c_clk_khz = clk_get_rate(pd->clk) / 1000; i2c_clk_khz /= pd->clks_per_count; @@ -270,14 +270,14 @@ static void sh_mobile_i2c_init(struct sh_mobile_i2c_data *pd) pd->icic &= ~ICIC_ICCHB8; out: - clk_disable(pd->clk); + clk_disable_unprepare(pd->clk); } static void activate_ch(struct sh_mobile_i2c_data *pd) { /* Wake up device and enable clock */ pm_runtime_get_sync(pd->dev); - clk_enable(pd->clk); + clk_prepare_enable(pd->clk); /* Enable channel and configure rx ack */ iic_set_clr(pd, ICCR, ICCR_ICE, 0); @@ -300,7 +300,7 @@ static void deactivate_ch(struct sh_mobile_i2c_data *pd) iic_set_clr(pd, ICCR, 0, ICCR_ICE); /* Disable clock and mark device as idle */ - clk_disable(pd->clk); + clk_disable_unprepare(pd->clk); pm_runtime_put_sync(pd->dev); } diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c new file mode 100644 index 000000000000..9cf715d69551 --- /dev/null +++ b/drivers/i2c/busses/i2c-st.c @@ -0,0 +1,872 @@ +/* + * Copyright (C) 2013 STMicroelectronics + * + * I2C master mode controller driver, used in STMicroelectronics devices. + * + * Author: Maxime Coquelin <maxime.coquelin@st.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/i2c.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/err.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +/* SSC registers */ +#define SSC_BRG 0x000 +#define SSC_TBUF 0x004 +#define SSC_RBUF 0x008 +#define SSC_CTL 0x00C +#define SSC_IEN 0x010 +#define SSC_STA 0x014 +#define SSC_I2C 0x018 +#define SSC_SLAD 0x01C +#define SSC_REP_START_HOLD 0x020 +#define SSC_START_HOLD 0x024 +#define SSC_REP_START_SETUP 0x028 +#define SSC_DATA_SETUP 0x02C +#define SSC_STOP_SETUP 0x030 +#define SSC_BUS_FREE 0x034 +#define SSC_TX_FSTAT 0x038 +#define SSC_RX_FSTAT 0x03C +#define SSC_PRE_SCALER_BRG 0x040 +#define SSC_CLR 0x080 +#define SSC_NOISE_SUPP_WIDTH 0x100 +#define SSC_PRSCALER 0x104 +#define SSC_NOISE_SUPP_WIDTH_DATAOUT 0x108 +#define SSC_PRSCALER_DATAOUT 0x10c + +/* SSC Control */ +#define SSC_CTL_DATA_WIDTH_9 0x8 +#define SSC_CTL_DATA_WIDTH_MSK 0xf +#define SSC_CTL_BM 0xf +#define SSC_CTL_HB BIT(4) +#define SSC_CTL_PH BIT(5) +#define SSC_CTL_PO BIT(6) +#define SSC_CTL_SR BIT(7) +#define SSC_CTL_MS BIT(8) +#define SSC_CTL_EN BIT(9) +#define SSC_CTL_LPB BIT(10) +#define SSC_CTL_EN_TX_FIFO BIT(11) +#define SSC_CTL_EN_RX_FIFO BIT(12) +#define SSC_CTL_EN_CLST_RX BIT(13) + +/* SSC Interrupt Enable */ +#define SSC_IEN_RIEN BIT(0) +#define SSC_IEN_TIEN BIT(1) +#define SSC_IEN_TEEN BIT(2) +#define SSC_IEN_REEN BIT(3) +#define SSC_IEN_PEEN BIT(4) +#define SSC_IEN_AASEN BIT(6) +#define SSC_IEN_STOPEN BIT(7) +#define SSC_IEN_ARBLEN BIT(8) +#define SSC_IEN_NACKEN BIT(10) +#define SSC_IEN_REPSTRTEN BIT(11) +#define SSC_IEN_TX_FIFO_HALF BIT(12) +#define SSC_IEN_RX_FIFO_HALF_FULL BIT(14) + +/* SSC Status */ +#define SSC_STA_RIR BIT(0) +#define SSC_STA_TIR BIT(1) +#define SSC_STA_TE BIT(2) +#define SSC_STA_RE BIT(3) +#define SSC_STA_PE BIT(4) +#define SSC_STA_CLST BIT(5) +#define SSC_STA_AAS BIT(6) +#define SSC_STA_STOP BIT(7) +#define SSC_STA_ARBL BIT(8) +#define SSC_STA_BUSY BIT(9) +#define SSC_STA_NACK BIT(10) +#define SSC_STA_REPSTRT BIT(11) +#define SSC_STA_TX_FIFO_HALF BIT(12) +#define SSC_STA_TX_FIFO_FULL BIT(13) +#define SSC_STA_RX_FIFO_HALF BIT(14) + +/* SSC I2C Control */ +#define SSC_I2C_I2CM BIT(0) +#define SSC_I2C_STRTG BIT(1) +#define SSC_I2C_STOPG BIT(2) +#define SSC_I2C_ACKG BIT(3) +#define SSC_I2C_AD10 BIT(4) +#define SSC_I2C_TXENB BIT(5) +#define SSC_I2C_REPSTRTG BIT(11) +#define SSC_I2C_SLAVE_DISABLE BIT(12) + +/* SSC Tx FIFO Status */ +#define SSC_TX_FSTAT_STATUS 0x07 + +/* SSC Rx FIFO Status */ +#define SSC_RX_FSTAT_STATUS 0x07 + +/* SSC Clear bit operation */ +#define SSC_CLR_SSCAAS BIT(6) +#define SSC_CLR_SSCSTOP BIT(7) +#define SSC_CLR_SSCARBL BIT(8) +#define SSC_CLR_NACK BIT(10) +#define SSC_CLR_REPSTRT BIT(11) + +/* SSC Clock Prescaler */ +#define SSC_PRSC_VALUE 0x0f + + +#define SSC_TXFIFO_SIZE 0x8 +#define SSC_RXFIFO_SIZE 0x8 + +enum st_i2c_mode { + I2C_MODE_STANDARD, + I2C_MODE_FAST, + I2C_MODE_END, +}; + +/** + * struct st_i2c_timings - per-Mode tuning parameters + * @rate: I2C bus rate + * @rep_start_hold: I2C repeated start hold time requirement + * @rep_start_setup: I2C repeated start set up time requirement + * @start_hold: I2C start hold time requirement + * @data_setup_time: I2C data set up time requirement + * @stop_setup_time: I2C stop set up time requirement + * @bus_free_time: I2C bus free time requirement + * @sda_pulse_min_limit: I2C SDA pulse mini width limit + */ +struct 
st_i2c_timings { + u32 rate; + u32 rep_start_hold; + u32 rep_start_setup; + u32 start_hold; + u32 data_setup_time; + u32 stop_setup_time; + u32 bus_free_time; + u32 sda_pulse_min_limit; +}; + +/** + * struct st_i2c_client - client specific data + * @addr: 8-bit slave addr, including r/w bit + * @count: number of bytes to be transfered + * @xfered: number of bytes already transferred + * @buf: data buffer + * @result: result of the transfer + * @stop: last I2C msg to be sent, i.e. STOP to be generated + */ +struct st_i2c_client { + u8 addr; + u32 count; + u32 xfered; + u8 *buf; + int result; + bool stop; +}; + +/** + * struct st_i2c_dev - private data of the controller + * @adap: I2C adapter for this controller + * @dev: device for this controller + * @base: virtual memory area + * @complete: completion of I2C message + * @irq: interrupt line for th controller + * @clk: hw ssc block clock + * @mode: I2C mode of the controller. Standard or Fast only supported + * @scl_min_width_us: SCL line minimum pulse width in us + * @sda_min_width_us: SDA line minimum pulse width in us + * @client: I2C transfert information + * @busy: I2C transfer on-going + */ +struct st_i2c_dev { + struct i2c_adapter adap; + struct device *dev; + void __iomem *base; + struct completion complete; + int irq; + struct clk *clk; + int mode; + u32 scl_min_width_us; + u32 sda_min_width_us; + struct st_i2c_client client; + bool busy; +}; + +static inline void st_i2c_set_bits(void __iomem *reg, u32 mask) +{ + writel_relaxed(readl_relaxed(reg) | mask, reg); +} + +static inline void st_i2c_clr_bits(void __iomem *reg, u32 mask) +{ + writel_relaxed(readl_relaxed(reg) & ~mask, reg); +} + +/* From I2C Specifications v0.5 */ +static struct st_i2c_timings i2c_timings[] = { + [I2C_MODE_STANDARD] = { + .rate = 100000, + .rep_start_hold = 4000, + .rep_start_setup = 4700, + .start_hold = 4000, + .data_setup_time = 250, + .stop_setup_time = 4000, + .bus_free_time = 4700, + }, + [I2C_MODE_FAST] = { + .rate = 400000, + .rep_start_hold = 600, + .rep_start_setup = 600, + .start_hold = 600, + .data_setup_time = 100, + .stop_setup_time = 600, + .bus_free_time = 1300, + }, +}; + +static void st_i2c_flush_rx_fifo(struct st_i2c_dev *i2c_dev) +{ + int count, i; + + /* + * Counter only counts up to 7 but fifo size is 8... + * When fifo is full, counter is 0 and RIR bit of status register is + * set + */ + if (readl_relaxed(i2c_dev->base + SSC_STA) & SSC_STA_RIR) + count = SSC_RXFIFO_SIZE; + else + count = readl_relaxed(i2c_dev->base + SSC_RX_FSTAT) & + SSC_RX_FSTAT_STATUS; + + for (i = 0; i < count; i++) + readl_relaxed(i2c_dev->base + SSC_RBUF); +} + +static void st_i2c_soft_reset(struct st_i2c_dev *i2c_dev) +{ + /* + * FIFO needs to be emptied before reseting the IP, + * else the controller raises a BUSY error. 
+ */ + st_i2c_flush_rx_fifo(i2c_dev); + + st_i2c_set_bits(i2c_dev->base + SSC_CTL, SSC_CTL_SR); + st_i2c_clr_bits(i2c_dev->base + SSC_CTL, SSC_CTL_SR); +} + +/** + * st_i2c_hw_config() - Prepare SSC block, calculate and apply tuning timings + * @i2c_dev: Controller's private data + */ +static void st_i2c_hw_config(struct st_i2c_dev *i2c_dev) +{ + unsigned long rate; + u32 val, ns_per_clk; + struct st_i2c_timings *t = &i2c_timings[i2c_dev->mode]; + + st_i2c_soft_reset(i2c_dev); + + val = SSC_CLR_REPSTRT | SSC_CLR_NACK | SSC_CLR_SSCARBL | + SSC_CLR_SSCAAS | SSC_CLR_SSCSTOP; + writel_relaxed(val, i2c_dev->base + SSC_CLR); + + /* SSC Control register setup */ + val = SSC_CTL_PO | SSC_CTL_PH | SSC_CTL_HB | SSC_CTL_DATA_WIDTH_9; + writel_relaxed(val, i2c_dev->base + SSC_CTL); + + rate = clk_get_rate(i2c_dev->clk); + ns_per_clk = 1000000000 / rate; + + /* Baudrate */ + val = rate / (2 * t->rate); + writel_relaxed(val, i2c_dev->base + SSC_BRG); + + /* Pre-scaler baudrate */ + writel_relaxed(1, i2c_dev->base + SSC_PRE_SCALER_BRG); + + /* Enable I2C mode */ + writel_relaxed(SSC_I2C_I2CM, i2c_dev->base + SSC_I2C); + + /* Repeated start hold time */ + val = t->rep_start_hold / ns_per_clk; + writel_relaxed(val, i2c_dev->base + SSC_REP_START_HOLD); + + /* Repeated start set up time */ + val = t->rep_start_setup / ns_per_clk; + writel_relaxed(val, i2c_dev->base + SSC_REP_START_SETUP); + + /* Start hold time */ + val = t->start_hold / ns_per_clk; + writel_relaxed(val, i2c_dev->base + SSC_START_HOLD); + + /* Data set up time */ + val = t->data_setup_time / ns_per_clk; + writel_relaxed(val, i2c_dev->base + SSC_DATA_SETUP); + + /* Stop set up time */ + val = t->stop_setup_time / ns_per_clk; + writel_relaxed(val, i2c_dev->base + SSC_STOP_SETUP); + + /* Bus free time */ + val = t->bus_free_time / ns_per_clk; + writel_relaxed(val, i2c_dev->base + SSC_BUS_FREE); + + /* Prescalers set up */ + val = rate / 10000000; + writel_relaxed(val, i2c_dev->base + SSC_PRSCALER); + writel_relaxed(val, i2c_dev->base + SSC_PRSCALER_DATAOUT); + + /* Noise suppression witdh */ + val = i2c_dev->scl_min_width_us * rate / 100000000; + writel_relaxed(val, i2c_dev->base + SSC_NOISE_SUPP_WIDTH); + + /* Noise suppression max output data delay width */ + val = i2c_dev->sda_min_width_us * rate / 100000000; + writel_relaxed(val, i2c_dev->base + SSC_NOISE_SUPP_WIDTH_DATAOUT); +} + +static int st_i2c_wait_free_bus(struct st_i2c_dev *i2c_dev) +{ + u32 sta; + int i; + + for (i = 0; i < 10; i++) { + sta = readl_relaxed(i2c_dev->base + SSC_STA); + if (!(sta & SSC_STA_BUSY)) + return 0; + + usleep_range(2000, 4000); + } + + dev_err(i2c_dev->dev, "bus not free (status = 0x%08x)\n", sta); + + return -EBUSY; +} + +/** + * st_i2c_write_tx_fifo() - Write a byte in the Tx FIFO + * @i2c_dev: Controller's private data + * @byte: Data to write in the Tx FIFO + */ +static inline void st_i2c_write_tx_fifo(struct st_i2c_dev *i2c_dev, u8 byte) +{ + u16 tbuf = byte << 1; + + writel_relaxed(tbuf | 1, i2c_dev->base + SSC_TBUF); +} + +/** + * st_i2c_wr_fill_tx_fifo() - Fill the Tx FIFO in write mode + * @i2c_dev: Controller's private data + * + * This functions fills the Tx FIFO with I2C transfert buffer when + * in write mode. 
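Looking back at st_i2c_hw_config() above, the timing programming is plain unit conversion. A small worked example with assumed numbers (100 MHz SSC clock, the standard-mode entry of i2c_timings[]) shows the values that end up in BRG and REP_START_HOLD; the figures are illustrative, not taken from a real board:

        /* Standalone sketch of the nanosecond-to-clock conversions done above. */
        #include <stdio.h>

        int main(void)
        {
                unsigned long rate = 100000000;                 /* assumed SSC clock */
                unsigned int ns_per_clk = 1000000000 / rate;    /* 10 ns per clock */
                unsigned int bus_rate = 100000;                 /* standard mode, 100 kHz */
                unsigned int rep_start_hold = 4000;             /* ns, from i2c_timings[] */

                printf("BRG divider    : %lu\n", rate / (2 * bus_rate));            /* 500 */
                printf("rep start hold : %u clocks\n", rep_start_hold / ns_per_clk); /* 400 */
                return 0;
        }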
+ */ +static void st_i2c_wr_fill_tx_fifo(struct st_i2c_dev *i2c_dev) +{ + struct st_i2c_client *c = &i2c_dev->client; + u32 tx_fstat, sta; + int i; + + sta = readl_relaxed(i2c_dev->base + SSC_STA); + if (sta & SSC_STA_TX_FIFO_FULL) + return; + + tx_fstat = readl_relaxed(i2c_dev->base + SSC_TX_FSTAT); + tx_fstat &= SSC_TX_FSTAT_STATUS; + + if (c->count < (SSC_TXFIFO_SIZE - tx_fstat)) + i = c->count; + else + i = SSC_TXFIFO_SIZE - tx_fstat; + + for (; i > 0; i--, c->count--, c->buf++) + st_i2c_write_tx_fifo(i2c_dev, *c->buf); +} + +/** + * st_i2c_rd_fill_tx_fifo() - Fill the Tx FIFO in read mode + * @i2c_dev: Controller's private data + * + * This functions fills the Tx FIFO with fixed pattern when + * in read mode to trigger clock. + */ +static void st_i2c_rd_fill_tx_fifo(struct st_i2c_dev *i2c_dev, int max) +{ + struct st_i2c_client *c = &i2c_dev->client; + u32 tx_fstat, sta; + int i; + + sta = readl_relaxed(i2c_dev->base + SSC_STA); + if (sta & SSC_STA_TX_FIFO_FULL) + return; + + tx_fstat = readl_relaxed(i2c_dev->base + SSC_TX_FSTAT); + tx_fstat &= SSC_TX_FSTAT_STATUS; + + if (max < (SSC_TXFIFO_SIZE - tx_fstat)) + i = max; + else + i = SSC_TXFIFO_SIZE - tx_fstat; + + for (; i > 0; i--, c->xfered++) + st_i2c_write_tx_fifo(i2c_dev, 0xff); +} + +static void st_i2c_read_rx_fifo(struct st_i2c_dev *i2c_dev) +{ + struct st_i2c_client *c = &i2c_dev->client; + u32 i, sta; + u16 rbuf; + + sta = readl_relaxed(i2c_dev->base + SSC_STA); + if (sta & SSC_STA_RIR) { + i = SSC_RXFIFO_SIZE; + } else { + i = readl_relaxed(i2c_dev->base + SSC_RX_FSTAT); + i &= SSC_RX_FSTAT_STATUS; + } + + for (; (i > 0) && (c->count > 0); i--, c->count--) { + rbuf = readl_relaxed(i2c_dev->base + SSC_RBUF) >> 1; + *c->buf++ = (u8)rbuf & 0xff; + } + + if (i) { + dev_err(i2c_dev->dev, "Unexpected %d bytes in rx fifo\n", i); + st_i2c_flush_rx_fifo(i2c_dev); + } +} + +/** + * st_i2c_terminate_xfer() - Send either STOP or REPSTART condition + * @i2c_dev: Controller's private data + */ +static void st_i2c_terminate_xfer(struct st_i2c_dev *i2c_dev) +{ + struct st_i2c_client *c = &i2c_dev->client; + + st_i2c_clr_bits(i2c_dev->base + SSC_IEN, SSC_IEN_TEEN); + st_i2c_clr_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STRTG); + + if (c->stop) { + st_i2c_set_bits(i2c_dev->base + SSC_IEN, SSC_IEN_STOPEN); + st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STOPG); + } else { + st_i2c_set_bits(i2c_dev->base + SSC_IEN, SSC_IEN_REPSTRTEN); + st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_REPSTRTG); + } +} + +/** + * st_i2c_handle_write() - Handle FIFO empty interrupt in case of write + * @i2c_dev: Controller's private data + */ +static void st_i2c_handle_write(struct st_i2c_dev *i2c_dev) +{ + struct st_i2c_client *c = &i2c_dev->client; + + st_i2c_flush_rx_fifo(i2c_dev); + + if (!c->count) + /* End of xfer, send stop or repstart */ + st_i2c_terminate_xfer(i2c_dev); + else + st_i2c_wr_fill_tx_fifo(i2c_dev); +} + +/** + * st_i2c_handle_write() - Handle FIFO enmpty interrupt in case of read + * @i2c_dev: Controller's private data + */ +static void st_i2c_handle_read(struct st_i2c_dev *i2c_dev) +{ + struct st_i2c_client *c = &i2c_dev->client; + u32 ien; + + /* Trash the address read back */ + if (!c->xfered) { + readl_relaxed(i2c_dev->base + SSC_RBUF); + st_i2c_clr_bits(i2c_dev->base + SSC_I2C, SSC_I2C_TXENB); + } else { + st_i2c_read_rx_fifo(i2c_dev); + } + + if (!c->count) { + /* End of xfer, send stop or repstart */ + st_i2c_terminate_xfer(i2c_dev); + } else if (c->count == 1) { + /* Penultimate byte to xfer, disable ACK gen. 
*/ + st_i2c_clr_bits(i2c_dev->base + SSC_I2C, SSC_I2C_ACKG); + + /* Last received byte is to be handled by NACK interrupt */ + ien = SSC_IEN_NACKEN | SSC_IEN_ARBLEN; + writel_relaxed(ien, i2c_dev->base + SSC_IEN); + + st_i2c_rd_fill_tx_fifo(i2c_dev, c->count); + } else { + st_i2c_rd_fill_tx_fifo(i2c_dev, c->count - 1); + } +} + +/** + * st_i2c_isr() - Interrupt routine + * @irq: interrupt number + * @data: Controller's private data + */ +static irqreturn_t st_i2c_isr_thread(int irq, void *data) +{ + struct st_i2c_dev *i2c_dev = data; + struct st_i2c_client *c = &i2c_dev->client; + u32 sta, ien; + int it; + + ien = readl_relaxed(i2c_dev->base + SSC_IEN); + sta = readl_relaxed(i2c_dev->base + SSC_STA); + + /* Use __fls() to check error bits first */ + it = __fls(sta & ien); + if (it < 0) { + dev_dbg(i2c_dev->dev, "spurious it (sta=0x%04x, ien=0x%04x)\n", + sta, ien); + return IRQ_NONE; + } + + switch (1 << it) { + case SSC_STA_TE: + if (c->addr & I2C_M_RD) + st_i2c_handle_read(i2c_dev); + else + st_i2c_handle_write(i2c_dev); + break; + + case SSC_STA_STOP: + case SSC_STA_REPSTRT: + writel_relaxed(0, i2c_dev->base + SSC_IEN); + complete(&i2c_dev->complete); + break; + + case SSC_STA_NACK: + writel_relaxed(SSC_CLR_NACK, i2c_dev->base + SSC_CLR); + + /* Last received byte handled by NACK interrupt */ + if ((c->addr & I2C_M_RD) && (c->count == 1) && (c->xfered)) { + st_i2c_handle_read(i2c_dev); + break; + } + + it = SSC_IEN_STOPEN | SSC_IEN_ARBLEN; + writel_relaxed(it, i2c_dev->base + SSC_IEN); + + st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STOPG); + c->result = -EIO; + break; + + case SSC_STA_ARBL: + writel_relaxed(SSC_CLR_SSCARBL, i2c_dev->base + SSC_CLR); + + it = SSC_IEN_STOPEN | SSC_IEN_ARBLEN; + writel_relaxed(it, i2c_dev->base + SSC_IEN); + + st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STOPG); + c->result = -EIO; + break; + + default: + dev_err(i2c_dev->dev, + "it %d unhandled (sta=0x%04x)\n", it, sta); + } + + /* + * Read IEN register to ensure interrupt mask write is effective + * before re-enabling interrupt at GIC level, and thus avoid spurious + * interrupts. 
+ */ + readl(i2c_dev->base + SSC_IEN); + + return IRQ_HANDLED; +} + +/** + * st_i2c_xfer_msg() - Transfer a single I2C message + * @i2c_dev: Controller's private data + * @msg: I2C message to transfer + * @is_first: first message of the sequence + * @is_last: last message of the sequence + */ +static int st_i2c_xfer_msg(struct st_i2c_dev *i2c_dev, struct i2c_msg *msg, + bool is_first, bool is_last) +{ + struct st_i2c_client *c = &i2c_dev->client; + u32 ctl, i2c, it; + unsigned long timeout; + int ret; + + c->addr = (u8)(msg->addr << 1); + c->addr |= (msg->flags & I2C_M_RD); + c->buf = msg->buf; + c->count = msg->len; + c->xfered = 0; + c->result = 0; + c->stop = is_last; + + reinit_completion(&i2c_dev->complete); + + ctl = SSC_CTL_EN | SSC_CTL_MS | SSC_CTL_EN_RX_FIFO | SSC_CTL_EN_TX_FIFO; + st_i2c_set_bits(i2c_dev->base + SSC_CTL, ctl); + + i2c = SSC_I2C_TXENB; + if (c->addr & I2C_M_RD) + i2c |= SSC_I2C_ACKG; + st_i2c_set_bits(i2c_dev->base + SSC_I2C, i2c); + + /* Write slave address */ + st_i2c_write_tx_fifo(i2c_dev, c->addr); + + /* Pre-fill Tx fifo with data in case of write */ + if (!(c->addr & I2C_M_RD)) + st_i2c_wr_fill_tx_fifo(i2c_dev); + + it = SSC_IEN_NACKEN | SSC_IEN_TEEN | SSC_IEN_ARBLEN; + writel_relaxed(it, i2c_dev->base + SSC_IEN); + + if (is_first) { + ret = st_i2c_wait_free_bus(i2c_dev); + if (ret) + return ret; + + st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STRTG); + } + + timeout = wait_for_completion_timeout(&i2c_dev->complete, + i2c_dev->adap.timeout); + ret = c->result; + + if (!timeout) { + dev_err(i2c_dev->dev, "Write to slave 0x%x timed out\n", + c->addr); + ret = -ETIMEDOUT; + } + + i2c = SSC_I2C_STOPG | SSC_I2C_REPSTRTG; + st_i2c_clr_bits(i2c_dev->base + SSC_I2C, i2c); + + writel_relaxed(SSC_CLR_SSCSTOP | SSC_CLR_REPSTRT, + i2c_dev->base + SSC_CLR); + + return ret; +} + +/** + * st_i2c_xfer() - Transfer a single I2C message + * @i2c_adap: Adapter pointer to the controller + * @msgs: Pointer to data to be written. + * @num: Number of messages to be executed + */ +static int st_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg msgs[], int num) +{ + struct st_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap); + int ret, i; + + i2c_dev->busy = true; + + ret = clk_prepare_enable(i2c_dev->clk); + if (ret) { + dev_err(i2c_dev->dev, "Failed to prepare_enable clock\n"); + return ret; + } + + pinctrl_pm_select_default_state(i2c_dev->dev); + + st_i2c_hw_config(i2c_dev); + + for (i = 0; (i < num) && !ret; i++) + ret = st_i2c_xfer_msg(i2c_dev, &msgs[i], i == 0, i == num - 1); + + pinctrl_pm_select_idle_state(i2c_dev->dev); + + clk_disable_unprepare(i2c_dev->clk); + + i2c_dev->busy = false; + + return (ret < 0) ? 
ret : i; +} + +#ifdef CONFIG_PM_SLEEP +static int st_i2c_suspend(struct device *dev) +{ + struct platform_device *pdev = + container_of(dev, struct platform_device, dev); + struct st_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + + if (i2c_dev->busy) + return -EBUSY; + + pinctrl_pm_select_sleep_state(dev); + + return 0; +} + +static int st_i2c_resume(struct device *dev) +{ + pinctrl_pm_select_default_state(dev); + /* Go in idle state if available */ + pinctrl_pm_select_idle_state(dev); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(st_i2c_pm, st_i2c_suspend, st_i2c_resume); +#define ST_I2C_PM (&st_i2c_pm) +#else +#define ST_I2C_PM NULL +#endif + +static u32 st_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static struct i2c_algorithm st_i2c_algo = { + .master_xfer = st_i2c_xfer, + .functionality = st_i2c_func, +}; + +static int st_i2c_of_get_deglitch(struct device_node *np, + struct st_i2c_dev *i2c_dev) +{ + int ret; + + ret = of_property_read_u32(np, "st,i2c-min-scl-pulse-width-us", + &i2c_dev->scl_min_width_us); + if ((ret == -ENODATA) || (ret == -EOVERFLOW)) { + dev_err(i2c_dev->dev, "st,i2c-min-scl-pulse-width-us invalid\n"); + return ret; + } + + ret = of_property_read_u32(np, "st,i2c-min-sda-pulse-width-us", + &i2c_dev->sda_min_width_us); + if ((ret == -ENODATA) || (ret == -EOVERFLOW)) { + dev_err(i2c_dev->dev, "st,i2c-min-sda-pulse-width-us invalid\n"); + return ret; + } + + return 0; +} + +static int st_i2c_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct st_i2c_dev *i2c_dev; + struct resource *res; + u32 clk_rate; + struct i2c_adapter *adap; + int ret; + + i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL); + if (!i2c_dev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + i2c_dev->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(i2c_dev->base)) + return PTR_ERR(i2c_dev->base); + + i2c_dev->irq = irq_of_parse_and_map(np, 0); + if (!i2c_dev->irq) { + dev_err(&pdev->dev, "IRQ missing or invalid\n"); + return -EINVAL; + } + + i2c_dev->clk = of_clk_get_by_name(np, "ssc"); + if (IS_ERR(i2c_dev->clk)) { + dev_err(&pdev->dev, "Unable to request clock\n"); + return PTR_ERR(i2c_dev->clk); + } + + i2c_dev->mode = I2C_MODE_STANDARD; + ret = of_property_read_u32(np, "clock-frequency", &clk_rate); + if ((!ret) && (clk_rate == 400000)) + i2c_dev->mode = I2C_MODE_FAST; + + i2c_dev->dev = &pdev->dev; + + ret = devm_request_threaded_irq(&pdev->dev, i2c_dev->irq, + NULL, st_i2c_isr_thread, + IRQF_ONESHOT, pdev->name, i2c_dev); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq); + return ret; + } + + pinctrl_pm_select_default_state(i2c_dev->dev); + /* In case idle state available, select it */ + pinctrl_pm_select_idle_state(i2c_dev->dev); + + ret = st_i2c_of_get_deglitch(np, i2c_dev); + if (ret) + return ret; + + adap = &i2c_dev->adap; + i2c_set_adapdata(adap, i2c_dev); + snprintf(adap->name, sizeof(adap->name), "ST I2C(0x%x)", res->start); + adap->owner = THIS_MODULE; + adap->timeout = 2 * HZ; + adap->retries = 0; + adap->algo = &st_i2c_algo; + adap->dev.parent = &pdev->dev; + adap->dev.of_node = pdev->dev.of_node; + + init_completion(&i2c_dev->complete); + + ret = i2c_add_adapter(adap); + if (ret) { + dev_err(&pdev->dev, "Failed to add adapter\n"); + return ret; + } + + platform_set_drvdata(pdev, i2c_dev); + + dev_info(i2c_dev->dev, "%s initialized\n", adap->name); + + return 0; +} + +static int st_i2c_remove(struct platform_device 
*pdev) +{ + struct st_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + + i2c_del_adapter(&i2c_dev->adap); + + return 0; +} + +static struct of_device_id st_i2c_match[] = { + { .compatible = "st,comms-ssc-i2c", }, + { .compatible = "st,comms-ssc4-i2c", }, + {}, +}; +MODULE_DEVICE_TABLE(of, st_i2c_match); + +static struct platform_driver st_i2c_driver = { + .driver = { + .name = "st-i2c", + .owner = THIS_MODULE, + .of_match_table = st_i2c_match, + .pm = ST_I2C_PM, + }, + .probe = st_i2c_probe, + .remove = st_i2c_remove, +}; + +module_platform_driver(st_i2c_driver); + +MODULE_AUTHOR("Maxime Coquelin <maxime.coquelin@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics I2C driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c index 31395fa8121d..2c8a3e4f9008 100644 --- a/drivers/i2c/busses/i2c-wmt.c +++ b/drivers/i2c/busses/i2c-wmt.c @@ -349,6 +349,7 @@ static int wmt_i2c_reset_hardware(struct wmt_i2c_dev *i2c_dev) err = clk_set_rate(i2c_dev->clk, 20000000); if (err) { dev_err(i2c_dev->dev, "failed to set clock = 20Mhz\n"); + clk_disable_unprepare(i2c_dev->clk); return err; } diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 4c8b368d463b..fc2716afdfd9 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -40,6 +40,7 @@ #include <linux/i2c-xiic.h> #include <linux/io.h> #include <linux/slab.h> +#include <linux/of.h> #define DRIVER_NAME "xiic-i2c" @@ -702,7 +703,7 @@ static int xiic_i2c_probe(struct platform_device *pdev) if (irq < 0) goto resource_missing; - pdata = (struct xiic_i2c_platform_data *)dev_get_platdata(&pdev->dev); + pdata = dev_get_platdata(&pdev->dev); i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); if (!i2c) diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 75ba8608383e..d74c0b34248e 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -248,7 +248,7 @@ static int i2c_device_probe(struct device *dev) driver = to_i2c_driver(dev->driver); if (!driver->probe || !driver->id_table) return -ENODEV; - client->driver = driver; + if (!device_can_wakeup(&client->dev)) device_init_wakeup(&client->dev, client->flags & I2C_CLIENT_WAKE); @@ -257,7 +257,6 @@ static int i2c_device_probe(struct device *dev) acpi_dev_pm_attach(&client->dev, true); status = driver->probe(client, i2c_match_id(driver->id_table, client)); if (status) { - client->driver = NULL; i2c_set_clientdata(client, NULL); acpi_dev_pm_detach(&client->dev, true); } @@ -281,10 +280,8 @@ static int i2c_device_remove(struct device *dev) dev->driver = NULL; status = 0; } - if (status == 0) { - client->driver = NULL; + if (status == 0) i2c_set_clientdata(client, NULL); - } acpi_dev_pm_detach(&client->dev, true); return status; } @@ -618,6 +615,22 @@ void i2c_unlock_adapter(struct i2c_adapter *adapter) } EXPORT_SYMBOL_GPL(i2c_unlock_adapter); +static void i2c_dev_set_name(struct i2c_adapter *adap, + struct i2c_client *client) +{ + struct acpi_device *adev = ACPI_COMPANION(&client->dev); + + if (adev) { + dev_set_name(&client->dev, "i2c-%s", acpi_dev_name(adev)); + return; + } + + /* For 10-bit clients, add an arbitrary offset to avoid collisions */ + dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), + client->addr | ((client->flags & I2C_CLIENT_TEN) + ? 
0xa000 : 0)); +} + /** * i2c_new_device - instantiate an i2c device * @adap: the adapter managing the device @@ -674,12 +687,9 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info) client->dev.bus = &i2c_bus_type; client->dev.type = &i2c_client_type; client->dev.of_node = info->of_node; - ACPI_HANDLE_SET(&client->dev, info->acpi_node.handle); + ACPI_COMPANION_SET(&client->dev, info->acpi_node.companion); - /* For 10-bit clients, add an arbitrary offset to avoid collisions */ - dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), - client->addr | ((client->flags & I2C_CLIENT_TEN) - ? 0xa000 : 0)); + i2c_dev_set_name(adap, client); status = device_register(&client->dev); if (status) goto out_err; @@ -1103,7 +1113,7 @@ static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level, return AE_OK; memset(&info, 0, sizeof(info)); - info.acpi_node.handle = handle; + info.acpi_node.companion = adev; info.irq = -1; INIT_LIST_HEAD(&resource_list); @@ -1614,9 +1624,14 @@ static int i2c_cmd(struct device *dev, void *_arg) { struct i2c_client *client = i2c_verify_client(dev); struct i2c_cmd_arg *arg = _arg; + struct i2c_driver *driver; + + if (!client || !client->dev.driver) + return 0; - if (client && client->driver && client->driver->command) - client->driver->command(client, arg->cmd, arg->arg); + driver = to_i2c_driver(client->dev.driver); + if (driver->command) + driver->command(client, arg->cmd, arg->arg); return 0; } diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index c3ccdea3d180..80b47e8ce030 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -102,8 +102,8 @@ static void return_i2c_dev(struct i2c_dev *i2c_dev) kfree(i2c_dev); } -static ssize_t show_adapter_name(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t name_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct i2c_dev *i2c_dev = i2c_dev_get_by_minor(MINOR(dev->devt)); @@ -111,7 +111,13 @@ static ssize_t show_adapter_name(struct device *dev, return -ENODEV; return sprintf(buf, "%s\n", i2c_dev->adap->name); } -static DEVICE_ATTR(name, S_IRUGO, show_adapter_name, NULL); +static DEVICE_ATTR_RO(name); + +static struct attribute *i2c_attrs[] = { + &dev_attr_name.attr, + NULL, +}; +ATTRIBUTE_GROUPS(i2c); /* ------------------------------------------------------------------------- */ @@ -562,15 +568,10 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy) res = PTR_ERR(i2c_dev->dev); goto error; } - res = device_create_file(i2c_dev->dev, &dev_attr_name); - if (res) - goto error_destroy; pr_debug("i2c-dev: adapter [%s] registered as minor %d\n", adap->name, adap->nr); return 0; -error_destroy: - device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr)); error: return_i2c_dev(i2c_dev); return res; @@ -589,7 +590,6 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy) if (!i2c_dev) /* attach_adapter must have failed */ return 0; - device_remove_file(i2c_dev->dev, &dev_attr_name); return_i2c_dev(i2c_dev); device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr)); @@ -637,6 +637,7 @@ static int __init i2c_dev_init(void) res = PTR_ERR(i2c_dev_class); goto out_unreg_chrdev; } + i2c_dev_class->dev_groups = i2c_groups; /* Keep track of adapters which will be added or removed later */ res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier); diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c index 44d4c6071c15..c99b22987366 100644 --- a/drivers/i2c/i2c-smbus.c +++ b/drivers/i2c/i2c-smbus.c 
@@ -46,6 +46,7 @@ static int smbus_do_alert(struct device *dev, void *addrp) { struct i2c_client *client = i2c_verify_client(dev); struct alert_data *data = addrp; + struct i2c_driver *driver; if (!client || client->addr != data->addr) return 0; @@ -54,12 +55,13 @@ static int smbus_do_alert(struct device *dev, void *addrp) /* * Drivers should either disable alerts, or provide at least - * a minimal handler. Lock so client->driver won't change. + * a minimal handler. Lock so the driver won't change. */ device_lock(dev); - if (client->driver) { - if (client->driver->alert) - client->driver->alert(client, data->flag); + if (client->dev.driver) { + driver = to_i2c_driver(client->dev.driver); + if (driver->alert) + driver->alert(client, data->flag); else dev_warn(&client->dev, "no driver alert()!\n"); } else diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c index 928656e241dd..c58e093b6032 100644 --- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c +++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c @@ -238,7 +238,7 @@ static struct platform_driver i2c_arbitrator_driver = { .driver = { .owner = THIS_MODULE, .name = "i2c-arb-gpio-challenge", - .of_match_table = of_match_ptr(i2c_arbitrator_of_match), + .of_match_table = i2c_arbitrator_of_match, }, }; diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c index a764da777f08..8a8c56f4b026 100644 --- a/drivers/i2c/muxes/i2c-mux-gpio.c +++ b/drivers/i2c/muxes/i2c-mux-gpio.c @@ -30,15 +30,15 @@ static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val) int i; for (i = 0; i < mux->data.n_gpios; i++) - gpio_set_value(mux->gpio_base + mux->data.gpios[i], - val & (1 << i)); + gpio_set_value_cansleep(mux->gpio_base + mux->data.gpios[i], + val & (1 << i)); } static int i2c_mux_gpio_select(struct i2c_adapter *adap, void *data, u32 chan) { struct gpiomux *mux = data; - i2c_mux_gpio_set(mux, mux->data.values[chan]); + i2c_mux_gpio_set(mux, chan); return 0; } @@ -228,7 +228,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev) unsigned int class = mux->data.classes ? 
mux->data.classes[i] : 0; mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr, - i, class, + mux->data.values[i], class, i2c_mux_gpio_select, deselect); if (!mux->adap[i]) { ret = -ENODEV; @@ -283,7 +283,7 @@ static struct platform_driver i2c_mux_gpio_driver = { .driver = { .owner = THIS_MODULE, .name = "i2c-mux-gpio", - .of_match_table = of_match_ptr(i2c_mux_gpio_of_match), + .of_match_table = i2c_mux_gpio_of_match, }, }; diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c index 68a37157377d..d7978dc4ad0b 100644 --- a/drivers/i2c/muxes/i2c-mux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c @@ -24,6 +24,7 @@ #include <linux/i2c-mux-pinctrl.h> #include <linux/platform_device.h> #include <linux/slab.h> +#include <linux/of.h> struct i2c_mux_pinctrl { struct device *dev; diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c index 140c8ef50529..d9e1f7ccfe6f 100644 --- a/drivers/ide/ide-acpi.c +++ b/drivers/ide/ide-acpi.c @@ -7,6 +7,7 @@ * Copyright (C) 2006 Hannes Reinecke */ +#include <linux/acpi.h> #include <linux/ata.h> #include <linux/delay.h> #include <linux/device.h> @@ -19,8 +20,6 @@ #include <linux/dmi.h> #include <linux/module.h> -#include <acpi/acpi_bus.h> - #define REGS_PER_GTF 7 struct GTM_buffer { @@ -128,7 +127,7 @@ static int ide_get_dev_handle(struct device *dev, acpi_handle *handle, DEBPRINT("ENTER: pci %02x:%02x.%01x\n", bus, devnum, func); - dev_handle = DEVICE_ACPI_HANDLE(dev); + dev_handle = ACPI_HANDLE(dev); if (!dev_handle) { DEBPRINT("no acpi handle for device\n"); goto err; diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 3226ce98fb18..cbd4e9abc47e 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1,7 +1,7 @@ /* * intel_idle.c - native hardware idle loop for modern Intel processors * - * Copyright (c) 2010, Intel Corporation. + * Copyright (c) 2013, Intel Corporation. * Len Brown <len.brown@intel.com> * * This program is free software; you can redistribute it and/or modify it @@ -329,6 +329,22 @@ static struct cpuidle_state atom_cstates[] __initdata = { { .enter = NULL } }; +static struct cpuidle_state avn_cstates[CPUIDLE_STATE_MAX] = { + { + .name = "C1-AVN", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, + .exit_latency = 2, + .target_residency = 2, + .enter = &intel_idle }, + { + .name = "C6-AVN", + .desc = "MWAIT 0x51", + .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 15, + .target_residency = 45, + .enter = &intel_idle }, +}; /** * intel_idle @@ -462,6 +478,11 @@ static const struct idle_cpu idle_cpu_hsw = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_avn = { + .state_table = avn_cstates, + .disable_promotion_to_c1e = true, +}; + #define ICPU(model, cpu) \ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu } @@ -483,6 +504,7 @@ static const struct x86_cpu_id intel_idle_ids[] = { ICPU(0x3f, idle_cpu_hsw), ICPU(0x45, idle_cpu_hsw), ICPU(0x46, idle_cpu_hsw), + ICPU(0x4D, idle_cpu_avn), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index b84791f03a27..5ceda710f516 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -31,17 +31,6 @@ config INFINIBAND_USER_ACCESS libibverbs, libibcm and a hardware driver library from <http://www.openfabrics.org/git/>. 
-config INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING - bool "Experimental and unstable ABI for userspace access to flow steering verbs" - depends on INFINIBAND_USER_ACCESS - depends on STAGING - ---help--- - The final ABI for userspace access to flow steering verbs - has not been defined. To use the current ABI, *WHICH WILL - CHANGE IN THE FUTURE*, say Y here. - - If unsure, say N. - config INFINIBAND_USER_MEM bool depends on INFINIBAND_USER_ACCESS != n diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 784b97cb05b0..f2ef7ef0f36f 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -383,14 +383,11 @@ static int cm_alloc_id(struct cm_id_private *cm_id_priv) { unsigned long flags; int id; - static int next_id; idr_preload(GFP_KERNEL); spin_lock_irqsave(&cm.lock, flags); - id = idr_alloc(&cm.local_id_table, cm_id_priv, next_id, 0, GFP_NOWAIT); - if (id >= 0) - next_id = max(id + 1, 0); + id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT); spin_unlock_irqrestore(&cm.lock, flags); idr_preload_end(); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index d2172e71f985..8e49db690f33 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -328,28 +328,6 @@ static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) return ret; } -static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_num) -{ - int i; - int err; - struct ib_port_attr props; - union ib_gid tmp; - - err = ib_query_port(device, port_num, &props); - if (err) - return err; - - for (i = 0; i < props.gid_tbl_len; ++i) { - err = ib_query_gid(device, port_num, i, &tmp); - if (err) - return err; - if (!memcmp(&tmp, gid, sizeof tmp)) - return 0; - } - - return -EADDRNOTAVAIL; -} - static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr) { dev_addr->dev_type = ARPHRD_INFINIBAND; @@ -371,13 +349,14 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a return ret; } -static int cma_acquire_dev(struct rdma_id_private *id_priv) +static int cma_acquire_dev(struct rdma_id_private *id_priv, + struct rdma_id_private *listen_id_priv) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; struct cma_device *cma_dev; union ib_gid gid, iboe_gid; int ret = -ENODEV; - u8 port; + u8 port, found_port; enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ? 
IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET; @@ -389,17 +368,39 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv) iboe_addr_get_sgid(dev_addr, &iboe_gid); memcpy(&gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof gid); + if (listen_id_priv && + rdma_port_get_link_layer(listen_id_priv->id.device, + listen_id_priv->id.port_num) == dev_ll) { + cma_dev = listen_id_priv->cma_dev; + port = listen_id_priv->id.port_num; + if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB && + rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET) + ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, + &found_port, NULL); + else + ret = ib_find_cached_gid(cma_dev->device, &gid, + &found_port, NULL); + + if (!ret && (port == found_port)) { + id_priv->id.port_num = found_port; + goto out; + } + } list_for_each_entry(cma_dev, &dev_list, list) { for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) { + if (listen_id_priv && + listen_id_priv->cma_dev == cma_dev && + listen_id_priv->id.port_num == port) + continue; if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) { if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB && rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET) - ret = find_gid_port(cma_dev->device, &iboe_gid, port); + ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL); else - ret = find_gid_port(cma_dev->device, &gid, port); + ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL); - if (!ret) { - id_priv->id.port_num = port; + if (!ret && (port == found_port)) { + id_priv->id.port_num = found_port; goto out; } } @@ -1292,7 +1293,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) } mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); - ret = cma_acquire_dev(conn_id); + ret = cma_acquire_dev(conn_id, listen_id); if (ret) goto err2; @@ -1451,7 +1452,6 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, { struct rdma_cm_id *new_cm_id; struct rdma_id_private *listen_id, *conn_id; - struct net_device *dev = NULL; struct rdma_cm_event event; int ret; struct ib_device_attr attr; @@ -1481,7 +1481,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, goto out; } - ret = cma_acquire_dev(conn_id); + ret = cma_acquire_dev(conn_id, listen_id); if (ret) { mutex_unlock(&conn_id->handler_mutex); rdma_destroy_id(new_cm_id); @@ -1529,8 +1529,6 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, cma_deref_id(conn_id); out: - if (dev) - dev_put(dev); mutex_unlock(&listen_id->handler_mutex); return ret; } @@ -2066,7 +2064,7 @@ static void addr_handler(int status, struct sockaddr *src_addr, goto out; if (!status && !id_priv->cma_dev) - status = cma_acquire_dev(id_priv); + status = cma_acquire_dev(id_priv, NULL); if (status) { if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, @@ -2563,7 +2561,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) if (ret) goto err1; - ret = cma_acquire_dev(id_priv); + ret = cma_acquire_dev(id_priv, NULL); if (ret) goto err1; } diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index da06abde9e0d..a1e9cba84944 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c @@ -148,7 +148,7 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) list_for_each_entry(client, &client_list, list) { if (client->index == index) { if (op < 0 || op >= client->nops || - 
!client->cb_table[RDMA_NL_GET_OP(op)].dump) + !client->cb_table[op].dump) return -EINVAL; { diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index cde1e7b5b85d..faad2caf22b1 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -612,6 +612,7 @@ static ssize_t show_node_type(struct device *device, switch (dev->node_type) { case RDMA_NODE_IB_CA: return sprintf(buf, "%d: CA\n", dev->node_type); case RDMA_NODE_RNIC: return sprintf(buf, "%d: RNIC\n", dev->node_type); + case RDMA_NODE_USNIC: return sprintf(buf, "%d: usNIC\n", dev->node_type); case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type); case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type); default: return sprintf(buf, "%d: <unknown>\n", dev->node_type); diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index b0f189be543b..ab8b1c30b36b 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -57,7 +57,7 @@ MODULE_LICENSE("Dual BSD/GPL"); static unsigned int max_backlog = 1024; static struct ctl_table_header *ucma_ctl_table_hdr; -static ctl_table ucma_ctl_table[] = { +static struct ctl_table ucma_ctl_table[] = { { .procname = "max_backlog", .data = &max_backlog, @@ -271,7 +271,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id, goto out; } ctx->backlog--; - } else if (!ctx->uid) { + } else if (!ctx->uid || ctx->cm_id != cm_id) { /* * We ignore events for new connections until userspace has set * their context. This can only happen if an error occurs on a diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index d8f9c6c272d7..bdc842e9faef 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -47,6 +47,14 @@ #include <rdma/ib_umem.h> #include <rdma/ib_user_verbs.h> +#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ + do { \ + (udata)->inbuf = (void __user *) (ibuf); \ + (udata)->outbuf = (void __user *) (obuf); \ + (udata)->inlen = (ilen); \ + (udata)->outlen = (olen); \ + } while (0) + /* * Our lifetime rules for these structs are the following: * @@ -178,6 +186,22 @@ void ib_uverbs_event_handler(struct ib_event_handler *handler, struct ib_event *event); void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev, struct ib_xrcd *xrcd); +struct ib_uverbs_flow_spec { + union { + union { + struct ib_uverbs_flow_spec_hdr hdr; + struct { + __u32 type; + __u16 size; + __u16 reserved; + }; + }; + struct ib_uverbs_flow_spec_eth eth; + struct ib_uverbs_flow_spec_ipv4 ipv4; + struct ib_uverbs_flow_spec_tcp_udp tcp_udp; + }; +}; + #define IB_UVERBS_DECLARE_CMD(name) \ ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \ const char __user *buf, int in_len, \ @@ -217,9 +241,13 @@ IB_UVERBS_DECLARE_CMD(destroy_srq); IB_UVERBS_DECLARE_CMD(create_xsrq); IB_UVERBS_DECLARE_CMD(open_xrcd); IB_UVERBS_DECLARE_CMD(close_xrcd); -#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING -IB_UVERBS_DECLARE_CMD(create_flow); -IB_UVERBS_DECLARE_CMD(destroy_flow); -#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ + +#define IB_UVERBS_DECLARE_EX_CMD(name) \ + int ib_uverbs_ex_##name(struct ib_uverbs_file *file, \ + struct ib_udata *ucore, \ + struct ib_udata *uhw) + +IB_UVERBS_DECLARE_EX_CMD(create_flow); +IB_UVERBS_DECLARE_EX_CMD(destroy_flow); #endif /* UVERBS_H */ diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 2f0f01b70e3b..65f6e7dc380c 100644 --- 
a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -54,17 +54,7 @@ static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" }; static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" }; static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" }; static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" }; -#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" }; -#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ - -#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ - do { \ - (udata)->inbuf = (void __user *) (ibuf); \ - (udata)->outbuf = (void __user *) (obuf); \ - (udata)->inlen = (ilen); \ - (udata)->outlen = (olen); \ - } while (0) /* * The ib_uobject locking scheme is as follows: @@ -939,13 +929,9 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file, if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)) return -EINVAL; - /* - * Local write permission is required if remote write or - * remote atomic permission is also requested. - */ - if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && - !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE)) - return -EINVAL; + ret = ib_check_mr_access(cmd.access_flags); + if (ret) + return ret; uobj = kmalloc(sizeof *uobj, GFP_KERNEL); if (!uobj) @@ -2128,6 +2114,9 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, } next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn; next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey; + if (next->opcode == IB_WR_SEND_WITH_IMM) + next->ex.imm_data = + (__be32 __force) user_wr->ex.imm_data; } else { switch (next->opcode) { case IB_WR_RDMA_WRITE_WITH_IMM: @@ -2601,8 +2590,7 @@ out_put: return ret ? 
ret : in_len; } -#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING -static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec, +static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec, union ib_flow_spec *ib_spec) { ib_spec->type = kern_spec->type; @@ -2642,28 +2630,31 @@ static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec, return 0; } -ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, + struct ib_udata *ucore, + struct ib_udata *uhw) { struct ib_uverbs_create_flow cmd; struct ib_uverbs_create_flow_resp resp; struct ib_uobject *uobj; struct ib_flow *flow_id; - struct ib_kern_flow_attr *kern_flow_attr; + struct ib_uverbs_flow_attr *kern_flow_attr; struct ib_flow_attr *flow_attr; struct ib_qp *qp; int err = 0; void *kern_spec; void *ib_spec; int i; - int kern_attr_size; - if (out_len < sizeof(resp)) + if (ucore->outlen < sizeof(resp)) return -ENOSPC; - if (copy_from_user(&cmd, buf, sizeof(cmd))) - return -EFAULT; + err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); + if (err) + return err; + + ucore->inbuf += sizeof(cmd); + ucore->inlen -= sizeof(cmd); if (cmd.comp_mask) return -EINVAL; @@ -2672,32 +2663,27 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file, !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW)) return -EPERM; - if (cmd.flow_attr.num_of_specs < 0 || - cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS) + if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS) return -EINVAL; - kern_attr_size = cmd.flow_attr.size - sizeof(cmd) - - sizeof(struct ib_uverbs_cmd_hdr_ex); - - if (cmd.flow_attr.size < 0 || cmd.flow_attr.size > in_len || - kern_attr_size < 0 || kern_attr_size > - (cmd.flow_attr.num_of_specs * sizeof(struct ib_kern_spec))) + if (cmd.flow_attr.size > ucore->inlen || + cmd.flow_attr.size > + (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec))) return -EINVAL; if (cmd.flow_attr.num_of_specs) { - kern_flow_attr = kmalloc(cmd.flow_attr.size, GFP_KERNEL); + kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size, + GFP_KERNEL); if (!kern_flow_attr) return -ENOMEM; memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr)); - if (copy_from_user(kern_flow_attr + 1, buf + sizeof(cmd), - kern_attr_size)) { - err = -EFAULT; + err = ib_copy_from_udata(kern_flow_attr + 1, ucore, + cmd.flow_attr.size); + if (err) goto err_free_attr; - } } else { kern_flow_attr = &cmd.flow_attr; - kern_attr_size = sizeof(cmd.flow_attr); } uobj = kmalloc(sizeof(*uobj), GFP_KERNEL); @@ -2714,7 +2700,7 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file, goto err_uobj; } - flow_attr = kmalloc(cmd.flow_attr.size, GFP_KERNEL); + flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL); if (!flow_attr) { err = -ENOMEM; goto err_put; @@ -2729,19 +2715,22 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file, kern_spec = kern_flow_attr + 1; ib_spec = flow_attr + 1; - for (i = 0; i < flow_attr->num_of_specs && kern_attr_size > 0; i++) { + for (i = 0; i < flow_attr->num_of_specs && + cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) && + cmd.flow_attr.size >= + ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) { err = kern_spec_to_ib_spec(kern_spec, ib_spec); if (err) goto err_free; flow_attr->size += ((union ib_flow_spec *) ib_spec)->size; - kern_attr_size -= ((struct ib_kern_spec *) kern_spec)->size; - kern_spec += ((struct ib_kern_spec *) 
kern_spec)->size; + cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size; + kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size; ib_spec += ((union ib_flow_spec *) ib_spec)->size; } - if (kern_attr_size) { - pr_warn("create flow failed, %d bytes left from uverb cmd\n", - kern_attr_size); + if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { + pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n", + i, cmd.flow_attr.size); goto err_free; } flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER); @@ -2760,11 +2749,10 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file, memset(&resp, 0, sizeof(resp)); resp.flow_handle = uobj->id; - if (copy_to_user((void __user *)(unsigned long) cmd.response, - &resp, sizeof(resp))) { - err = -EFAULT; + err = ib_copy_to_udata(ucore, + &resp, sizeof(resp)); + if (err) goto err_copy; - } put_qp_read(qp); mutex_lock(&file->mutex); @@ -2777,7 +2765,7 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file, kfree(flow_attr); if (cmd.flow_attr.num_of_specs) kfree(kern_flow_attr); - return in_len; + return 0; err_copy: idr_remove_uobj(&ib_uverbs_rule_idr, uobj); destroy_flow: @@ -2794,16 +2782,18 @@ err_free_attr: return err; } -ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) { +int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file, + struct ib_udata *ucore, + struct ib_udata *uhw) +{ struct ib_uverbs_destroy_flow cmd; struct ib_flow *flow_id; struct ib_uobject *uobj; int ret; - if (copy_from_user(&cmd, buf, sizeof(cmd))) - return -EFAULT; + ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); + if (ret) + return ret; uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle, file->ucontext); @@ -2825,9 +2815,8 @@ ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file, put_uobj(uobj); - return ret ? 
ret : in_len; + return ret; } -#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ static int __uverbs_create_xsrq(struct ib_uverbs_file *file, struct ib_uverbs_create_xsrq *cmd, diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 2df31f68ea09..34386943ebcf 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -115,10 +115,13 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file, [IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd, [IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq, [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp, -#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING - [IB_USER_VERBS_CMD_CREATE_FLOW] = ib_uverbs_create_flow, - [IB_USER_VERBS_CMD_DESTROY_FLOW] = ib_uverbs_destroy_flow -#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ +}; + +static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file, + struct ib_udata *ucore, + struct ib_udata *uhw) = { + [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow, + [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow }; static void ib_uverbs_add_one(struct ib_device *device); @@ -589,6 +592,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, { struct ib_uverbs_file *file = filp->private_data; struct ib_uverbs_cmd_hdr hdr; + __u32 flags; if (count < sizeof hdr) return -EINVAL; @@ -596,45 +600,105 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, if (copy_from_user(&hdr, buf, sizeof hdr)) return -EFAULT; - if (hdr.command >= ARRAY_SIZE(uverbs_cmd_table) || - !uverbs_cmd_table[hdr.command]) - return -EINVAL; + flags = (hdr.command & + IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT; - if (!file->ucontext && - hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT) - return -EINVAL; + if (!flags) { + __u32 command; - if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command))) - return -ENOSYS; + if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK | + IB_USER_VERBS_CMD_COMMAND_MASK)) + return -EINVAL; -#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING - if (hdr.command >= IB_USER_VERBS_CMD_THRESHOLD) { - struct ib_uverbs_cmd_hdr_ex hdr_ex; + command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK; - if (copy_from_user(&hdr_ex, buf, sizeof(hdr_ex))) - return -EFAULT; + if (command >= ARRAY_SIZE(uverbs_cmd_table) || + !uverbs_cmd_table[command]) + return -EINVAL; - if (((hdr_ex.in_words + hdr_ex.provider_in_words) * 4) != count) + if (!file->ucontext && + command != IB_USER_VERBS_CMD_GET_CONTEXT) return -EINVAL; - return uverbs_cmd_table[hdr.command](file, - buf + sizeof(hdr_ex), - (hdr_ex.in_words + - hdr_ex.provider_in_words) * 4, - (hdr_ex.out_words + - hdr_ex.provider_out_words) * 4); - } else { -#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ + if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << command))) + return -ENOSYS; + if (hdr.in_words * 4 != count) return -EINVAL; - return uverbs_cmd_table[hdr.command](file, - buf + sizeof(hdr), - hdr.in_words * 4, - hdr.out_words * 4); -#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING + return uverbs_cmd_table[command](file, + buf + sizeof(hdr), + hdr.in_words * 4, + hdr.out_words * 4); + + } else if (flags == IB_USER_VERBS_CMD_FLAG_EXTENDED) { + __u32 command; + + struct ib_uverbs_ex_cmd_hdr ex_hdr; + struct ib_udata ucore; + struct ib_udata uhw; + int err; + size_t written_count = count; + + if (hdr.command & 
~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK | + IB_USER_VERBS_CMD_COMMAND_MASK)) + return -EINVAL; + + command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK; + + if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) || + !uverbs_ex_cmd_table[command]) + return -ENOSYS; + + if (!file->ucontext) + return -EINVAL; + + if (!(file->device->ib_dev->uverbs_ex_cmd_mask & (1ull << command))) + return -ENOSYS; + + if (count < (sizeof(hdr) + sizeof(ex_hdr))) + return -EINVAL; + + if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr))) + return -EFAULT; + + count -= sizeof(hdr) + sizeof(ex_hdr); + buf += sizeof(hdr) + sizeof(ex_hdr); + + if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count) + return -EINVAL; + + if (ex_hdr.response) { + if (!hdr.out_words && !ex_hdr.provider_out_words) + return -EINVAL; + } else { + if (hdr.out_words || ex_hdr.provider_out_words) + return -EINVAL; + } + + INIT_UDATA(&ucore, + (hdr.in_words) ? buf : 0, + (unsigned long)ex_hdr.response, + hdr.in_words * 8, + hdr.out_words * 8); + + INIT_UDATA(&uhw, + (ex_hdr.provider_in_words) ? buf + ucore.inlen : 0, + (ex_hdr.provider_out_words) ? (unsigned long)ex_hdr.response + ucore.outlen : 0, + ex_hdr.provider_in_words * 8, + ex_hdr.provider_out_words * 8); + + err = uverbs_ex_cmd_table[command](file, + &ucore, + &uhw); + + if (err) + return err; + + return written_count; } -#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ + + return -ENOSYS; } static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma) diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index a321df28bab2..d4f6ddf72ffa 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -114,6 +114,8 @@ rdma_node_get_transport(enum rdma_node_type node_type) return RDMA_TRANSPORT_IB; case RDMA_NODE_RNIC: return RDMA_TRANSPORT_IWARP; + case RDMA_NODE_USNIC: + return RDMA_TRANSPORT_USNIC; default: BUG(); return 0; @@ -130,6 +132,7 @@ enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_ case RDMA_TRANSPORT_IB: return IB_LINK_LAYER_INFINIBAND; case RDMA_TRANSPORT_IWARP: + case RDMA_TRANSPORT_USNIC: return IB_LINK_LAYER_ETHERNET; default: return IB_LINK_LAYER_UNSPECIFIED; @@ -958,6 +961,11 @@ EXPORT_SYMBOL(ib_resize_cq); struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags) { struct ib_mr *mr; + int err; + + err = ib_check_mr_access(mr_access_flags); + if (err) + return ERR_PTR(err); mr = pd->device->get_dma_mr(pd, mr_access_flags); @@ -980,6 +988,11 @@ struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd, u64 *iova_start) { struct ib_mr *mr; + int err; + + err = ib_check_mr_access(mr_access_flags); + if (err) + return ERR_PTR(err); if (!pd->device->reg_phys_mr) return ERR_PTR(-ENOSYS); @@ -1010,6 +1023,10 @@ int ib_rereg_phys_mr(struct ib_mr *mr, struct ib_pd *old_pd; int ret; + ret = ib_check_mr_access(mr_access_flags); + if (ret) + return ret; + if (!mr->device->rereg_phys_mr) return -ENOSYS; diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 33d2cc6ab562..4a033853312e 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -602,10 +602,10 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start, rdev->lldi.vr->cq.size); - PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu " + PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu " "qpmask 0x%x cqshift %lu cqmask 0x%x\n", 
(unsigned)pci_resource_len(rdev->lldi.pdev, 2), - (void *)(unsigned long)pci_resource_start(rdev->lldi.pdev, 2), + (u64)pci_resource_start(rdev->lldi.pdev, 2), rdev->lldi.db_reg, rdev->lldi.gts_reg, rdev->qpshift, rdev->qpmask, diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c index f5cb13b21445..cc04b7ba3488 100644 --- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c +++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c @@ -280,9 +280,7 @@ static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd, int j; int ret; - ret = get_user_pages(current, current->mm, addr, - npages, 0, 1, pages, NULL); - + ret = get_user_pages_fast(addr, npages, 0, pages); if (ret != npages) { int i; @@ -811,10 +809,7 @@ int ipath_user_sdma_writev(struct ipath_devdata *dd, while (dim) { const int mxp = 8; - down_write(¤t->mm->mmap_sem); ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp); - up_write(¤t->mm->mmap_sem); - if (ret <= 0) goto done_unlock; else { diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index d5e60f44ba5a..66dbf8062374 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -324,7 +324,7 @@ static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq) u32 i; i = cq->mcq.cons_index; - while (get_sw_cqe(cq, i & cq->ibcq.cqe)) + while (get_sw_cqe(cq, i)) ++i; return i - cq->mcq.cons_index; @@ -365,7 +365,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) mutex_lock(&cq->resize_mutex); - if (entries < 1 || entries > dev->dev->caps.max_cqes) { + if (entries < 1) { err = -EINVAL; goto out; } @@ -376,6 +376,11 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) goto out; } + if (entries > dev->dev->caps.max_cqes) { + err = -EINVAL; + goto out; + } + if (ibcq->uobject) { err = mlx4_alloc_resize_umem(dev, cq, entries, udata); if (err) diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 6a0a0d29660d..1958c5ca792a 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1685,11 +1685,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev->ib_dev.create_flow = mlx4_ib_create_flow; ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow; -#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING - ibdev->ib_dev.uverbs_cmd_mask |= - (1ull << IB_USER_VERBS_CMD_CREATE_FLOW) | - (1ull << IB_USER_VERBS_CMD_DESTROY_FLOW); -#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ + ibdev->ib_dev.uverbs_ex_cmd_mask |= + (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) | + (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW); } mlx4_ib_alloc_eqs(dev, ibdev); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 344ab03948a3..b72627429745 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -556,7 +556,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, goto err_db; } mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0); - (*cqb)->ctx.log_pg_sz = page_shift - PAGE_SHIFT; + (*cqb)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; *index = to_mucontext(context)->uuari.uars[0].index; @@ -620,7 +620,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, } mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas); - (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - PAGE_SHIFT; + (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - 
MLX5_ADAPTER_PAGE_SHIFT; *index = dev->mdev.priv.uuari.uars[0].index; return 0; @@ -653,8 +653,11 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries, int eqn; int err; + if (entries < 0) + return ERR_PTR(-EINVAL); + entries = roundup_pow_of_two(entries + 1); - if (entries < 1 || entries > dev->mdev.caps.max_cqes) + if (entries > dev->mdev.caps.max_cqes) return ERR_PTR(-EINVAL); cq = kzalloc(sizeof(*cq), GFP_KERNEL); @@ -747,17 +750,9 @@ int mlx5_ib_destroy_cq(struct ib_cq *cq) return 0; } -static int is_equal_rsn(struct mlx5_cqe64 *cqe64, struct mlx5_ib_srq *srq, - u32 rsn) +static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn) { - u32 lrsn; - - if (srq) - lrsn = be32_to_cpu(cqe64->srqn) & 0xffffff; - else - lrsn = be32_to_cpu(cqe64->sop_drop_qpn) & 0xffffff; - - return rsn == lrsn; + return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff); } void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq) @@ -787,8 +782,8 @@ void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq) while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) { cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; - if (is_equal_rsn(cqe64, srq, rsn)) { - if (srq) + if (is_equal_rsn(cqe64, rsn)) { + if (srq && (ntohl(cqe64->srqn) & 0xffffff)) mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter)); ++nfreed; } else if (nfreed) { diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index b1a6cb3a2809..306534109627 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -745,7 +745,8 @@ static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn) seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); seg->start_addr = 0; - err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in)); + err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in), + NULL, NULL, NULL); if (err) { mlx5_ib_warn(dev, "failed to create mkey, %d\n", err); goto err_in; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 836be9157242..4c134d93d4fc 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -262,6 +262,9 @@ struct mlx5_ib_mr { int npages; struct completion done; enum ib_wc_status status; + struct mlx5_ib_dev *dev; + struct mlx5_create_mkey_mbox_out out; + unsigned long start; }; struct mlx5_ib_fast_reg_page_list { @@ -323,6 +326,7 @@ struct mlx5_cache_ent { struct mlx5_ib_dev *dev; struct work_struct work; struct delayed_work dwork; + int pending; }; struct mlx5_mr_cache { @@ -358,6 +362,8 @@ struct mlx5_ib_dev { spinlock_t mr_lock; struct mlx5_ib_resources devr; struct mlx5_mr_cache cache; + struct timer_list delay_timer; + int fill_delay; }; static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 3453580b1eb2..039c3e40fcb4 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -35,11 +35,12 @@ #include <linux/random.h> #include <linux/debugfs.h> #include <linux/export.h> +#include <linux/delay.h> #include <rdma/ib_umem.h> #include "mlx5_ib.h" enum { - DEF_CACHE_SIZE = 10, + MAX_PENDING_REG_MR = 8, }; enum { @@ -63,6 +64,51 @@ static int order2idx(struct mlx5_ib_dev *dev, int order) return order - cache->ent[0].order; } +static void reg_mr_callback(int status, void *context) +{ + struct mlx5_ib_mr *mr = context; + struct mlx5_ib_dev *dev = mr->dev; 
+ struct mlx5_mr_cache *cache = &dev->cache; + int c = order2idx(dev, mr->order); + struct mlx5_cache_ent *ent = &cache->ent[c]; + u8 key; + unsigned long flags; + + spin_lock_irqsave(&ent->lock, flags); + ent->pending--; + spin_unlock_irqrestore(&ent->lock, flags); + if (status) { + mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status); + kfree(mr); + dev->fill_delay = 1; + mod_timer(&dev->delay_timer, jiffies + HZ); + return; + } + + if (mr->out.hdr.status) { + mlx5_ib_warn(dev, "failed - status %d, syndorme 0x%x\n", + mr->out.hdr.status, + be32_to_cpu(mr->out.hdr.syndrome)); + kfree(mr); + dev->fill_delay = 1; + mod_timer(&dev->delay_timer, jiffies + HZ); + return; + } + + spin_lock_irqsave(&dev->mdev.priv.mkey_lock, flags); + key = dev->mdev.priv.mkey_key++; + spin_unlock_irqrestore(&dev->mdev.priv.mkey_lock, flags); + mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key; + + cache->last_add = jiffies; + + spin_lock_irqsave(&ent->lock, flags); + list_add_tail(&mr->list, &ent->head); + ent->cur++; + ent->size++; + spin_unlock_irqrestore(&ent->lock, flags); +} + static int add_keys(struct mlx5_ib_dev *dev, int c, int num) { struct mlx5_mr_cache *cache = &dev->cache; @@ -78,36 +124,39 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num) return -ENOMEM; for (i = 0; i < num; i++) { + if (ent->pending >= MAX_PENDING_REG_MR) { + err = -EAGAIN; + break; + } + mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) { err = -ENOMEM; - goto out; + break; } mr->order = ent->order; mr->umred = 1; + mr->dev = dev; in->seg.status = 1 << 6; in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2); in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN; in->seg.log2_page_size = 12; + spin_lock_irq(&ent->lock); + ent->pending++; + spin_unlock_irq(&ent->lock); + mr->start = jiffies; err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, - sizeof(*in)); + sizeof(*in), reg_mr_callback, + mr, &mr->out); if (err) { mlx5_ib_warn(dev, "create mkey failed %d\n", err); kfree(mr); - goto out; + break; } - cache->last_add = jiffies; - - spin_lock(&ent->lock); - list_add_tail(&mr->list, &ent->head); - ent->cur++; - ent->size++; - spin_unlock(&ent->lock); } -out: kfree(in); return err; } @@ -121,16 +170,16 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) int i; for (i = 0; i < num; i++) { - spin_lock(&ent->lock); + spin_lock_irq(&ent->lock); if (list_empty(&ent->head)) { - spin_unlock(&ent->lock); + spin_unlock_irq(&ent->lock); return; } mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); list_del(&mr->list); ent->cur--; ent->size--; - spin_unlock(&ent->lock); + spin_unlock_irq(&ent->lock); err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); if (err) mlx5_ib_warn(dev, "failed destroy mkey\n"); @@ -162,9 +211,13 @@ static ssize_t size_write(struct file *filp, const char __user *buf, return -EINVAL; if (var > ent->size) { - err = add_keys(dev, c, var - ent->size); - if (err) - return err; + do { + err = add_keys(dev, c, var - ent->size); + if (err && err != -EAGAIN) + return err; + + usleep_range(3000, 5000); + } while (err); } else if (var < ent->size) { remove_keys(dev, c, ent->size - var); } @@ -280,23 +333,37 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) struct mlx5_ib_dev *dev = ent->dev; struct mlx5_mr_cache *cache = &dev->cache; int i = order2idx(dev, ent->order); + int err; if (cache->stopped) return; ent = &dev->cache.ent[i]; - if (ent->cur < 2 * ent->limit) { - add_keys(dev, i, 1); - if 
(ent->cur < 2 * ent->limit) - queue_work(cache->wq, &ent->work); + if (ent->cur < 2 * ent->limit && !dev->fill_delay) { + err = add_keys(dev, i, 1); + if (ent->cur < 2 * ent->limit) { + if (err == -EAGAIN) { + mlx5_ib_dbg(dev, "returned eagain, order %d\n", + i + 2); + queue_delayed_work(cache->wq, &ent->dwork, + msecs_to_jiffies(3)); + } else if (err) { + mlx5_ib_warn(dev, "command failed order %d, err %d\n", + i + 2, err); + queue_delayed_work(cache->wq, &ent->dwork, + msecs_to_jiffies(1000)); + } else { + queue_work(cache->wq, &ent->work); + } + } } else if (ent->cur > 2 * ent->limit) { if (!someone_adding(cache) && - time_after(jiffies, cache->last_add + 60 * HZ)) { + time_after(jiffies, cache->last_add + 300 * HZ)) { remove_keys(dev, i, 1); if (ent->cur > ent->limit) queue_work(cache->wq, &ent->work); } else { - queue_delayed_work(cache->wq, &ent->dwork, 60 * HZ); + queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); } } } @@ -336,18 +403,18 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order) mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i); - spin_lock(&ent->lock); + spin_lock_irq(&ent->lock); if (!list_empty(&ent->head)) { mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); list_del(&mr->list); ent->cur--; - spin_unlock(&ent->lock); + spin_unlock_irq(&ent->lock); if (ent->cur < ent->limit) queue_work(cache->wq, &ent->work); break; } - spin_unlock(&ent->lock); + spin_unlock_irq(&ent->lock); queue_work(cache->wq, &ent->work); @@ -374,12 +441,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) return; } ent = &cache->ent[c]; - spin_lock(&ent->lock); + spin_lock_irq(&ent->lock); list_add_tail(&mr->list, &ent->head); ent->cur++; if (ent->cur > 2 * ent->limit) shrink = 1; - spin_unlock(&ent->lock); + spin_unlock_irq(&ent->lock); if (shrink) queue_work(cache->wq, &ent->work); @@ -394,16 +461,16 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c) cancel_delayed_work(&ent->dwork); while (1) { - spin_lock(&ent->lock); + spin_lock_irq(&ent->lock); if (list_empty(&ent->head)) { - spin_unlock(&ent->lock); + spin_unlock_irq(&ent->lock); return; } mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); list_del(&mr->list); ent->cur--; ent->size--; - spin_unlock(&ent->lock); + spin_unlock_irq(&ent->lock); err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); if (err) mlx5_ib_warn(dev, "failed destroy mkey\n"); @@ -464,12 +531,18 @@ static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) debugfs_remove_recursive(dev->cache.root); } +static void delay_time_func(unsigned long ctx) +{ + struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx; + + dev->fill_delay = 0; +} + int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) { struct mlx5_mr_cache *cache = &dev->cache; struct mlx5_cache_ent *ent; int limit; - int size; int err; int i; @@ -479,6 +552,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) return -ENOMEM; } + setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev); for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { INIT_LIST_HEAD(&cache->ent[i].head); spin_lock_init(&cache->ent[i].lock); @@ -489,13 +563,11 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) ent->order = i + 2; ent->dev = dev; - if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE) { - size = dev->mdev.profile->mr_cache[i].size; + if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE) limit = dev->mdev.profile->mr_cache[i].limit; - } else { - size = DEF_CACHE_SIZE; + else limit = 0; - } + INIT_WORK(&ent->work, cache_work_func); 
INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); ent->limit = limit; @@ -522,6 +594,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) clean_keys(dev, i); destroy_workqueue(dev->cache.wq); + del_timer_sync(&dev->delay_timer); return 0; } @@ -551,7 +624,8 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc) seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); seg->start_addr = 0; - err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in)); + err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL, + NULL); if (err) goto err_in; @@ -660,14 +734,14 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, int err; int i; - for (i = 0; i < 10; i++) { + for (i = 0; i < 1; i++) { mr = alloc_cached_mr(dev, order); if (mr) break; err = add_keys(dev, order2idx(dev, order), 1); - if (err) { - mlx5_ib_warn(dev, "add_keys failed\n"); + if (err && err != -EAGAIN) { + mlx5_ib_warn(dev, "add_keys failed, err %d\n", err); break; } } @@ -759,8 +833,10 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift)); in->seg.log2_page_size = page_shift; in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift)); - err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen); + in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length, + 1 << page_shift)); + err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen, NULL, + NULL, NULL); if (err) { mlx5_ib_warn(dev, "create mkey failed\n"); goto err_2; @@ -944,7 +1020,8 @@ struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd, * TBD not needed - issue 197292 */ in->seg.log2_page_size = PAGE_SHIFT; - err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in)); + err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in), NULL, + NULL, NULL); kfree(in); if (err) goto err_free; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 5659ea880741..7c6b4ba49bec 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -551,7 +551,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, } mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0); (*in)->ctx.log_pg_sz_remote_qpn = - cpu_to_be32((page_shift - PAGE_SHIFT) << 24); + cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24); (*in)->ctx.params2 = cpu_to_be32(offset << 6); (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index); @@ -648,7 +648,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, goto err_buf; } (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index); - (*in)->ctx.log_pg_sz_remote_qpn = cpu_to_be32((qp->buf.page_shift - PAGE_SHIFT) << 24); + (*in)->ctx.log_pg_sz_remote_qpn = + cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24); /* Set "fast registration enabled" for all kernel QPs */ (*in)->ctx.params1 |= cpu_to_be32(1 << 11); (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4); @@ -1317,9 +1318,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q MLX5_QP_OPTPAR_RAE | MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RNR_TIMEOUT | - MLX5_QP_OPTPAR_PM_STATE, + MLX5_QP_OPTPAR_PM_STATE | + MLX5_QP_OPTPAR_ALT_ADDR_PATH, [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE | - MLX5_QP_OPTPAR_PM_STATE, + MLX5_QP_OPTPAR_PM_STATE | + MLX5_QP_OPTPAR_ALT_ADDR_PATH, [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY | MLX5_QP_OPTPAR_SRQN | 
MLX5_QP_OPTPAR_CQN_RCV, @@ -1550,7 +1553,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, mlx5_cur = to_mlx5_state(cur_state); mlx5_new = to_mlx5_state(new_state); mlx5_st = to_mlx5_st(ibqp->qp_type); - if (mlx5_cur < 0 || mlx5_new < 0 || mlx5_st < 0) + if (mlx5_st < 0) goto out; optpar = ib_mask_to_mlx5_opt(attr_mask); @@ -1744,6 +1747,7 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, MLX5_MKEY_MASK_PD | MLX5_MKEY_MASK_LR | MLX5_MKEY_MASK_LW | + MLX5_MKEY_MASK_KEY | MLX5_MKEY_MASK_RR | MLX5_MKEY_MASK_RW | MLX5_MKEY_MASK_A | @@ -1800,7 +1804,8 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *w seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start); seg->len = cpu_to_be64(wr->wr.fast_reg.length); seg->log2_page_size = wr->wr.fast_reg.page_shift; - seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 | + mlx5_mkey_variant(wr->wr.fast_reg.rkey)); } static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg, @@ -1913,6 +1918,10 @@ static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size, if (unlikely((*seg == qp->sq.qend))) *seg = mlx5_get_send_wqe(qp, 0); if (!li) { + if (unlikely(wr->wr.fast_reg.page_list_len > + wr->wr.fast_reg.page_list->max_page_list_len)) + return -ENOMEM; + set_frwr_pages(*seg, wr, mdev, pd, writ); *seg += sizeof(struct mlx5_wqe_data_seg); *size += (sizeof(struct mlx5_wqe_data_seg) / 16); diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 0aa478bc291a..210b3eaf188a 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -123,7 +123,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, goto err_in; } - (*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT; + (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26); return 0; @@ -192,7 +192,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, } srq->wq_sig = !!srq_signature; - (*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT; + (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; return 0; @@ -390,9 +390,7 @@ int mlx5_ib_destroy_srq(struct ib_srq *srq) mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db); ib_umem_release(msrq->umem); } else { - kfree(msrq->wrid); - mlx5_buf_free(&dev->mdev, &msrq->buf); - mlx5_db_free(&dev->mdev, &msrq->db); + destroy_srq_kernel(dev, msrq); } kfree(srq); diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 5b53ca5a2284..8308e3634767 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -2834,7 +2834,7 @@ static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, init_attr->qp_context = nesqp->ibqp.qp_context; init_attr->send_cq = nesqp->ibqp.send_cq; init_attr->recv_cq = nesqp->ibqp.recv_cq; - init_attr->srq = nesqp->ibqp.srq = nesqp->ibqp.srq; + init_attr->srq = nesqp->ibqp.srq; init_attr->cap = attr->cap; return 0; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h index adc11d14f878..294dd27b601e 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h @@ -122,6 +122,32 @@ struct mqe_ctx { bool cmd_done; }; +struct ocrdma_hw_mr { + u32 lkey; + u8 fr_mr; + u8 remote_atomic; + u8 remote_rd; + u8 remote_wr; + u8 local_rd; + u8 local_wr; + u8 mw_bind; + u8 rsvd; + u64 len; + struct ocrdma_pbl *pbl_table; + u32 
num_pbls; + u32 num_pbes; + u32 pbl_size; + u32 pbe_size; + u64 fbo; + u64 va; +}; + +struct ocrdma_mr { + struct ib_mr ibmr; + struct ib_umem *umem; + struct ocrdma_hw_mr hwmr; +}; + struct ocrdma_dev { struct ib_device ibdev; struct ocrdma_dev_attr attr; @@ -169,7 +195,7 @@ struct ocrdma_dev { struct list_head entry; struct rcu_head rcu; int id; - u64 stag_arr[OCRDMA_MAX_STAG]; + struct ocrdma_mr *stag_arr[OCRDMA_MAX_STAG]; u16 pvid; }; @@ -294,31 +320,6 @@ struct ocrdma_qp { u16 db_cache; }; -struct ocrdma_hw_mr { - u32 lkey; - u8 fr_mr; - u8 remote_atomic; - u8 remote_rd; - u8 remote_wr; - u8 local_rd; - u8 local_wr; - u8 mw_bind; - u8 rsvd; - u64 len; - struct ocrdma_pbl *pbl_table; - u32 num_pbls; - u32 num_pbes; - u32 pbl_size; - u32 pbe_size; - u64 fbo; - u64 va; -}; - -struct ocrdma_mr { - struct ib_mr ibmr; - struct ib_umem *umem; - struct ocrdma_hw_mr hwmr; -}; struct ocrdma_ucontext { struct ib_ucontext ibucontext; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 50219ab2279d..56bf32fcb62c 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -1783,7 +1783,7 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd, u32 max_sges = attrs->cap.max_send_sge; /* QP1 may exceed 127 */ - max_wqe_allocated = min_t(int, attrs->cap.max_send_wr + 1, + max_wqe_allocated = min_t(u32, attrs->cap.max_send_wr + 1, dev->attr.max_wqe); status = ocrdma_build_q_conf(&max_wqe_allocated, diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 0ce7674621ea..91443bcb9e0e 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -452,9 +452,6 @@ static void ocrdma_remove_free(struct rcu_head *rcu) { struct ocrdma_dev *dev = container_of(rcu, struct ocrdma_dev, rcu); - ocrdma_free_resources(dev); - ocrdma_cleanup_hw(dev); - idr_remove(&ocrdma_dev_id, dev->id); kfree(dev->mbx_cmd); ib_dealloc_device(&dev->ibdev); @@ -470,6 +467,10 @@ static void ocrdma_remove(struct ocrdma_dev *dev) spin_lock(&ocrdma_devlist_lock); list_del_rcu(&dev->entry); spin_unlock(&ocrdma_devlist_lock); + + ocrdma_free_resources(dev); + ocrdma_cleanup_hw(dev); + call_rcu(&dev->rcu, ocrdma_remove_free); } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 69f1d1221a6b..7686dceadd29 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -1981,9 +1981,7 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES); - if ((wr->wr.fast_reg.page_list_len > - qp->dev->attr.max_pages_per_frmr) || - (wr->wr.fast_reg.length > 0xffffffffULL)) + if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr) return -EINVAL; hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT); @@ -2839,7 +2837,7 @@ struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len) goto mbx_err; mr->ibmr.rkey = mr->hwmr.lkey; mr->ibmr.lkey = mr->hwmr.lkey; - dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = (unsigned long) mr; + dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = mr; return &mr->ibmr; mbx_err: ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 016e7429adf6..5bfc02f450e6 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c 
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -6190,21 +6190,20 @@ static int setup_txselect(const char *str, struct kernel_param *kp) { struct qib_devdata *dd; unsigned long val; - int ret; - + char *n; if (strlen(str) >= MAX_ATTEN_LEN) { pr_info("txselect_values string too long\n"); return -ENOSPC; } - ret = kstrtoul(str, 0, &val); - if (ret || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + + val = simple_strtoul(str, &n, 0); + if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ)) { pr_info("txselect_values must start with a number < %d\n", TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ); - return ret ? ret : -EINVAL; + return -EINVAL; } - strcpy(txselect_list, str); + list_for_each_entry(dd, &qib_dev_list, list) if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322) set_no_qsfp_atten(dd, 1); diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h index 28874f8606f8..941d4d50d8e7 100644 --- a/drivers/infiniband/hw/qib/qib_mad.h +++ b/drivers/infiniband/hw/qib/qib_mad.h @@ -54,7 +54,7 @@ struct ib_node_info { __be32 revision; u8 local_port_num; u8 vendor_id[3]; -} __attribute__ ((packed)); +} __packed; struct ib_mad_notice_attr { u8 generic_type; @@ -73,7 +73,7 @@ struct ib_mad_notice_attr { __be16 reserved; __be16 lid; /* where violation happened */ u8 port_num; /* where violation happened */ - } __attribute__ ((packed)) ntc_129_131; + } __packed ntc_129_131; struct { __be16 reserved; @@ -83,14 +83,14 @@ struct ib_mad_notice_attr { __be32 new_cap_mask; /* new capability mask */ u8 reserved3; u8 change_flags; /* low 3 bits only */ - } __attribute__ ((packed)) ntc_144; + } __packed ntc_144; struct { __be16 reserved; __be16 lid; /* lid where sys guid changed */ __be16 reserved2; __be64 new_sys_guid; - } __attribute__ ((packed)) ntc_145; + } __packed ntc_145; struct { __be16 reserved; @@ -104,7 +104,7 @@ struct ib_mad_notice_attr { u8 reserved3; u8 dr_trunc_hop; u8 dr_rtn_path[30]; - } __attribute__ ((packed)) ntc_256; + } __packed ntc_256; struct { __be16 reserved; @@ -115,7 +115,7 @@ struct ib_mad_notice_attr { __be32 qp2; /* high 8 bits reserved */ union ib_gid gid1; union ib_gid gid2; - } __attribute__ ((packed)) ntc_257_258; + } __packed ntc_257_258; } details; }; @@ -209,7 +209,7 @@ struct ib_pma_portcounters_cong { __be64 port_rcv_packets; __be64 port_xmit_wait; __be64 port_adr_events; -} __attribute__ ((packed)); +} __packed; #define IB_PMA_CONG_HW_CONTROL_TIMER 0x00 #define IB_PMA_CONG_HW_CONTROL_SAMPLE 0x01 diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c index d0a0ea0c14d6..165aee2ca8a0 100644 --- a/drivers/infiniband/hw/qib/qib_user_sdma.c +++ b/drivers/infiniband/hw/qib/qib_user_sdma.c @@ -594,8 +594,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd, else j = npages; - ret = get_user_pages(current, current->mm, addr, - j, 0, 1, pages, NULL); + ret = get_user_pages_fast(addr, j, 0, pages); if (ret != j) { i = 0; j = ret; @@ -1294,11 +1293,8 @@ int qib_user_sdma_writev(struct qib_ctxtdata *rcd, int mxp = 8; int ndesc = 0; - down_write(&current->mm->mmap_sem); ret = qib_user_sdma_queue_pkts(dd, ppd, pq, iov, dim, &list, &mxp, &ndesc); - up_write(&current->mm->mmap_sem); - if (ret < 0) goto done_unlock; else { diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index 012e2c7575ad..a01c7d2cf541 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h @@ -150,14 +150,14 @@ struct ib_reth { __be64 vaddr; __be32
rkey; __be32 length; -} __attribute__ ((packed)); +} __packed; struct ib_atomic_eth { __be32 vaddr[2]; /* unaligned so access as 2 32-bit words */ __be32 rkey; __be64 swap_data; __be64 compare_data; -} __attribute__ ((packed)); +} __packed; struct qib_other_headers { __be32 bth[3]; @@ -178,7 +178,7 @@ struct qib_other_headers { __be32 aeth; struct ib_atomic_eth atomic_eth; } u; -} __attribute__ ((packed)); +} __packed; /* * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes @@ -195,12 +195,12 @@ struct qib_ib_header { } l; struct qib_other_headers oth; } u; -} __attribute__ ((packed)); +} __packed; struct qib_pio_header { __le32 pbc[2]; struct qib_ib_header hdr; -} __attribute__ ((packed)); +} __packed; /* * There is one struct qib_mcast for each multicast GID. diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index eb71aaa26a9a..c639f90cfda4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -101,6 +101,7 @@ enum { IPOIB_MCAST_FLAG_SENDONLY = 1, IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */ IPOIB_MCAST_FLAG_ATTACHED = 3, + IPOIB_MCAST_JOIN_STARTED = 4, MAX_SEND_CQE = 16, IPOIB_CM_COPYBREAK = 256, @@ -151,6 +152,7 @@ struct ipoib_mcast { struct sk_buff_head pkt_queue; struct net_device *dev; + struct completion done; }; struct ipoib_rx_buf { @@ -299,7 +301,7 @@ struct ipoib_dev_priv { unsigned long flags; - struct mutex vlan_mutex; + struct rw_semaphore vlan_rwsem; struct rb_root path_tree; struct list_head path_list; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 7a3175400b2a..1377f85911c2 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -140,7 +140,8 @@ static int ipoib_cm_post_receive_nonsrq(struct net_device *dev, static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, struct ipoib_cm_rx_buf *rx_ring, int id, int frags, - u64 mapping[IPOIB_CM_RX_SG]) + u64 mapping[IPOIB_CM_RX_SG], + gfp_t gfp) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct sk_buff *skb; @@ -164,7 +165,7 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, } for (i = 0; i < frags; i++) { - struct page *page = alloc_page(GFP_ATOMIC); + struct page *page = alloc_page(gfp); if (!page) goto partial_error; @@ -382,7 +383,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i for (i = 0; i < ipoib_recvq_size; ++i) { if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1, - rx->rx_ring[i].mapping)) { + rx->rx_ring[i].mapping, + GFP_KERNEL)) { ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); ret = -ENOMEM; goto err_count; @@ -639,7 +641,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len, (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE; - newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping); + newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, + mapping, GFP_ATOMIC); if (unlikely(!newskb)) { /* * If we can't allocate a new RX buffer, dump @@ -1556,7 +1559,8 @@ int ipoib_cm_dev_init(struct net_device *dev) for (i = 0; i < ipoib_recvq_size; ++i) { if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i, priv->cm.num_frags - 1, - priv->cm.srq_ring[i].mapping)) { + priv->cm.srq_ring[i].mapping, + GFP_KERNEL)) { ipoib_warn(priv, "failed to allocate " "receive buffer %d\n", i); ipoib_cm_dev_cleanup(dev); diff --git 
a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 196b1d13cbcb..6a7003ddb0be 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -685,15 +685,13 @@ int ipoib_ib_dev_open(struct net_device *dev) ret = ipoib_ib_post_receives(dev); if (ret) { ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret); - ipoib_ib_dev_stop(dev, 1); - return -1; + goto dev_stop; } ret = ipoib_cm_dev_open(dev); if (ret) { ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret); - ipoib_ib_dev_stop(dev, 1); - return -1; + goto dev_stop; } clear_bit(IPOIB_STOP_REAPER, &priv->flags); @@ -704,6 +702,11 @@ int ipoib_ib_dev_open(struct net_device *dev) napi_enable(&priv->napi); return 0; +dev_stop: + if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) + napi_enable(&priv->napi); + ipoib_ib_dev_stop(dev, 1); + return -1; } static void ipoib_pkey_dev_check_presence(struct net_device *dev) @@ -746,10 +749,8 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush) if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { mutex_lock(&pkey_mutex); set_bit(IPOIB_PKEY_STOP, &priv->flags); - cancel_delayed_work(&priv->pkey_poll_task); + cancel_delayed_work_sync(&priv->pkey_poll_task); mutex_unlock(&pkey_mutex); - if (flush) - flush_workqueue(ipoib_workqueue); } ipoib_mcast_stop_thread(dev, flush); @@ -974,7 +975,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, u16 new_index; int result; - mutex_lock(&priv->vlan_mutex); + down_read(&priv->vlan_rwsem); /* * Flush any child interfaces too -- they might be up even if @@ -983,7 +984,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, list_for_each_entry(cpriv, &priv->child_intfs, list) __ipoib_ib_dev_flush(cpriv, level); - mutex_unlock(&priv->vlan_mutex); + up_read(&priv->vlan_rwsem); if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) { /* for non-child devices must check/update the pkey value here */ @@ -1081,6 +1082,11 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) struct ipoib_dev_priv *priv = netdev_priv(dev); ipoib_dbg(priv, "cleaning up ib_dev\n"); + /* + * We must make sure there are no more (path) completions + * that may wish to touch priv fields that are no longer valid + */ + ipoib_flush_paths(dev); ipoib_mcast_stop_thread(dev, 1); ipoib_mcast_dev_flush(dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 82cec1af902c..d64ed05fb082 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -119,7 +119,7 @@ int ipoib_open(struct net_device *dev) struct ipoib_dev_priv *cpriv; /* Bring up any child interfaces too */ - mutex_lock(&priv->vlan_mutex); + down_read(&priv->vlan_rwsem); list_for_each_entry(cpriv, &priv->child_intfs, list) { int flags; @@ -129,7 +129,7 @@ int ipoib_open(struct net_device *dev) dev_change_flags(cpriv->dev, flags | IFF_UP); } - mutex_unlock(&priv->vlan_mutex); + up_read(&priv->vlan_rwsem); } netif_start_queue(dev); @@ -162,7 +162,7 @@ static int ipoib_stop(struct net_device *dev) struct ipoib_dev_priv *cpriv; /* Bring down any child interfaces too */ - mutex_lock(&priv->vlan_mutex); + down_read(&priv->vlan_rwsem); list_for_each_entry(cpriv, &priv->child_intfs, list) { int flags; @@ -172,7 +172,7 @@ static int ipoib_stop(struct net_device *dev) dev_change_flags(cpriv->dev, flags & ~IFF_UP); } - mutex_unlock(&priv->vlan_mutex); + up_read(&priv->vlan_rwsem); } return 0; @@ -1350,7 +1350,7 @@ void 
ipoib_setup(struct net_device *dev) ipoib_set_ethtool_ops(dev); - netif_napi_add(dev, &priv->napi, ipoib_poll, 100); + netif_napi_add(dev, &priv->napi, ipoib_poll, NAPI_POLL_WEIGHT); dev->watchdog_timeo = HZ; @@ -1372,7 +1372,7 @@ void ipoib_setup(struct net_device *dev) spin_lock_init(&priv->lock); - mutex_init(&priv->vlan_mutex); + init_rwsem(&priv->vlan_rwsem); INIT_LIST_HEAD(&priv->path_list); INIT_LIST_HEAD(&priv->child_intfs); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index cecb98a4c662..d4e005720d01 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -386,8 +386,10 @@ static int ipoib_mcast_join_complete(int status, mcast->mcmember.mgid.raw, status); /* We trap for port events ourselves. */ - if (status == -ENETRESET) - return 0; + if (status == -ENETRESET) { + status = 0; + goto out; + } if (!status) status = ipoib_mcast_join_finish(mcast, &multicast->rec); @@ -407,7 +409,8 @@ static int ipoib_mcast_join_complete(int status, if (mcast == priv->broadcast) queue_work(ipoib_workqueue, &priv->carrier_on_task); - return 0; + status = 0; + goto out; } if (mcast->logcount++ < 20) { @@ -434,7 +437,8 @@ static int ipoib_mcast_join_complete(int status, mcast->backoff * HZ); spin_unlock_irq(&priv->lock); mutex_unlock(&mcast_mutex); - +out: + complete(&mcast->done); return status; } @@ -484,11 +488,15 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, } set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + init_completion(&mcast->done); + set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags); + mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, &rec, comp_mask, GFP_KERNEL, ipoib_mcast_join_complete, mcast); if (IS_ERR(mcast->mc)) { clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + complete(&mcast->done); ret = PTR_ERR(mcast->mc); ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret); @@ -510,10 +518,18 @@ void ipoib_mcast_join_task(struct work_struct *work) struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, mcast_task.work); struct net_device *dev = priv->dev; + struct ib_port_attr port_attr; if (!test_bit(IPOIB_MCAST_RUN, &priv->flags)) return; + if (ib_query_port(priv->ca, priv->port, &port_attr) || + port_attr.state != IB_PORT_ACTIVE) { + ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n", + port_attr.state); + return; + } + if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid)) ipoib_warn(priv, "ib_query_gid() failed\n"); else @@ -751,6 +767,11 @@ void ipoib_mcast_dev_flush(struct net_device *dev) spin_unlock_irqrestore(&priv->lock, flags); + /* seperate between the wait to the leave*/ + list_for_each_entry_safe(mcast, tmcast, &remove_list, list) + if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags)) + wait_for_completion(&mcast->done); + list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { ipoib_mcast_leave(dev, mcast); ipoib_mcast_free(mcast); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c index f81abe16cf09..c29b5c838833 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c @@ -142,10 +142,10 @@ static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head priv = netdev_priv(dev); ppriv = netdev_priv(priv->parent); - mutex_lock(&ppriv->vlan_mutex); + down_write(&ppriv->vlan_rwsem); 
unregister_netdevice_queue(dev, head); list_del(&priv->list); - mutex_unlock(&ppriv->vlan_mutex); + up_write(&ppriv->vlan_rwsem); } static size_t ipoib_get_size(const struct net_device *dev) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 8292554bccb5..9fad7b5ac8b9 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -140,7 +140,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) if (!rtnl_trylock()) return restart_syscall(); - mutex_lock(&ppriv->vlan_mutex); + down_write(&ppriv->vlan_rwsem); /* * First ensure this isn't a duplicate. We check the parent device and @@ -163,7 +163,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD); out: - mutex_unlock(&ppriv->vlan_mutex); + up_write(&ppriv->vlan_rwsem); if (result) free_netdev(priv->dev); @@ -185,7 +185,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) if (!rtnl_trylock()) return restart_syscall(); - mutex_lock(&ppriv->vlan_mutex); + + down_write(&ppriv->vlan_rwsem); list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { if (priv->pkey == pkey && priv->child_type == IPOIB_LEGACY_CHILD) { @@ -195,7 +196,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) break; } } - mutex_unlock(&ppriv->vlan_mutex); + up_write(&ppriv->vlan_rwsem); + rtnl_unlock(); if (dev) { diff --git a/drivers/infiniband/ulp/isert/Kconfig b/drivers/infiniband/ulp/isert/Kconfig index ce3fd32167dc..02f9759ebb1a 100644 --- a/drivers/infiniband/ulp/isert/Kconfig +++ b/drivers/infiniband/ulp/isert/Kconfig @@ -1,5 +1,5 @@ config INFINIBAND_ISERT - tristate "iSCSI Extentions for RDMA (iSER) target support" + tristate "iSCSI Extensions for RDMA (iSER) target support" depends on INET && INFINIBAND_ADDR_TRANS && TARGET_CORE && ISCSI_TARGET ---help--- - Support for iSCSI Extentions for RDMA (iSER) Target on Infiniband fabrics. + Support for iSCSI Extensions for RDMA (iSER) Target on Infiniband fabrics. diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index f93baf8254c4..a88631918e85 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -46,6 +46,7 @@ #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_dbg.h> +#include <scsi/scsi_tcq.h> #include <scsi/srp.h> #include <scsi/scsi_transport_srp.h> @@ -86,6 +87,32 @@ module_param(topspin_workarounds, int, 0444); MODULE_PARM_DESC(topspin_workarounds, "Enable workarounds for Topspin/Cisco SRP target bugs if != 0"); +static struct kernel_param_ops srp_tmo_ops; + +static int srp_reconnect_delay = 10; +module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts"); + +static int srp_fast_io_fail_tmo = 15; +module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(fast_io_fail_tmo, + "Number of seconds between the observation of a transport" + " layer error and failing all I/O. \"off\" means that this" + " functionality is disabled."); + +static int srp_dev_loss_tmo = 600; +module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dev_loss_tmo, + "Maximum number of seconds that the SRP transport should" + " insulate transport layer errors. 
After this time has been" + " exceeded the SCSI host is removed. Should be" + " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT) + " if fast_io_fail_tmo has not been set. \"off\" means that" + " this functionality is disabled."); + static void srp_add_one(struct ib_device *device); static void srp_remove_one(struct ib_device *device); static void srp_recv_completion(struct ib_cq *cq, void *target_ptr); @@ -102,6 +129,48 @@ static struct ib_client srp_client = { static struct ib_sa_client srp_sa_client; +static int srp_tmo_get(char *buffer, const struct kernel_param *kp) +{ + int tmo = *(int *)kp->arg; + + if (tmo >= 0) + return sprintf(buffer, "%d", tmo); + else + return sprintf(buffer, "off"); +} + +static int srp_tmo_set(const char *val, const struct kernel_param *kp) +{ + int tmo, res; + + if (strncmp(val, "off", 3) != 0) { + res = kstrtoint(val, 0, &tmo); + if (res) + goto out; + } else { + tmo = -1; + } + if (kp->arg == &srp_reconnect_delay) + res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo, + srp_dev_loss_tmo); + else if (kp->arg == &srp_fast_io_fail_tmo) + res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo); + else + res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo, + tmo); + if (res) + goto out; + *(int *)kp->arg = tmo; + +out: + return res; +} + +static struct kernel_param_ops srp_tmo_ops = { + .get = srp_tmo_get, + .set = srp_tmo_set, +}; + static inline struct srp_target_port *host_to_target(struct Scsi_Host *host) { return (struct srp_target_port *) host->hostdata; @@ -231,16 +300,16 @@ static int srp_create_target_ib(struct srp_target_port *target) return -ENOMEM; recv_cq = ib_create_cq(target->srp_host->srp_dev->dev, - srp_recv_completion, NULL, target, SRP_RQ_SIZE, - target->comp_vector); + srp_recv_completion, NULL, target, + target->queue_size, target->comp_vector); if (IS_ERR(recv_cq)) { ret = PTR_ERR(recv_cq); goto err; } send_cq = ib_create_cq(target->srp_host->srp_dev->dev, - srp_send_completion, NULL, target, SRP_SQ_SIZE, - target->comp_vector); + srp_send_completion, NULL, target, + target->queue_size, target->comp_vector); if (IS_ERR(send_cq)) { ret = PTR_ERR(send_cq); goto err_recv_cq; @@ -249,8 +318,8 @@ static int srp_create_target_ib(struct srp_target_port *target) ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP); init_attr->event_handler = srp_qp_event; - init_attr->cap.max_send_wr = SRP_SQ_SIZE; - init_attr->cap.max_recv_wr = SRP_RQ_SIZE; + init_attr->cap.max_send_wr = target->queue_size; + init_attr->cap.max_recv_wr = target->queue_size; init_attr->cap.max_recv_sge = 1; init_attr->cap.max_send_sge = 1; init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; @@ -296,6 +365,10 @@ err: return ret; } +/* + * Note: this function may be called without srp_alloc_iu_bufs() having been + * invoked. Hence the target->[rt]x_ring checks. 
+ */ static void srp_free_target_ib(struct srp_target_port *target) { int i; @@ -307,10 +380,18 @@ static void srp_free_target_ib(struct srp_target_port *target) target->qp = NULL; target->send_cq = target->recv_cq = NULL; - for (i = 0; i < SRP_RQ_SIZE; ++i) - srp_free_iu(target->srp_host, target->rx_ring[i]); - for (i = 0; i < SRP_SQ_SIZE; ++i) - srp_free_iu(target->srp_host, target->tx_ring[i]); + if (target->rx_ring) { + for (i = 0; i < target->queue_size; ++i) + srp_free_iu(target->srp_host, target->rx_ring[i]); + kfree(target->rx_ring); + target->rx_ring = NULL; + } + if (target->tx_ring) { + for (i = 0; i < target->queue_size; ++i) + srp_free_iu(target->srp_host, target->tx_ring[i]); + kfree(target->tx_ring); + target->tx_ring = NULL; + } } static void srp_path_rec_completion(int status, @@ -390,7 +471,7 @@ static int srp_send_req(struct srp_target_port *target) req->param.responder_resources = 4; req->param.remote_cm_response_timeout = 20; req->param.local_cm_response_timeout = 20; - req->param.retry_count = 7; + req->param.retry_count = target->tl_retry_count; req->param.rnr_retry_count = 7; req->param.max_cm_retries = 15; @@ -496,7 +577,11 @@ static void srp_free_req_data(struct srp_target_port *target) struct srp_request *req; int i; - for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) { + if (!target->req_ring) + return; + + for (i = 0; i < target->req_ring_size; ++i) { + req = &target->req_ring[i]; kfree(req->fmr_list); kfree(req->map_page); if (req->indirect_dma_addr) { @@ -506,6 +591,50 @@ static void srp_free_req_data(struct srp_target_port *target) } kfree(req->indirect_desc); } + + kfree(target->req_ring); + target->req_ring = NULL; +} + +static int srp_alloc_req_data(struct srp_target_port *target) +{ + struct srp_device *srp_dev = target->srp_host->srp_dev; + struct ib_device *ibdev = srp_dev->dev; + struct srp_request *req; + dma_addr_t dma_addr; + int i, ret = -ENOMEM; + + INIT_LIST_HEAD(&target->free_reqs); + + target->req_ring = kzalloc(target->req_ring_size * + sizeof(*target->req_ring), GFP_KERNEL); + if (!target->req_ring) + goto out; + + for (i = 0; i < target->req_ring_size; ++i) { + req = &target->req_ring[i]; + req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *), + GFP_KERNEL); + req->map_page = kmalloc(SRP_FMR_SIZE * sizeof(void *), + GFP_KERNEL); + req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL); + if (!req->fmr_list || !req->map_page || !req->indirect_desc) + goto out; + + dma_addr = ib_dma_map_single(ibdev, req->indirect_desc, + target->indirect_size, + DMA_TO_DEVICE); + if (ib_dma_mapping_error(ibdev, dma_addr)) + goto out; + + req->indirect_dma_addr = dma_addr; + req->index = i; + list_add_tail(&req->list, &target->free_reqs); + } + ret = 0; + +out: + return ret; } /** @@ -528,12 +657,20 @@ static void srp_remove_target(struct srp_target_port *target) WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); srp_del_scsi_host_attr(target->scsi_host); + srp_rport_get(target->rport); srp_remove_host(target->scsi_host); scsi_remove_host(target->scsi_host); srp_disconnect_target(target); ib_destroy_cm_id(target->cm_id); srp_free_target_ib(target); + cancel_work_sync(&target->tl_err_work); + srp_rport_put(target->rport); srp_free_req_data(target); + + spin_lock(&target->srp_host->target_lock); + list_del(&target->list); + spin_unlock(&target->srp_host->target_lock); + scsi_host_put(target->scsi_host); } @@ -545,10 +682,6 @@ static void srp_remove_work(struct work_struct *work) WARN_ON_ONCE(target->state != 
SRP_TARGET_REMOVED); srp_remove_target(target); - - spin_lock(&target->srp_host->target_lock); - list_del(&target->list); - spin_unlock(&target->srp_host->target_lock); } static void srp_rport_delete(struct srp_rport *rport) @@ -686,23 +819,42 @@ static void srp_free_req(struct srp_target_port *target, spin_unlock_irqrestore(&target->lock, flags); } -static void srp_reset_req(struct srp_target_port *target, struct srp_request *req) +static void srp_finish_req(struct srp_target_port *target, + struct srp_request *req, int result) { struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL); if (scmnd) { srp_free_req(target, req, scmnd, 0); - scmnd->result = DID_RESET << 16; + scmnd->result = result; scmnd->scsi_done(scmnd); } } -static int srp_reconnect_target(struct srp_target_port *target) +static void srp_terminate_io(struct srp_rport *rport) { - struct Scsi_Host *shost = target->scsi_host; - int i, ret; + struct srp_target_port *target = rport->lld_data; + int i; - scsi_target_block(&shost->shost_gendev); + for (i = 0; i < target->req_ring_size; ++i) { + struct srp_request *req = &target->req_ring[i]; + srp_finish_req(target, req, DID_TRANSPORT_FAILFAST << 16); + } +} + +/* + * It is up to the caller to ensure that srp_rport_reconnect() calls are + * serialized and that no concurrent srp_queuecommand(), srp_abort(), + * srp_reset_device() or srp_reset_host() calls will occur while this function + * is in progress. One way to realize that is not to call this function + * directly but to call srp_reconnect_rport() instead since that last function + * serializes calls of this function via rport->mutex and also blocks + * srp_queuecommand() calls before invoking this function. + */ +static int srp_rport_reconnect(struct srp_rport *rport) +{ + struct srp_target_port *target = rport->lld_data; + int i, ret; srp_disconnect_target(target); /* @@ -721,41 +873,21 @@ static int srp_reconnect_target(struct srp_target_port *target) else srp_create_target_ib(target); - for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { + for (i = 0; i < target->req_ring_size; ++i) { struct srp_request *req = &target->req_ring[i]; - if (req->scmnd) - srp_reset_req(target, req); + srp_finish_req(target, req, DID_RESET << 16); } INIT_LIST_HEAD(&target->free_tx); - for (i = 0; i < SRP_SQ_SIZE; ++i) + for (i = 0; i < target->queue_size; ++i) list_add(&target->tx_ring[i]->list, &target->free_tx); if (ret == 0) ret = srp_connect_target(target); - scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING : - SDEV_TRANSPORT_OFFLINE); - target->transport_offline = !!ret; - - if (ret) - goto err; - - shost_printk(KERN_INFO, target->scsi_host, PFX "reconnect succeeded\n"); - - return ret; - -err: - shost_printk(KERN_ERR, target->scsi_host, - PFX "reconnect failed (%d), removing target port.\n", ret); - - /* - * We couldn't reconnect, so kill our target port off. - * However, we have to defer the real removal because we - * are in the context of the SCSI error handler now, which - * will deadlock if we call scsi_remove_host(). 
- */ - srp_queue_remove_work(target); + if (ret == 0) + shost_printk(KERN_INFO, target->scsi_host, + PFX "reconnect succeeded\n"); return ret; } @@ -1302,15 +1434,30 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) PFX "Recv failed with error code %d\n", res); } -static void srp_handle_qp_err(enum ib_wc_status wc_status, - enum ib_wc_opcode wc_opcode, +/** + * srp_tl_err_work() - handle a transport layer error + * + * Note: This function may get invoked before the rport has been created, + * hence the target->rport test. + */ +static void srp_tl_err_work(struct work_struct *work) +{ + struct srp_target_port *target; + + target = container_of(work, struct srp_target_port, tl_err_work); + if (target->rport) + srp_start_tl_fail_timers(target->rport); +} + +static void srp_handle_qp_err(enum ib_wc_status wc_status, bool send_err, struct srp_target_port *target) { if (target->connected && !target->qp_in_error) { shost_printk(KERN_ERR, target->scsi_host, PFX "failed %s status %d\n", - wc_opcode & IB_WC_RECV ? "receive" : "send", + send_err ? "send" : "receive", wc_status); + queue_work(system_long_wq, &target->tl_err_work); } target->qp_in_error = true; } @@ -1325,7 +1472,7 @@ static void srp_recv_completion(struct ib_cq *cq, void *target_ptr) if (likely(wc.status == IB_WC_SUCCESS)) { srp_handle_recv(target, &wc); } else { - srp_handle_qp_err(wc.status, wc.opcode, target); + srp_handle_qp_err(wc.status, false, target); } } } @@ -1341,7 +1488,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr) iu = (struct srp_iu *) (uintptr_t) wc.wr_id; list_add(&iu->list, &target->free_tx); } else { - srp_handle_qp_err(wc.status, wc.opcode, target); + srp_handle_qp_err(wc.status, true, target); } } } @@ -1349,17 +1496,29 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr) static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(shost); + struct srp_rport *rport = target->rport; struct srp_request *req; struct srp_iu *iu; struct srp_cmd *cmd; struct ib_device *dev; unsigned long flags; - int len; + int len, result; + const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler; + + /* + * The SCSI EH thread is the only context from which srp_queuecommand() + * can get invoked for blocked devices (SDEV_BLOCK / + * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by + * locking the rport mutex if invoked from inside the SCSI EH. + */ + if (in_scsi_eh) + mutex_lock(&rport->mutex); - if (unlikely(target->transport_offline)) { - scmnd->result = DID_NO_CONNECT << 16; + result = srp_chkready(target->rport); + if (unlikely(result)) { + scmnd->result = result; scmnd->scsi_done(scmnd); - return 0; + goto unlock_rport; } spin_lock_irqsave(&target->lock, flags); @@ -1404,6 +1563,10 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) goto err_unmap; } +unlock_rport: + if (in_scsi_eh) + mutex_unlock(&rport->mutex); + return 0; err_unmap: @@ -1418,14 +1581,30 @@ err_iu: err_unlock: spin_unlock_irqrestore(&target->lock, flags); + if (in_scsi_eh) + mutex_unlock(&rport->mutex); + return SCSI_MLQUEUE_HOST_BUSY; } +/* + * Note: the resources allocated in this function are freed in + * srp_free_target_ib(). 
+ */ static int srp_alloc_iu_bufs(struct srp_target_port *target) { int i; - for (i = 0; i < SRP_RQ_SIZE; ++i) { + target->rx_ring = kzalloc(target->queue_size * sizeof(*target->rx_ring), + GFP_KERNEL); + if (!target->rx_ring) + goto err_no_ring; + target->tx_ring = kzalloc(target->queue_size * sizeof(*target->tx_ring), + GFP_KERNEL); + if (!target->tx_ring) + goto err_no_ring; + + for (i = 0; i < target->queue_size; ++i) { target->rx_ring[i] = srp_alloc_iu(target->srp_host, target->max_ti_iu_len, GFP_KERNEL, DMA_FROM_DEVICE); @@ -1433,7 +1612,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target) goto err; } - for (i = 0; i < SRP_SQ_SIZE; ++i) { + for (i = 0; i < target->queue_size; ++i) { target->tx_ring[i] = srp_alloc_iu(target->srp_host, target->max_iu_len, GFP_KERNEL, DMA_TO_DEVICE); @@ -1446,16 +1625,18 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target) return 0; err: - for (i = 0; i < SRP_RQ_SIZE; ++i) { + for (i = 0; i < target->queue_size; ++i) { srp_free_iu(target->srp_host, target->rx_ring[i]); - target->rx_ring[i] = NULL; - } - - for (i = 0; i < SRP_SQ_SIZE; ++i) { srp_free_iu(target->srp_host, target->tx_ring[i]); - target->tx_ring[i] = NULL; } + +err_no_ring: + kfree(target->tx_ring); + target->tx_ring = NULL; + kfree(target->rx_ring); + target->rx_ring = NULL; + return -ENOMEM; } @@ -1506,6 +1687,9 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id, target->scsi_host->can_queue = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE, target->scsi_host->can_queue); + target->scsi_host->cmd_per_lun + = min_t(int, target->scsi_host->can_queue, + target->scsi_host->cmd_per_lun); } else { shost_printk(KERN_WARNING, target->scsi_host, PFX "Unhandled RSP opcode %#x\n", lrsp->opcode); @@ -1513,7 +1697,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id, goto error; } - if (!target->rx_ring[0]) { + if (!target->rx_ring) { ret = srp_alloc_iu_bufs(target); if (ret) goto error; @@ -1533,7 +1717,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id, if (ret) goto error_free; - for (i = 0; i < SRP_RQ_SIZE; i++) { + for (i = 0; i < target->queue_size; i++) { struct srp_iu *iu = target->rx_ring[i]; ret = srp_post_recv(target, iu); if (ret) @@ -1672,6 +1856,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) if (ib_send_cm_drep(cm_id, NULL, 0)) shost_printk(KERN_ERR, target->scsi_host, PFX "Sending CM DREP failed\n"); + queue_work(system_long_wq, &target->tl_err_work); break; case IB_CM_TIMEWAIT_EXIT: @@ -1698,9 +1883,61 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) return 0; } +/** + * srp_change_queue_type - changing device queue tag type + * @sdev: scsi device struct + * @tag_type: requested tag type + * + * Returns queue tag type. + */ +static int +srp_change_queue_type(struct scsi_device *sdev, int tag_type) +{ + if (sdev->tagged_supported) { + scsi_set_tag_type(sdev, tag_type); + if (tag_type) + scsi_activate_tcq(sdev, sdev->queue_depth); + else + scsi_deactivate_tcq(sdev, sdev->queue_depth); + } else + tag_type = 0; + + return tag_type; +} + +/** + * srp_change_queue_depth - setting device queue depth + * @sdev: scsi device struct + * @qdepth: requested queue depth + * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP + * (see include/scsi/scsi_host.h for definition) + * + * Returns queue depth. 
+ */ +static int +srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) +{ + struct Scsi_Host *shost = sdev->host; + int max_depth; + if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) { + max_depth = shost->can_queue; + if (!sdev->tagged_supported) + max_depth = 1; + if (qdepth > max_depth) + qdepth = max_depth; + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + } else if (reason == SCSI_QDEPTH_QFULL) + scsi_track_queue_full(sdev, qdepth); + else + return -EOPNOTSUPP; + + return sdev->queue_depth; +} + static int srp_send_tsk_mgmt(struct srp_target_port *target, u64 req_tag, unsigned int lun, u8 func) { + struct srp_rport *rport = target->rport; struct ib_device *dev = target->srp_host->srp_dev->dev; struct srp_iu *iu; struct srp_tsk_mgmt *tsk_mgmt; @@ -1710,12 +1947,20 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target, init_completion(&target->tsk_mgmt_done); + /* + * Lock the rport mutex to avoid that srp_create_target_ib() is + * invoked while a task management function is being sent. + */ + mutex_lock(&rport->mutex); spin_lock_irq(&target->lock); iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT); spin_unlock_irq(&target->lock); - if (!iu) + if (!iu) { + mutex_unlock(&rport->mutex); + return -1; + } ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, DMA_TO_DEVICE); @@ -1732,8 +1977,11 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target, DMA_TO_DEVICE); if (srp_post_send(target, iu, sizeof *tsk_mgmt)) { srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT); + mutex_unlock(&rport->mutex); + return -1; } + mutex_unlock(&rport->mutex); if (!wait_for_completion_timeout(&target->tsk_mgmt_done, msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) @@ -1751,11 +1999,11 @@ static int srp_abort(struct scsi_cmnd *scmnd) shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); if (!req || !srp_claim_req(target, req, scmnd)) - return FAILED; + return SUCCESS; if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, SRP_TSK_ABORT_TASK) == 0) ret = SUCCESS; - else if (target->transport_offline) + else if (target->rport->state == SRP_RPORT_LOST) ret = FAST_IO_FAIL; else ret = FAILED; @@ -1779,10 +2027,10 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) if (target->tsk_mgmt_status) return FAILED; - for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { + for (i = 0; i < target->req_ring_size; ++i) { struct srp_request *req = &target->req_ring[i]; if (req->scmnd && req->scmnd->device == scmnd->device) - srp_reset_req(target, req); + srp_finish_req(target, req, DID_RESET << 16); } return SUCCESS; @@ -1791,14 +2039,10 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) static int srp_reset_host(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); - int ret = FAILED; shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n"); - if (!srp_reconnect_target(target)) - ret = SUCCESS; - - return ret; + return srp_reconnect_rport(target->rport) == 0 ? 
SUCCESS : FAILED; } static int srp_slave_configure(struct scsi_device *sdev) @@ -1851,6 +2095,14 @@ static ssize_t show_pkey(struct device *dev, struct device_attribute *attr, return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey)); } +static ssize_t show_sgid(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + return sprintf(buf, "%pI6\n", target->path.sgid.raw); +} + static ssize_t show_dgid(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1907,6 +2159,14 @@ static ssize_t show_comp_vector(struct device *dev, return sprintf(buf, "%d\n", target->comp_vector); } +static ssize_t show_tl_retry_count(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + return sprintf(buf, "%d\n", target->tl_retry_count); +} + static ssize_t show_cmd_sg_entries(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1927,6 +2187,7 @@ static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL); static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); +static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL); static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL); static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL); @@ -1934,6 +2195,7 @@ static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL); +static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL); static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL); static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL); @@ -1942,6 +2204,7 @@ static struct device_attribute *srp_host_attrs[] = { &dev_attr_ioc_guid, &dev_attr_service_id, &dev_attr_pkey, + &dev_attr_sgid, &dev_attr_dgid, &dev_attr_orig_dgid, &dev_attr_req_lim, @@ -1949,6 +2212,7 @@ static struct device_attribute *srp_host_attrs[] = { &dev_attr_local_ib_port, &dev_attr_local_ib_device, &dev_attr_comp_vector, + &dev_attr_tl_retry_count, &dev_attr_cmd_sg_entries, &dev_attr_allow_ext_sg, NULL @@ -1961,14 +2225,16 @@ static struct scsi_host_template srp_template = { .slave_configure = srp_slave_configure, .info = srp_target_info, .queuecommand = srp_queuecommand, + .change_queue_depth = srp_change_queue_depth, + .change_queue_type = srp_change_queue_type, .eh_abort_handler = srp_abort, .eh_device_reset_handler = srp_reset_device, .eh_host_reset_handler = srp_reset_host, .skip_settle_delay = true, .sg_tablesize = SRP_DEF_SG_TABLESIZE, - .can_queue = SRP_CMD_SQ_SIZE, + .can_queue = SRP_DEFAULT_CMD_SQ_SIZE, .this_id = -1, - .cmd_per_lun = SRP_CMD_SQ_SIZE, + .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = srp_host_attrs }; @@ -1994,6 +2260,7 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target) } rport->lld_data = target; + target->rport = rport; spin_lock(&host->target_lock); list_add_tail(&target->list, &host->target_list); @@ -2073,6 +2340,8 @@ enum { SRP_OPT_ALLOW_EXT_SG = 1 << 10, SRP_OPT_SG_TABLESIZE = 1 << 11, SRP_OPT_COMP_VECTOR = 1 << 12, + SRP_OPT_TL_RETRY_COUNT = 1 << 13, + SRP_OPT_QUEUE_SIZE 
= 1 << 14, SRP_OPT_ALL = (SRP_OPT_ID_EXT | SRP_OPT_IOC_GUID | SRP_OPT_DGID | @@ -2094,6 +2363,8 @@ static const match_table_t srp_opt_tokens = { { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" }, { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" }, { SRP_OPT_COMP_VECTOR, "comp_vector=%u" }, + { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" }, + { SRP_OPT_QUEUE_SIZE, "queue_size=%d" }, { SRP_OPT_ERR, NULL } }; @@ -2188,13 +2459,25 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) target->scsi_host->max_sectors = token; break; + case SRP_OPT_QUEUE_SIZE: + if (match_int(args, &token) || token < 1) { + pr_warn("bad queue_size parameter '%s'\n", p); + goto out; + } + target->scsi_host->can_queue = token; + target->queue_size = token + SRP_RSP_SQ_SIZE + + SRP_TSK_MGMT_SQ_SIZE; + if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN)) + target->scsi_host->cmd_per_lun = token; + break; + case SRP_OPT_MAX_CMD_PER_LUN: - if (match_int(args, &token)) { + if (match_int(args, &token) || token < 1) { pr_warn("bad max cmd_per_lun parameter '%s'\n", p); goto out; } - target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE); + target->scsi_host->cmd_per_lun = token; break; case SRP_OPT_IO_CLASS: @@ -2257,6 +2540,15 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) target->comp_vector = token; break; + case SRP_OPT_TL_RETRY_COUNT: + if (match_int(args, &token) || token < 2 || token > 7) { + pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n", + p); + goto out; + } + target->tl_retry_count = token; + break; + default: pr_warn("unknown parameter or missing value '%s' in target creation request\n", p); @@ -2273,6 +2565,12 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) pr_warn("target creation request is missing parameter '%s'\n", srp_opt_tokens[i].pattern); + if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue + && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN)) + pr_warn("cmd_per_lun = %d > queue_size = %d\n", + target->scsi_host->cmd_per_lun, + target->scsi_host->can_queue); + out: kfree(options); return ret; @@ -2287,8 +2585,7 @@ static ssize_t srp_create_target(struct device *dev, struct Scsi_Host *target_host; struct srp_target_port *target; struct ib_device *ibdev = host->srp_dev->dev; - dma_addr_t dma_addr; - int i, ret; + int ret; target_host = scsi_host_alloc(&srp_template, sizeof (struct srp_target_port)); @@ -2311,11 +2608,15 @@ static ssize_t srp_create_target(struct device *dev, target->cmd_sg_cnt = cmd_sg_entries; target->sg_tablesize = indirect_sg_entries ? 
: cmd_sg_entries; target->allow_ext_sg = allow_ext_sg; + target->tl_retry_count = 7; + target->queue_size = SRP_DEFAULT_QUEUE_SIZE; ret = srp_parse_options(buf, target); if (ret) goto err; + target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE; + if (!srp_conn_unique(target->srp_host, target)) { shost_printk(KERN_INFO, target->scsi_host, PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n", @@ -2339,31 +2640,13 @@ static ssize_t srp_create_target(struct device *dev, sizeof (struct srp_indirect_buf) + target->cmd_sg_cnt * sizeof (struct srp_direct_buf); + INIT_WORK(&target->tl_err_work, srp_tl_err_work); INIT_WORK(&target->remove_work, srp_remove_work); spin_lock_init(&target->lock); INIT_LIST_HEAD(&target->free_tx); - INIT_LIST_HEAD(&target->free_reqs); - for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { - struct srp_request *req = &target->req_ring[i]; - - req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *), - GFP_KERNEL); - req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *), - GFP_KERNEL); - req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL); - if (!req->fmr_list || !req->map_page || !req->indirect_desc) - goto err_free_mem; - - dma_addr = ib_dma_map_single(ibdev, req->indirect_desc, - target->indirect_size, - DMA_TO_DEVICE); - if (ib_dma_mapping_error(ibdev, dma_addr)) - goto err_free_mem; - - req->indirect_dma_addr = dma_addr; - req->index = i; - list_add_tail(&req->list, &target->free_reqs); - } + ret = srp_alloc_req_data(target); + if (ret) + goto err_free_mem; ib_query_gid(ibdev, host->port, 0, &target->path.sgid); @@ -2612,7 +2895,14 @@ static void srp_remove_one(struct ib_device *device) } static struct srp_function_template ib_srp_transport_functions = { + .has_rport_state = true, + .reset_timer_if_blocked = true, + .reconnect_delay = &srp_reconnect_delay, + .fast_io_fail_tmo = &srp_fast_io_fail_tmo, + .dev_loss_tmo = &srp_dev_loss_tmo, + .reconnect = srp_rport_reconnect, .rport_delete = srp_rport_delete, + .terminate_rport_io = srp_terminate_io, }; static int __init srp_init_module(void) diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index e641088c14dc..575681063f38 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h @@ -57,14 +57,11 @@ enum { SRP_MAX_LUN = 512, SRP_DEF_SG_TABLESIZE = 12, - SRP_RQ_SHIFT = 6, - SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT, - - SRP_SQ_SIZE = SRP_RQ_SIZE, + SRP_DEFAULT_QUEUE_SIZE = 1 << 6, SRP_RSP_SQ_SIZE = 1, - SRP_REQ_SQ_SIZE = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE, SRP_TSK_MGMT_SQ_SIZE = 1, - SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE, + SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE - + SRP_TSK_MGMT_SQ_SIZE, SRP_TAG_NO_REQ = ~0U, SRP_TAG_TSK_MGMT = 1U << 31, @@ -140,7 +137,6 @@ struct srp_target_port { unsigned int cmd_sg_cnt; unsigned int indirect_size; bool allow_ext_sg; - bool transport_offline; /* Everything above this point is used in the hot path of * command processing. Try to keep them packed into cachelines. 
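The new queue_size login option, the SRP_DEFAULT_QUEUE_SIZE constant and the dynamically sized rings above are tied together by a small piece of arithmetic in srp_parse_options() and srp_create_target(). The sketch below is not part of the patch; it is an ordinary user-space C illustration of that arithmetic, with 128 used as a hypothetical queue_size value.

#include <stdio.h>

/* Mirrors the constants introduced in ib_srp.h by this patch. */
enum {
	SRP_RSP_SQ_SIZE		= 1,
	SRP_TSK_MGMT_SQ_SIZE	= 1,
	SRP_DEFAULT_QUEUE_SIZE	= 1 << 6,	/* 64 */
};

int main(void)
{
	int token = 128;	/* hypothetical "queue_size=128" login option */

	/* srp_parse_options(): the option sets can_queue directly and
	 * reserves extra slots for RSP and task-management IUs. */
	int can_queue  = token;
	int queue_size = token + SRP_RSP_SQ_SIZE + SRP_TSK_MGMT_SQ_SIZE;

	/* srp_create_target(): one slot is held back for task management,
	 * the remainder backs the request ring. */
	int req_ring_size = queue_size - SRP_TSK_MGMT_SQ_SIZE;

	printf("can_queue=%d queue_size=%d req_ring_size=%d\n",
	       can_queue, queue_size, req_ring_size);
	return 0;
}

With no queue_size option given, the defaults above work out to queue_size = 64 and can_queue = cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE = 62.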
@@ -153,10 +149,14 @@ struct srp_target_port { u16 io_class; struct srp_host *srp_host; struct Scsi_Host *scsi_host; + struct srp_rport *rport; char target_name[32]; unsigned int scsi_id; unsigned int sg_tablesize; + int queue_size; + int req_ring_size; int comp_vector; + int tl_retry_count; struct ib_sa_path_rec path; __be16 orig_dgid[8]; @@ -172,10 +172,11 @@ struct srp_target_port { int zero_req_lim; - struct srp_iu *tx_ring[SRP_SQ_SIZE]; - struct srp_iu *rx_ring[SRP_RQ_SIZE]; - struct srp_request req_ring[SRP_CMD_SQ_SIZE]; + struct srp_iu **tx_ring; + struct srp_iu **rx_ring; + struct srp_request *req_ring; + struct work_struct tl_err_work; struct work_struct remove_work; struct list_head list; diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig index 38b523a1ece0..a11ff74a5127 100644 --- a/drivers/input/Kconfig +++ b/drivers/input/Kconfig @@ -80,7 +80,7 @@ config INPUT_MATRIXKMAP comment "Userland interfaces" config INPUT_MOUSEDEV - tristate "Mouse interface" if EXPERT + tristate "Mouse interface" default y help Say Y here if you want your mouse to be accessible as char devices diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index b6ded17b3be3..a06e12552886 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -18,6 +18,8 @@ #include <linux/poll.h> #include <linux/sched.h> #include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> #include <linux/module.h> #include <linux/init.h> #include <linux/input/mt.h> @@ -369,7 +371,11 @@ static int evdev_release(struct inode *inode, struct file *file) mutex_unlock(&evdev->mutex); evdev_detach_client(evdev, client); - kfree(client); + + if (is_vmalloc_addr(client)) + vfree(client); + else + kfree(client); evdev_close_device(evdev); @@ -389,12 +395,14 @@ static int evdev_open(struct inode *inode, struct file *file) { struct evdev *evdev = container_of(inode->i_cdev, struct evdev, cdev); unsigned int bufsize = evdev_compute_buffer_size(evdev->handle.dev); + unsigned int size = sizeof(struct evdev_client) + + bufsize * sizeof(struct input_event); struct evdev_client *client; int error; - client = kzalloc(sizeof(struct evdev_client) + - bufsize * sizeof(struct input_event), - GFP_KERNEL); + client = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); + if (!client) + client = vzalloc(size); if (!client) return -ENOMEM; diff --git a/drivers/input/input.c b/drivers/input/input.c index e75d015024a1..846ccdd905b1 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -2052,7 +2052,7 @@ int input_register_device(struct input_dev *dev) if (dev->hint_events_per_packet < packet_size) dev->hint_events_per_packet = packet_size; - dev->max_vals = max(dev->hint_events_per_packet, packet_size) + 2; + dev->max_vals = dev->hint_events_per_packet + 2; dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL); if (!dev->vals) { error = -ENOMEM; diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index c1edd39bc5ba..bb174c1a9886 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -2,7 +2,7 @@ # Input core configuration # menuconfig INPUT_KEYBOARD - bool "Keyboards" if EXPERT || !X86 + bool "Keyboards" default y help Say Y here, and a list of supported keyboards will be displayed. @@ -67,7 +67,7 @@ config KEYBOARD_ATARI module will be called atakbd. 
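The evdev_open()/evdev_release() hunks a little further up switch the per-client buffer to an opportunistic allocation: try kzalloc() quietly first, fall back to vzalloc() when the buffer is too large for contiguous pages, and pick the matching free routine with is_vmalloc_addr() on release. A minimal kernel-side sketch of that pattern follows; the helper names are hypothetical and not part of the patch.

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Zeroed allocation that prefers kmalloc but tolerates large sizes. */
static void *zalloc_fallback(size_t size)
{
	/* __GFP_NOWARN: a failed kmalloc here is expected, not an error. */
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;
}

static void zfree_fallback(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}

Later kernels factor exactly this pairing out into the kvzalloc()/kvfree() helpers.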
config KEYBOARD_ATKBD - tristate "AT keyboard" if EXPERT || !X86 + tristate "AT keyboard" default y select SERIO select SERIO_LIBPS2 @@ -525,7 +525,7 @@ config KEYBOARD_SUNKBD config KEYBOARD_SH_KEYSC tristate "SuperH KEYSC keypad support" - depends on SUPERH || ARCH_SHMOBILE + depends on SUPERH || ARM || COMPILE_TEST help Say Y here if you want to use a keypad attached to the KEYSC block on SuperH processors such as sh7722 and sh7343. diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index 440ce32462ba..2db13246eb8e 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c @@ -26,6 +26,7 @@ #include <linux/gpio_keys.h> #include <linux/workqueue.h> #include <linux/gpio.h> +#include <linux/of.h> #include <linux/of_platform.h> #include <linux/of_gpio.h> #include <linux/spinlock.h> diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c index cd5ed9e22168..4e428199e580 100644 --- a/drivers/input/keyboard/gpio_keys_polled.c +++ b/drivers/input/keyboard/gpio_keys_polled.c @@ -25,6 +25,7 @@ #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/gpio_keys.h> +#include <linux/of.h> #include <linux/of_platform.h> #include <linux/of_gpio.h> diff --git a/drivers/input/keyboard/lpc32xx-keys.c b/drivers/input/keyboard/lpc32xx-keys.c index 42181435fe67..8b1b01361ec6 100644 --- a/drivers/input/keyboard/lpc32xx-keys.c +++ b/drivers/input/keyboard/lpc32xx-keys.c @@ -383,7 +383,7 @@ static struct platform_driver lpc32xx_kscan_driver = { .name = DRV_NAME, .owner = THIS_MODULE, .pm = &lpc32xx_kscan_pm_ops, - .of_match_table = of_match_ptr(lpc32xx_kscan_match), + .of_match_table = lpc32xx_kscan_match, } }; diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c index b3e3edab6d9f..b31064981e96 100644 --- a/drivers/input/keyboard/nspire-keypad.c +++ b/drivers/input/keyboard/nspire-keypad.c @@ -143,8 +143,10 @@ static int nspire_keypad_open(struct input_dev *input) return error; error = nspire_keypad_chip_init(keypad); - if (error) + if (error) { + clk_disable_unprepare(keypad->clk); return error; + } return 0; } @@ -267,7 +269,7 @@ static struct platform_driver nspire_keypad_driver = { .driver = { .name = "nspire-keypad", .owner = THIS_MODULE, - .of_match_table = of_match_ptr(nspire_keypad_dt_match), + .of_match_table = nspire_keypad_dt_match, }, .probe = nspire_keypad_probe, }; diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c index a2e758d27584..186138c720c7 100644 --- a/drivers/input/keyboard/pxa27x_keypad.c +++ b/drivers/input/keyboard/pxa27x_keypad.c @@ -27,6 +27,7 @@ #include <linux/err.h> #include <linux/input/matrix_keypad.h> #include <linux/slab.h> +#include <linux/of.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c index 9cd20e6905a0..8508879f6faf 100644 --- a/drivers/input/keyboard/tegra-kbc.c +++ b/drivers/input/keyboard/tegra-kbc.c @@ -614,7 +614,7 @@ static int tegra_kbc_probe(struct platform_device *pdev) unsigned int keymap_rows; const struct of_device_id *match; - match = of_match_device(of_match_ptr(tegra_kbc_of_match), &pdev->dev); + match = of_match_device(tegra_kbc_of_match, &pdev->dev); kbc = devm_kzalloc(&pdev->dev, sizeof(*kbc), GFP_KERNEL); if (!kbc) { diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c index 5f7b427dd7ed..8bd24d52bf1b 100644 
--- a/drivers/input/keyboard/tnetv107x-keypad.c +++ b/drivers/input/keyboard/tnetv107x-keypad.c @@ -60,8 +60,8 @@ struct keypad_data { struct clk *clk; struct device *dev; spinlock_t lock; - u32 irq_press; - u32 irq_release; + int irq_press; + int irq_release; int rows, cols, row_shift; int debounce_ms, active_low; u32 prev_keys[3]; diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index aa51baaa9b1e..5f4967d01bc3 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -156,7 +156,7 @@ config INPUT_MAX8925_ONKEY config INPUT_MAX8997_HAPTIC tristate "MAXIM MAX8997 haptic controller support" - depends on HAVE_PWM && MFD_MAX8997 + depends on PWM && HAVE_PWM && MFD_MAX8997 select INPUT_FF_MEMLESS help This option enables device driver support for the haptic controller @@ -461,7 +461,7 @@ config INPUT_PCF8574 config INPUT_PWM_BEEPER tristate "PWM beeper support" - depends on HAVE_PWM || PWM + depends on PWM && HAVE_PWM help Say Y here to get support for PWM based beeper devices. diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c index 61891486067c..3a90b710e309 100644 --- a/drivers/input/misc/ad714x-spi.c +++ b/drivers/input/misc/ad714x-spi.c @@ -108,7 +108,6 @@ static int ad714x_spi_remove(struct spi_device *spi) struct ad714x_chip *chip = spi_get_drvdata(spi); ad714x_remove(chip); - spi_set_drvdata(spi, NULL); return 0; } diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c index 4f77f87847e8..b5d71d245854 100644 --- a/drivers/input/misc/cobalt_btns.c +++ b/drivers/input/misc/cobalt_btns.c @@ -131,7 +131,6 @@ static int cobalt_buttons_probe(struct platform_device *pdev) err_free_mem: input_free_polled_device(poll_dev); kfree(bdev); - dev_set_drvdata(&pdev->dev, NULL); return error; } @@ -144,7 +143,6 @@ static int cobalt_buttons_remove(struct platform_device *pdev) input_free_polled_device(bdev->poll_dev); iounmap(bdev->reg); kfree(bdev); - dev_set_drvdata(dev, NULL); return 0; } diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c index f3309696d053..59d4dcddf6de 100644 --- a/drivers/input/misc/mma8450.c +++ b/drivers/input/misc/mma8450.c @@ -168,7 +168,7 @@ static void mma8450_close(struct input_polled_dev *dev) * I2C init/probing/exit functions */ static int mma8450_probe(struct i2c_client *c, - const struct i2c_device_id *id) + const struct i2c_device_id *id) { struct input_polled_dev *idev; struct mma8450 *m; @@ -204,6 +204,8 @@ static int mma8450_probe(struct i2c_client *c, goto err_free_mem; } + i2c_set_clientdata(c, m); + return 0; err_free_mem: diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c index dce0d95943c5..6983ffbbfb94 100644 --- a/drivers/input/misc/mpu3050.c +++ b/drivers/input/misc/mpu3050.c @@ -383,6 +383,7 @@ static int mpu3050_probe(struct i2c_client *client, pm_runtime_enable(&client->dev); pm_runtime_set_autosuspend_delay(&client->dev, MPU3050_AUTO_DELAY); + i2c_set_clientdata(client, sensor); return 0; diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c index 2ff4d1c78ab8..940566e7be13 100644 --- a/drivers/input/misc/pwm-beeper.c +++ b/drivers/input/misc/pwm-beeper.c @@ -16,6 +16,7 @@ #include <linux/input.h> #include <linux/module.h> #include <linux/kernel.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/pwm.h> #include <linux/slab.h> diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c index fb4f8ac3343b..83fff38b86b3 100644 --- 
a/drivers/input/misc/rb532_button.c +++ b/drivers/input/misc/rb532_button.c @@ -87,7 +87,6 @@ static int rb532_button_remove(struct platform_device *pdev) input_unregister_polled_device(poll_dev); input_free_polled_device(poll_dev); - dev_set_drvdata(&pdev->dev, NULL); return 0; } diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c index 5b1aff825138..f920ba7ab51f 100644 --- a/drivers/input/misc/rotary_encoder.c +++ b/drivers/input/misc/rotary_encoder.c @@ -24,6 +24,7 @@ #include <linux/gpio.h> #include <linux/rotary_encoder.h> #include <linux/slab.h> +#include <linux/of.h> #include <linux/of_platform.h> #include <linux/of_gpio.h> diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c index 0621c367049a..7b8b03e0d0be 100644 --- a/drivers/input/misc/sirfsoc-onkey.c +++ b/drivers/input/misc/sirfsoc-onkey.c @@ -153,7 +153,7 @@ static struct platform_driver sirfsoc_pwrc_driver = { .name = "sirfsoc-pwrc", .owner = THIS_MODULE, .pm = &sirfsoc_pwrc_pm_ops, - .of_match_table = of_match_ptr(sirfsoc_pwrc_of_match), + .of_match_table = sirfsoc_pwrc_of_match, } }; diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index a0a4bbaef02c..772835938a52 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -430,20 +430,30 @@ static int uinput_setup_device(struct uinput_device *udev, return retval; } -static ssize_t uinput_inject_event(struct uinput_device *udev, - const char __user *buffer, size_t count) +static ssize_t uinput_inject_events(struct uinput_device *udev, + const char __user *buffer, size_t count) { struct input_event ev; + size_t bytes = 0; - if (count < input_event_size()) + if (count != 0 && count < input_event_size()) return -EINVAL; - if (input_event_from_user(buffer, &ev)) - return -EFAULT; + while (bytes + input_event_size() <= count) { + /* + * Note that even if some events were fetched successfully + * we are still going to return EFAULT instead of partial + * count to let userspace know that it got it's buffers + * all wrong. + */ + if (input_event_from_user(buffer + bytes, &ev)) + return -EFAULT; - input_event(udev->dev, ev.type, ev.code, ev.value); + input_event(udev->dev, ev.type, ev.code, ev.value); + bytes += input_event_size(); + } - return input_event_size(); + return bytes; } static ssize_t uinput_write(struct file *file, const char __user *buffer, @@ -460,7 +470,7 @@ static ssize_t uinput_write(struct file *file, const char __user *buffer, return retval; retval = udev->state == UIST_CREATED ? 
- uinput_inject_event(udev, buffer, count) : + uinput_inject_events(udev, buffer, count) : uinput_setup_device(udev, buffer, count); mutex_unlock(&udev->mutex); diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 83658472ad25..ca7a26f1dce8 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -103,7 +103,6 @@ static const struct alps_model_info alps_model_data[] = { /* Dell Latitude E5500, E6400, E6500, Precision M4400 */ { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, - { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_DUALPOINT }, /* Dell XT2 */ { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ @@ -1793,7 +1792,7 @@ int alps_init(struct psmouse *psmouse) snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys); dev2->phys = priv->phys; dev2->name = (priv->flags & ALPS_DUALPOINT) ? - "DualPoint Stick" : "PS/2 Mouse"; + "DualPoint Stick" : "ALPS PS/2 Device"; dev2->id.bustype = BUS_I8042; dev2->id.vendor = 0x0002; dev2->id.product = PSMOUSE_ALPS; diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c index f51765fff054..a5869a856ea5 100644 --- a/drivers/input/mouse/cypress_ps2.c +++ b/drivers/input/mouse/cypress_ps2.c @@ -439,7 +439,7 @@ static int cypress_get_finger_count(unsigned char header_byte) case 2: return 5; default: /* Invalid contact (e.g. palm). Ignore it. */ - return -1; + return 0; } } @@ -452,17 +452,10 @@ static int cypress_parse_packet(struct psmouse *psmouse, { unsigned char *packet = psmouse->packet; unsigned char header_byte = packet[0]; - int contact_cnt; memset(report_data, 0, sizeof(struct cytp_report_data)); - contact_cnt = cypress_get_finger_count(header_byte); - - if (contact_cnt < 0) /* e.g. palm detect */ - return -EINVAL; - - report_data->contact_cnt = contact_cnt; - + report_data->contact_cnt = cypress_get_finger_count(header_byte); report_data->tap = (header_byte & ABS_MULTIFINGER_TAP) ? 1 : 0; if (report_data->contact_cnt == 1) { @@ -535,11 +528,9 @@ static void cypress_process_packet(struct psmouse *psmouse, bool zero_pkt) int slots[CYTP_MAX_MT_SLOTS]; int n; - if (cypress_parse_packet(psmouse, cytp, &report_data)) - return; + cypress_parse_packet(psmouse, cytp, &report_data); n = report_data.contact_cnt; - if (n > CYTP_MAX_MT_SLOTS) n = CYTP_MAX_MT_SLOTS; @@ -605,10 +596,6 @@ static psmouse_ret_t cypress_validate_byte(struct psmouse *psmouse) return PSMOUSE_BAD_DATA; contact_cnt = cypress_get_finger_count(packet[0]); - - if (contact_cnt < 0) - return PSMOUSE_BAD_DATA; - if (cytp->mode & CYTP_BIT_ABS_NO_PRESSURE) cypress_set_packet_size(psmouse, contact_cnt == 2 ? 
7 : 4); else @@ -679,15 +666,15 @@ int cypress_init(struct psmouse *psmouse) { struct cytp_data *cytp; - cytp = (struct cytp_data *)kzalloc(sizeof(struct cytp_data), GFP_KERNEL); - psmouse->private = (void *)cytp; - if (cytp == NULL) + cytp = kzalloc(sizeof(struct cytp_data), GFP_KERNEL); + if (!cytp) return -ENOMEM; - cypress_reset(psmouse); - + psmouse->private = cytp; psmouse->pktsize = 8; + cypress_reset(psmouse); + if (cypress_query_hardware(psmouse)) { psmouse_err(psmouse, "Unable to query Trackpad hardware.\n"); goto err_exit; diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig index 1de1e5f8f795..8541f949778d 100644 --- a/drivers/input/serio/Kconfig +++ b/drivers/input/serio/Kconfig @@ -2,7 +2,7 @@ # Input core configuration # config SERIO - tristate "Serial I/O support" if EXPERT || !X86 + tristate "Serial I/O support" default y help Say Yes here if you have any input device that uses serial I/O to @@ -19,7 +19,7 @@ config SERIO if SERIO config SERIO_I8042 - tristate "i8042 PC Keyboard controller" if EXPERT || !X86 + tristate "i8042 PC Keyboard controller" default y depends on !PARISC && (!ARM || FOOTBRIDGE_HOST) && \ (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 && \ @@ -170,7 +170,7 @@ config SERIO_MACEPS2 module will be called maceps2. config SERIO_LIBPS2 - tristate "PS/2 driver library" if EXPERT + tristate "PS/2 driver library" depends on SERIO_I8042 || SERIO_I8042=n help Say Y here if you are using a driver for device connected @@ -266,4 +266,14 @@ config SERIO_OLPC_APSP To compile this driver as a module, choose M here: the module will be called olpc_apsp. +config HYPERV_KEYBOARD + tristate "Microsoft Synthetic Keyboard driver" + depends on HYPERV + default HYPERV + help + Select this option to enable the Hyper-V Keyboard driver. + + To compile this driver as a module, choose M here: the module will + be called hyperv_keyboard. + endif diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile index 12298b1c0e71..815d874fe724 100644 --- a/drivers/input/serio/Makefile +++ b/drivers/input/serio/Makefile @@ -28,3 +28,4 @@ obj-$(CONFIG_SERIO_ALTERA_PS2) += altera_ps2.o obj-$(CONFIG_SERIO_ARC_PS2) += arc_ps2.o obj-$(CONFIG_SERIO_APBPS2) += apbps2.o obj-$(CONFIG_SERIO_OLPC_APSP) += olpc_apsp.o +obj-$(CONFIG_HYPERV_KEYBOARD) += hyperv-keyboard.o diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c new file mode 100644 index 000000000000..3a83c3c14b23 --- /dev/null +++ b/drivers/input/serio/hyperv-keyboard.c @@ -0,0 +1,437 @@ +/* + * Copyright (c) 2013, Microsoft Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/completion.h> +#include <linux/hyperv.h> +#include <linux/serio.h> +#include <linux/slab.h> + +/* + * Current version 1.0 + * + */ +#define SYNTH_KBD_VERSION_MAJOR 1 +#define SYNTH_KBD_VERSION_MINOR 0 +#define SYNTH_KBD_VERSION (SYNTH_KBD_VERSION_MINOR | \ + (SYNTH_KBD_VERSION_MAJOR << 16)) + + +/* + * Message types in the synthetic input protocol + */ +enum synth_kbd_msg_type { + SYNTH_KBD_PROTOCOL_REQUEST = 1, + SYNTH_KBD_PROTOCOL_RESPONSE = 2, + SYNTH_KBD_EVENT = 3, + SYNTH_KBD_LED_INDICATORS = 4, +}; + +/* + * Basic message structures. + */ +struct synth_kbd_msg_hdr { + __le32 type; +}; + +struct synth_kbd_msg { + struct synth_kbd_msg_hdr header; + char data[]; /* Enclosed message */ +}; + +union synth_kbd_version { + __le32 version; +}; + +/* + * Protocol messages + */ +struct synth_kbd_protocol_request { + struct synth_kbd_msg_hdr header; + union synth_kbd_version version_requested; +}; + +#define PROTOCOL_ACCEPTED BIT(0) +struct synth_kbd_protocol_response { + struct synth_kbd_msg_hdr header; + __le32 proto_status; +}; + +#define IS_UNICODE BIT(0) +#define IS_BREAK BIT(1) +#define IS_E0 BIT(2) +#define IS_E1 BIT(3) +struct synth_kbd_keystroke { + struct synth_kbd_msg_hdr header; + __le16 make_code; + __le16 reserved0; + __le32 info; /* Additional information */ +}; + + +#define HK_MAXIMUM_MESSAGE_SIZE 256 + +#define KBD_VSC_SEND_RING_BUFFER_SIZE (10 * PAGE_SIZE) +#define KBD_VSC_RECV_RING_BUFFER_SIZE (10 * PAGE_SIZE) + +#define XTKBD_EMUL0 0xe0 +#define XTKBD_EMUL1 0xe1 +#define XTKBD_RELEASE 0x80 + + +/* + * Represents a keyboard device + */ +struct hv_kbd_dev { + struct hv_device *hv_dev; + struct serio *hv_serio; + struct synth_kbd_protocol_request protocol_req; + struct synth_kbd_protocol_response protocol_resp; + /* Synchronize the request/response if needed */ + struct completion wait_event; + spinlock_t lock; /* protects 'started' field */ + bool started; +}; + +static void hv_kbd_on_receive(struct hv_device *hv_dev, + struct synth_kbd_msg *msg, u32 msg_length) +{ + struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev); + struct synth_kbd_keystroke *ks_msg; + unsigned long flags; + u32 msg_type = __le32_to_cpu(msg->header.type); + u32 info; + u16 scan_code; + + switch (msg_type) { + case SYNTH_KBD_PROTOCOL_RESPONSE: + /* + * Validate the information provided by the host. + * If the host is giving us a bogus packet, + * drop the packet (hoping the problem + * goes away). + */ + if (msg_length < sizeof(struct synth_kbd_protocol_response)) { + dev_err(&hv_dev->device, + "Illegal protocol response packet (len: %d)\n", + msg_length); + break; + } + + memcpy(&kbd_dev->protocol_resp, msg, + sizeof(struct synth_kbd_protocol_response)); + complete(&kbd_dev->wait_event); + break; + + case SYNTH_KBD_EVENT: + /* + * Validate the information provided by the host. + * If the host is giving us a bogus packet, + * drop the packet (hoping the problem + * goes away). + */ + if (msg_length < sizeof(struct synth_kbd_keystroke)) { + dev_err(&hv_dev->device, + "Illegal keyboard event packet (len: %d)\n", + msg_length); + break; + } + + ks_msg = (struct synth_kbd_keystroke *)msg; + info = __le32_to_cpu(ks_msg->info); + + /* + * Inject the information through the serio interrupt. 
+ */ + spin_lock_irqsave(&kbd_dev->lock, flags); + if (kbd_dev->started) { + if (info & IS_E0) + serio_interrupt(kbd_dev->hv_serio, + XTKBD_EMUL0, 0); + + scan_code = __le16_to_cpu(ks_msg->make_code); + if (info & IS_BREAK) + scan_code |= XTKBD_RELEASE; + + serio_interrupt(kbd_dev->hv_serio, scan_code, 0); + } + spin_unlock_irqrestore(&kbd_dev->lock, flags); + break; + + default: + dev_err(&hv_dev->device, + "unhandled message type %d\n", msg_type); + } +} + +static void hv_kbd_handle_received_packet(struct hv_device *hv_dev, + struct vmpacket_descriptor *desc, + u32 bytes_recvd, + u64 req_id) +{ + struct synth_kbd_msg *msg; + u32 msg_sz; + + switch (desc->type) { + case VM_PKT_COMP: + break; + + case VM_PKT_DATA_INBAND: + /* + * We have a packet that has "inband" data. The API used + * for retrieving the packet guarantees that the complete + * packet is read. So, minimally, we should be able to + * parse the payload header safely (assuming that the host + * can be trusted. Trusting the host seems to be a + * reasonable assumption because in a virtualized + * environment there is not whole lot you can do if you + * don't trust the host. + * + * Nonetheless, let us validate if the host can be trusted + * (in a trivial way). The interesting aspect of this + * validation is how do you recover if we discover that the + * host is not to be trusted? Simply dropping the packet, I + * don't think is an appropriate recovery. In the interest + * of failing fast, it may be better to crash the guest. + * For now, I will just drop the packet! + */ + + msg_sz = bytes_recvd - (desc->offset8 << 3); + if (msg_sz <= sizeof(struct synth_kbd_msg_hdr)) { + /* + * Drop the packet and hope + * the problem magically goes away. + */ + dev_err(&hv_dev->device, + "Illegal packet (type: %d, tid: %llx, size: %d)\n", + desc->type, req_id, msg_sz); + break; + } + + msg = (void *)desc + (desc->offset8 << 3); + hv_kbd_on_receive(hv_dev, msg, msg_sz); + break; + + default: + dev_err(&hv_dev->device, + "unhandled packet type %d, tid %llx len %d\n", + desc->type, req_id, bytes_recvd); + break; + } +} + +static void hv_kbd_on_channel_callback(void *context) +{ + struct hv_device *hv_dev = context; + void *buffer; + int bufferlen = 0x100; /* Start with sensible size */ + u32 bytes_recvd; + u64 req_id; + int error; + + buffer = kmalloc(bufferlen, GFP_ATOMIC); + if (!buffer) + return; + + while (1) { + error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen, + &bytes_recvd, &req_id); + switch (error) { + case 0: + if (bytes_recvd == 0) { + kfree(buffer); + return; + } + + hv_kbd_handle_received_packet(hv_dev, buffer, + bytes_recvd, req_id); + break; + + case -ENOBUFS: + kfree(buffer); + /* Handle large packet */ + bufferlen = bytes_recvd; + buffer = kmalloc(bytes_recvd, GFP_ATOMIC); + if (!buffer) + return; + break; + } + } +} + +static int hv_kbd_connect_to_vsp(struct hv_device *hv_dev) +{ + struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev); + struct synth_kbd_protocol_request *request; + struct synth_kbd_protocol_response *response; + u32 proto_status; + int error; + + request = &kbd_dev->protocol_req; + memset(request, 0, sizeof(struct synth_kbd_protocol_request)); + request->header.type = __cpu_to_le32(SYNTH_KBD_PROTOCOL_REQUEST); + request->version_requested.version = __cpu_to_le32(SYNTH_KBD_VERSION); + + error = vmbus_sendpacket(hv_dev->channel, request, + sizeof(struct synth_kbd_protocol_request), + (unsigned long)request, + VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (error) + 
return error; + + if (!wait_for_completion_timeout(&kbd_dev->wait_event, 10 * HZ)) + return -ETIMEDOUT; + + response = &kbd_dev->protocol_resp; + proto_status = __le32_to_cpu(response->proto_status); + if (!(proto_status & PROTOCOL_ACCEPTED)) { + dev_err(&hv_dev->device, + "synth_kbd protocol request failed (version %d)\n", + SYNTH_KBD_VERSION); + return -ENODEV; + } + + return 0; +} + +static int hv_kbd_start(struct serio *serio) +{ + struct hv_kbd_dev *kbd_dev = serio->port_data; + unsigned long flags; + + spin_lock_irqsave(&kbd_dev->lock, flags); + kbd_dev->started = true; + spin_unlock_irqrestore(&kbd_dev->lock, flags); + + return 0; +} + +static void hv_kbd_stop(struct serio *serio) +{ + struct hv_kbd_dev *kbd_dev = serio->port_data; + unsigned long flags; + + spin_lock_irqsave(&kbd_dev->lock, flags); + kbd_dev->started = false; + spin_unlock_irqrestore(&kbd_dev->lock, flags); +} + +static int hv_kbd_probe(struct hv_device *hv_dev, + const struct hv_vmbus_device_id *dev_id) +{ + struct hv_kbd_dev *kbd_dev; + struct serio *hv_serio; + int error; + + kbd_dev = kzalloc(sizeof(struct hv_kbd_dev), GFP_KERNEL); + hv_serio = kzalloc(sizeof(struct serio), GFP_KERNEL); + if (!kbd_dev || !hv_serio) { + error = -ENOMEM; + goto err_free_mem; + } + + kbd_dev->hv_dev = hv_dev; + kbd_dev->hv_serio = hv_serio; + spin_lock_init(&kbd_dev->lock); + init_completion(&kbd_dev->wait_event); + hv_set_drvdata(hv_dev, kbd_dev); + + hv_serio->dev.parent = &hv_dev->device; + hv_serio->id.type = SERIO_8042_XL; + hv_serio->port_data = kbd_dev; + strlcpy(hv_serio->name, dev_name(&hv_dev->device), + sizeof(hv_serio->name)); + strlcpy(hv_serio->phys, dev_name(&hv_dev->device), + sizeof(hv_serio->phys)); + + hv_serio->start = hv_kbd_start; + hv_serio->stop = hv_kbd_stop; + + error = vmbus_open(hv_dev->channel, + KBD_VSC_SEND_RING_BUFFER_SIZE, + KBD_VSC_RECV_RING_BUFFER_SIZE, + NULL, 0, + hv_kbd_on_channel_callback, + hv_dev); + if (error) + goto err_free_mem; + + error = hv_kbd_connect_to_vsp(hv_dev); + if (error) + goto err_close_vmbus; + + serio_register_port(kbd_dev->hv_serio); + return 0; + +err_close_vmbus: + vmbus_close(hv_dev->channel); +err_free_mem: + kfree(hv_serio); + kfree(kbd_dev); + return error; +} + +static int hv_kbd_remove(struct hv_device *hv_dev) +{ + struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev); + + serio_unregister_port(kbd_dev->hv_serio); + vmbus_close(hv_dev->channel); + kfree(kbd_dev); + + hv_set_drvdata(hv_dev, NULL); + + return 0; +} + +/* + * Keyboard GUID + * {f912ad6d-2b17-48ea-bd65-f927a61c7684} + */ +#define HV_KBD_GUID \ + .guid = { \ + 0x6d, 0xad, 0x12, 0xf9, 0x17, 0x2b, 0xea, 0x48, \ + 0xbd, 0x65, 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84 \ + } + +static const struct hv_vmbus_device_id id_table[] = { + /* Keyboard guid */ + { HV_KBD_GUID, }, + { }, +}; + +MODULE_DEVICE_TABLE(vmbus, id_table); + +static struct hv_driver hv_kbd_drv = { + .name = KBUILD_MODNAME, + .id_table = id_table, + .probe = hv_kbd_probe, + .remove = hv_kbd_remove, +}; + +static int __init hv_kbd_init(void) +{ + return vmbus_driver_register(&hv_kbd_drv); +} + +static void __exit hv_kbd_exit(void) +{ + vmbus_driver_unregister(&hv_kbd_drv); +} + +MODULE_LICENSE("GPL"); +module_init(hv_kbd_init); +module_exit(hv_kbd_exit); diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 5f306f79da0c..0ec9abbe31fe 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -765,6 +765,7 @@ static struct pnp_device_id pnp_kbd_devids[] = { { .id = 
"CPQA0D7", .driver_data = 0 }, { .id = "", }, }; +MODULE_DEVICE_TABLE(pnp, pnp_kbd_devids); static struct pnp_driver i8042_pnp_kbd_driver = { .name = "i8042 kbd", @@ -786,6 +787,7 @@ static struct pnp_device_id pnp_aux_devids[] = { { .id = "SYN0801", .driver_data = 0 }, { .id = "", }, }; +MODULE_DEVICE_TABLE(pnp, pnp_aux_devids); static struct pnp_driver i8042_pnp_aux_driver = { .name = "i8042 aux", diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 52c9ebf94729..020053fa5aaa 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -1036,7 +1036,7 @@ static void i8042_controller_reset(bool force_reset) /* * i8042_panic_blink() will turn the keyboard LEDs on or off and is called * when kernel panics. Flashing LEDs is useful for users running X who may - * not see the console and will help distingushing panics from "real" + * not see the console and will help distinguishing panics from "real" * lockups. * * Note that DELAY has a limit of 10ms so we will not get stuck here diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index e53416a4d7f3..867e7c33ac55 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c @@ -524,9 +524,6 @@ static int wacom_set_device_mode(struct usb_interface *intf, int report_id, int error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT, report_id, rep_data, length, 1); - if (error >= 0) - error = wacom_get_report(intf, WAC_HID_FEATURE_REPORT, - report_id, rep_data, length, 1); } while ((error < 0 || rep_data[1] != mode) && limit++ < WAC_MSG_RETRIES); kfree(rep_data); @@ -548,7 +545,7 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat /* MT Tablet PC touch */ return wacom_set_device_mode(intf, 3, 4, 4); } - else if (features->type == WACOM_24HDT) { + else if (features->type == WACOM_24HDT || features->type == CINTIQ_HYBRID) { return wacom_set_device_mode(intf, 18, 3, 2); } } else if (features->device_type == BTN_TOOL_PEN) { @@ -719,7 +716,7 @@ static int wacom_led_control(struct wacom *wacom) return -ENOMEM; if (wacom->wacom_wac.features.type >= INTUOS5S && - wacom->wacom_wac.features.type <= INTUOS5L) { + wacom->wacom_wac.features.type <= INTUOSPL) { /* * Touch Ring and crop mark LED luminance may take on * one of four values: @@ -981,14 +978,20 @@ static int wacom_initialize_leds(struct wacom *wacom) case INTUOS5S: case INTUOS5: case INTUOS5L: - wacom->led.select[0] = 0; - wacom->led.select[1] = 0; - wacom->led.llv = 32; - wacom->led.hlv = 0; - wacom->led.img_lum = 0; - - error = sysfs_create_group(&wacom->intf->dev.kobj, - &intuos5_led_attr_group); + case INTUOSPS: + case INTUOSPM: + case INTUOSPL: + if (wacom->wacom_wac.features.device_type == BTN_TOOL_PEN) { + wacom->led.select[0] = 0; + wacom->led.select[1] = 0; + wacom->led.llv = 32; + wacom->led.hlv = 0; + wacom->led.img_lum = 0; + + error = sysfs_create_group(&wacom->intf->dev.kobj, + &intuos5_led_attr_group); + } else + return 0; break; default: @@ -1024,8 +1027,12 @@ static void wacom_destroy_leds(struct wacom *wacom) case INTUOS5S: case INTUOS5: case INTUOS5L: - sysfs_remove_group(&wacom->intf->dev.kobj, - &intuos5_led_attr_group); + case INTUOSPS: + case INTUOSPM: + case INTUOSPL: + if (wacom->wacom_wac.features.device_type == BTN_TOOL_PEN) + sysfs_remove_group(&wacom->intf->dev.kobj, + &intuos5_led_attr_group); break; } } @@ -1185,34 +1192,47 @@ static void wacom_wireless_work(struct work_struct *work) wacom_wac1->features = *((struct wacom_features 
*)id->driver_info); wacom_wac1->features.device_type = BTN_TOOL_PEN; + snprintf(wacom_wac1->name, WACOM_NAME_MAX, "%s (WL) Pen", + wacom_wac1->features.name); error = wacom_register_input(wacom1); if (error) - goto fail1; + goto fail; /* Touch interface */ - wacom_wac2->features = - *((struct wacom_features *)id->driver_info); - wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3; - wacom_wac2->features.device_type = BTN_TOOL_FINGER; - wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096; - error = wacom_register_input(wacom2); - if (error) - goto fail2; + if (wacom_wac1->features.touch_max) { + wacom_wac2->features = + *((struct wacom_features *)id->driver_info); + wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3; + wacom_wac2->features.device_type = BTN_TOOL_FINGER; + wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096; + if (wacom_wac2->features.touch_max) + snprintf(wacom_wac2->name, WACOM_NAME_MAX, + "%s (WL) Finger",wacom_wac2->features.name); + else + snprintf(wacom_wac2->name, WACOM_NAME_MAX, + "%s (WL) Pad",wacom_wac2->features.name); + error = wacom_register_input(wacom2); + if (error) + goto fail; + } error = wacom_initialize_battery(wacom); if (error) - goto fail3; + goto fail; } return; -fail3: - input_unregister_device(wacom_wac2->input); - wacom_wac2->input = NULL; -fail2: - input_unregister_device(wacom_wac1->input); - wacom_wac1->input = NULL; -fail1: +fail: + if (wacom_wac2->input) { + input_unregister_device(wacom_wac2->input); + wacom_wac2->input = NULL; + } + + if (wacom_wac1->input) { + input_unregister_device(wacom_wac1->input); + wacom_wac1->input = NULL; + } return; } @@ -1302,7 +1322,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i * HID descriptor. If this is the touch interface (wMaxPacketSize * of WACOM_PKGLEN_BBTOUCH3), override the table values. */ - if (features->type >= INTUOS5S && features->type <= INTUOS5L) { + if (features->type >= INTUOS5S && features->type <= INTUOSPL) { if (endpoint->wMaxPacketSize == WACOM_PKGLEN_BBTOUCH3) { features->device_type = BTN_TOOL_FINGER; features->pktlen = WACOM_PKGLEN_BBTOUCH3; @@ -1329,10 +1349,12 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i struct usb_device *other_dev; /* Append the device type to the name */ - strlcat(wacom_wac->name, - features->device_type == BTN_TOOL_PEN ? 
- " Pen" : " Finger", - sizeof(wacom_wac->name)); + if (features->device_type != BTN_TOOL_FINGER) + strlcat(wacom_wac->name, " Pen", WACOM_NAME_MAX); + else if (features->touch_max) + strlcat(wacom_wac->name, " Finger", WACOM_NAME_MAX); + else + strlcat(wacom_wac->name, " Pad", WACOM_NAME_MAX); other_dev = wacom_get_sibling(dev, features->oVid, features->oPid); if (other_dev == NULL || wacom_get_usbdev_data(other_dev) == NULL) diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index c59b797eeafa..782c2535f1d8 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -427,6 +427,13 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) (features->type == WACOM_21UX2)) return 1; + /* Range Report */ + if ((data[1] & 0xfe) == 0x20) { + input_report_key(input, BTN_TOUCH, 0); + input_report_abs(input, ABS_PRESSURE, 0); + input_report_abs(input, ABS_DISTANCE, wacom->features.distance_max); + } + /* Exit report */ if ((data[1] & 0xfe) == 0x80) { if (features->quirks == WACOM_QUIRK_MULTI_INPUT) @@ -477,7 +484,7 @@ static void wacom_intuos_general(struct wacom_wac *wacom) /* general pen packet */ if ((data[1] & 0xb8) == 0xa0) { t = (data[6] << 2) | ((data[7] >> 6) & 3); - if (features->type >= INTUOS4S && features->type <= WACOM_24HD) { + if (features->type >= INTUOS4S && features->type <= CINTIQ_HYBRID) { t = (t << 1) | (data[1] & 1); } input_report_abs(input, ABS_PRESSURE, t); @@ -621,14 +628,30 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) } else { input_report_abs(input, ABS_MISC, 0); } - } else if (features->type >= INTUOS5S && features->type <= INTUOS5L) { + } else if (features->type == CINTIQ_HYBRID) { + /* + * Do not send hardware buttons under Android. They + * are already sent to the system through GPIO (and + * have different meaning). + */ + input_report_key(input, BTN_1, (data[4] & 0x01)); + input_report_key(input, BTN_2, (data[4] & 0x02)); + input_report_key(input, BTN_3, (data[4] & 0x04)); + input_report_key(input, BTN_4, (data[4] & 0x08)); + + input_report_key(input, BTN_5, (data[4] & 0x10)); /* Right */ + input_report_key(input, BTN_6, (data[4] & 0x20)); /* Up */ + input_report_key(input, BTN_7, (data[4] & 0x40)); /* Left */ + input_report_key(input, BTN_8, (data[4] & 0x80)); /* Down */ + input_report_key(input, BTN_0, (data[3] & 0x01)); /* Center */ + } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) { int i; /* Touch ring mode switch has no capacitive sensor */ input_report_key(input, BTN_0, (data[3] & 0x01)); /* - * ExpressKeys on Intuos5 have a capacitive sensor in + * ExpressKeys on Intuos5/Intuos Pro have a capacitive sensor in * addition to the mechanical switch. Switch data is * stored in data[4], capacitive data in data[5]. 
*/ @@ -716,7 +739,9 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) features->type == INTUOS4 || features->type == INTUOS4S || features->type == INTUOS5 || - features->type == INTUOS5S)) { + features->type == INTUOS5S || + features->type == INTUOSPM || + features->type == INTUOSPS)) { return 0; } @@ -769,8 +794,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) } else if (wacom->tool[idx] == BTN_TOOL_MOUSE) { /* I4 mouse */ - if ((features->type >= INTUOS4S && features->type <= INTUOS4L) || - (features->type >= INTUOS5S && features->type <= INTUOS5L)) { + if (features->type >= INTUOS4S && features->type <= INTUOSPL) { input_report_key(input, BTN_LEFT, data[6] & 0x01); input_report_key(input, BTN_MIDDLE, data[6] & 0x02); input_report_key(input, BTN_RIGHT, data[6] & 0x04); @@ -797,7 +821,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) } } } else if ((features->type < INTUOS3S || features->type == INTUOS3L || - features->type == INTUOS4L || features->type == INTUOS5L) && + features->type == INTUOS4L || features->type == INTUOS5L || + features->type == INTUOSPL) && wacom->tool[idx] == BTN_TOOL_LENS) { /* Lens cursor packets */ input_report_key(input, BTN_LEFT, data[8] & 0x01); @@ -1107,6 +1132,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom) static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) { + struct wacom_features *features = &wacom->features; struct input_dev *input = wacom->input; bool touch = data[1] & 0x80; int slot = input_mt_get_slot_by_key(input, data[0]); @@ -1122,14 +1148,23 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) if (touch) { int x = (data[2] << 4) | (data[4] >> 4); int y = (data[3] << 4) | (data[4] & 0x0f); - int a = data[5]; + int width, height; - // "a" is a scaled-down area which we assume is roughly - // circular and which can be described as: a=(pi*r^2)/C. - int x_res = input_abs_get_res(input, ABS_X); - int y_res = input_abs_get_res(input, ABS_Y); - int width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE); - int height = width * y_res / x_res; + if (features->type >= INTUOSPS && features->type <= INTUOSPL) { + width = data[5]; + height = data[6]; + } else { + /* + * "a" is a scaled-down area which we assume is + * roughly circular and which can be described as: + * a=(pi*r^2)/C. 
+ */ + int a = data[5]; + int x_res = input_abs_get_res(input, ABS_X); + int y_res = input_abs_get_res(input, ABS_Y); + width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE); + height = width * y_res / x_res; + } input_report_abs(input, ABS_MT_POSITION_X, x); input_report_abs(input, ABS_MT_POSITION_Y, y); @@ -1327,6 +1362,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len) case WACOM_22HD: case WACOM_24HD: case DTK: + case CINTIQ_HYBRID: sync = wacom_intuos_irq(wacom_wac); break; @@ -1337,6 +1373,9 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len) case INTUOS5S: case INTUOS5: case INTUOS5L: + case INTUOSPS: + case INTUOSPM: + case INTUOSPL: if (len == WACOM_PKGLEN_BBTOUCH3) sync = wacom_bpt3_touch(wacom_wac); else @@ -1420,7 +1459,7 @@ void wacom_setup_device_quirks(struct wacom_features *features) /* these device have multiple inputs */ if (features->type >= WIRELESS || - (features->type >= INTUOS5S && features->type <= INTUOS5L) || + (features->type >= INTUOS5S && features->type <= INTUOSPL) || (features->oVid && features->oPid)) features->quirks |= WACOM_QUIRK_MULTI_INPUT; @@ -1627,6 +1666,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, case INTUOS5: case INTUOS5L: + case INTUOSPM: + case INTUOSPL: if (features->device_type == BTN_TOOL_PEN) { __set_bit(BTN_7, input_dev->keybit); __set_bit(BTN_8, input_dev->keybit); @@ -1634,6 +1675,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, /* fall through */ case INTUOS5S: + case INTUOSPS: __set_bit(INPUT_PROP_POINTER, input_dev->propbit); if (features->device_type == BTN_TOOL_PEN) { @@ -1765,6 +1807,24 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, 0, 0); } break; + + case CINTIQ_HYBRID: + __set_bit(BTN_1, input_dev->keybit); + __set_bit(BTN_2, input_dev->keybit); + __set_bit(BTN_3, input_dev->keybit); + __set_bit(BTN_4, input_dev->keybit); + + __set_bit(BTN_5, input_dev->keybit); + __set_bit(BTN_6, input_dev->keybit); + __set_bit(BTN_7, input_dev->keybit); + __set_bit(BTN_8, input_dev->keybit); + __set_bit(BTN_0, input_dev->keybit); + + input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); + __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); + + wacom_setup_cintiq(wacom_wac); + break; } return 0; } @@ -1952,6 +2012,18 @@ static const struct wacom_features wacom_features_0x29 = static const struct wacom_features wacom_features_0x2A = { "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047, 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; +static const struct wacom_features wacom_features_0x314 = + { "Wacom Intuos Pro S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047, + 63, INTUOSPS, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, + .touch_max = 16 }; +static const struct wacom_features wacom_features_0x315 = + { "Wacom Intuos Pro M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047, + 63, INTUOSPM, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, + .touch_max = 16 }; +static const struct wacom_features wacom_features_0x317 = + { "Wacom Intuos Pro L", WACOM_PKGLEN_INTUOS, 65024, 40640, 2047, + 63, INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, + .touch_max = 16 }; static const struct wacom_features wacom_features_0xF4 = { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; @@ -2131,6 +2203,13 @@ static const struct wacom_features wacom_features_0x301 = static const struct wacom_features wacom_features_0x6004 = { "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; +static 
const struct wacom_features wacom_features_0x0307 = + { "Wacom ISDv5 307", WACOM_PKGLEN_INTUOS, 59552, 33848, 2047, + 63, CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, + .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x309 }; +static const struct wacom_features wacom_features_0x0309 = + { "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */ + .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10 }; #define USB_DEVICE_WACOM(prod) \ USB_DEVICE(USB_VENDOR_ID_WACOM, prod), \ @@ -2259,12 +2338,17 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x300) }, { USB_DEVICE_WACOM(0x301) }, { USB_DEVICE_WACOM(0x304) }, + { USB_DEVICE_DETAILED(0x314, USB_CLASS_HID, 0, 0) }, + { USB_DEVICE_DETAILED(0x315, USB_CLASS_HID, 0, 0) }, + { USB_DEVICE_DETAILED(0x317, USB_CLASS_HID, 0, 0) }, { USB_DEVICE_WACOM(0x4001) }, { USB_DEVICE_WACOM(0x47) }, { USB_DEVICE_WACOM(0xF4) }, { USB_DEVICE_WACOM(0xF8) }, { USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) }, { USB_DEVICE_WACOM(0xFA) }, + { USB_DEVICE_WACOM(0x0307) }, + { USB_DEVICE_DETAILED(0x0309, USB_CLASS_HID, 0, 0) }, { USB_DEVICE_LENOVO(0x6004) }, { } }; diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h index dfc9e08e7f70..fd23a3790605 100644 --- a/drivers/input/tablet/wacom_wac.h +++ b/drivers/input/tablet/wacom_wac.h @@ -14,6 +14,8 @@ /* maximum packet length for USB devices */ #define WACOM_PKGLEN_MAX 64 +#define WACOM_NAME_MAX 64 + /* packet length for individual models */ #define WACOM_PKGLEN_PENPRTN 7 #define WACOM_PKGLEN_GRAPHIRE 8 @@ -76,10 +78,14 @@ enum { INTUOS5S, INTUOS5, INTUOS5L, + INTUOSPS, + INTUOSPM, + INTUOSPL, WACOM_21UX2, WACOM_22HD, DTK, WACOM_24HD, + CINTIQ_HYBRID, CINTIQ, WACOM_BEE, WACOM_13HD, @@ -126,7 +132,7 @@ struct wacom_shared { }; struct wacom_wac { - char name[64]; + char name[WACOM_NAME_MAX]; unsigned char *data; int tool[2]; int id[2]; diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index e09ec67957a3..00d1e547b211 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -919,4 +919,17 @@ config TOUCHSCREEN_TPS6507X To compile this driver as a module, choose M here: the module will be called tps6507x_ts. +config TOUCHSCREEN_ZFORCE + tristate "Neonode zForce infrared touchscreens" + depends on I2C + depends on GPIOLIB + help + Say Y here if you have a touchscreen using the zforce + infraread technology from Neonode. + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called zforce_ts. 
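For readers following the new Intuos Pro and CINTIQ_HYBRID entries added to the wacom feature table above: those positional initializers rely on the field order of struct wacom_features. The annotated copy below names the implied fields; it is a reading of wacom_wac.h added here only for illustration, not code from the patch, so treat the field names and comments as an interpretation.

/* Same data as wacom_features_0x315 above, with the positional fields
 * spelled out (field order per struct wacom_features; illustrative only). */
static const struct wacom_features annotated_0x315 = {
	.name		= "Wacom Intuos Pro M",
	.pktlen		= WACOM_PKGLEN_INTUOS,	/* USB report length */
	.x_max		= 44704,		/* active area, tablet units */
	.y_max		= 27940,
	.pressure_max	= 2047,
	.distance_max	= 63,
	.type		= INTUOSPM,
	.x_resolution	= WACOM_INTUOS3_RES,	/* reported resolution */
	.y_resolution	= WACOM_INTUOS3_RES,
	.touch_max	= 16,			/* up to 16 touch contacts */
};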
+ endif diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index f5216c1bf53e..7587883b8d38 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -75,3 +75,4 @@ obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o obj-$(CONFIG_TOUCHSCREEN_W90X900) += w90p910_ts.o obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o +obj-$(CONFIG_TOUCHSCREEN_ZFORCE) += zforce_ts.o diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c index f3a174a83c82..69834dd3c313 100644 --- a/drivers/input/touchscreen/ad7877.c +++ b/drivers/input/touchscreen/ad7877.c @@ -806,7 +806,6 @@ err_free_irq: err_free_mem: input_free_device(input_dev); kfree(ts); - spi_set_drvdata(spi, NULL); return err; } @@ -823,7 +822,6 @@ static int ad7877_remove(struct spi_device *spi) kfree(ts); dev_dbg(&spi->dev, "unregistered touchscreen\n"); - spi_set_drvdata(spi, NULL); return 0; } diff --git a/drivers/input/touchscreen/ad7879-spi.c b/drivers/input/touchscreen/ad7879-spi.c index 606da5bd6115..1a7b1143536e 100644 --- a/drivers/input/touchscreen/ad7879-spi.c +++ b/drivers/input/touchscreen/ad7879-spi.c @@ -142,7 +142,6 @@ static int ad7879_spi_remove(struct spi_device *spi) struct ad7879 *ts = spi_get_drvdata(spi); ad7879_remove(ts); - spi_set_drvdata(spi, NULL); return 0; } diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c index d038575f49db..42d830efa316 100644 --- a/drivers/input/touchscreen/cyttsp4_core.c +++ b/drivers/input/touchscreen/cyttsp4_core.c @@ -2113,7 +2113,6 @@ error_startup: error_request_irq: if (cd->cpdata->init) cd->cpdata->init(cd->cpdata, 0, dev); - dev_set_drvdata(dev, NULL); error_free_xfer: kfree(cd->xfer_buf); error_free_cd: @@ -2151,7 +2150,6 @@ int cyttsp4_remove(struct cyttsp4 *cd) free_irq(cd->irq, cd); if (cd->cpdata->init) cd->cpdata->init(cd->cpdata, 0, dev); - dev_set_drvdata(dev, NULL); cyttsp4_free_si_ptrs(cd); kfree(cd); return 0; diff --git a/drivers/input/touchscreen/cyttsp4_spi.c b/drivers/input/touchscreen/cyttsp4_spi.c index a71e1141d638..b19434cebbf6 100644 --- a/drivers/input/touchscreen/cyttsp4_spi.c +++ b/drivers/input/touchscreen/cyttsp4_spi.c @@ -171,10 +171,7 @@ static int cyttsp4_spi_probe(struct spi_device *spi) ts = cyttsp4_probe(&cyttsp_spi_bus_ops, &spi->dev, spi->irq, CY_SPI_DATA_BUF_SIZE); - if (IS_ERR(ts)) - return PTR_ERR(ts); - - return 0; + return PTR_ERR_OR_ZERO(ts); } static int cyttsp4_spi_remove(struct spi_device *spi) diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c index ef5fcb0945e9..054d22583248 100644 --- a/drivers/input/touchscreen/egalax_ts.c +++ b/drivers/input/touchscreen/egalax_ts.c @@ -273,7 +273,7 @@ static struct i2c_driver egalax_ts_driver = { .name = "egalax_ts", .owner = THIS_MODULE, .pm = &egalax_ts_pm_ops, - .of_match_table = of_match_ptr(egalax_ts_dt_ids), + .of_match_table = egalax_ts_dt_ids, }, .id_table = egalax_ts_id, .probe = egalax_ts_probe, diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c index 66500852341b..92e2243fb77d 100644 --- a/drivers/input/touchscreen/htcpen.c +++ b/drivers/input/touchscreen/htcpen.c @@ -186,8 +186,6 @@ static int htcpen_isa_remove(struct device *dev, unsigned int id) release_region(HTCPEN_PORT_INIT, 1); release_region(HTCPEN_PORT_IRQ_CLEAR, 1); - dev_set_drvdata(dev, NULL); - return 0; } diff --git 
a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c index 1740a2496371..2f03b2f289dd 100644 --- a/drivers/input/touchscreen/st1232.c +++ b/drivers/input/touchscreen/st1232.c @@ -24,6 +24,7 @@ #include <linux/input.h> #include <linux/interrupt.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/of_gpio.h> #include <linux/pm_qos.h> #include <linux/slab.h> diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c index 24e625c0b531..68beadaabceb 100644 --- a/drivers/input/touchscreen/ti_am335x_tsc.c +++ b/drivers/input/touchscreen/ti_am335x_tsc.c @@ -354,9 +354,16 @@ static int titsc_parse_dt(struct platform_device *pdev, if (err < 0) return err; - err = of_property_read_u32(node, "ti,coordiante-readouts", + /* + * Try with the new binding first. If it fails, try again with + * bogus, miss-spelled version. + */ + err = of_property_read_u32(node, "ti,coordinate-readouts", &ts_dev->coordinate_readouts); if (err < 0) + err = of_property_read_u32(node, "ti,coordiante-readouts", + &ts_dev->coordinate_readouts); + if (err < 0) return err; return of_property_read_u32_array(node, "ti,wire-config", @@ -511,7 +518,7 @@ static struct platform_driver ti_tsc_driver = { .name = "TI-am335x-tsc", .owner = THIS_MODULE, .pm = TITSC_PM_OPS, - .of_match_table = of_match_ptr(ti_tsc_dt_ids), + .of_match_table = ti_tsc_dt_ids, }, }; module_platform_driver(ti_tsc_driver); diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c index 7213e8b07e79..811353353917 100644 --- a/drivers/input/touchscreen/tsc2005.c +++ b/drivers/input/touchscreen/tsc2005.c @@ -678,7 +678,6 @@ static int tsc2005_probe(struct spi_device *spi) err_remove_sysfs: sysfs_remove_group(&spi->dev.kobj, &tsc2005_attr_group); err_clear_drvdata: - spi_set_drvdata(spi, NULL); free_irq(spi->irq, ts); err_free_mem: input_free_device(input_dev); @@ -696,7 +695,6 @@ static int tsc2005_remove(struct spi_device *spi) input_unregister_device(ts->idev); kfree(ts); - spi_set_drvdata(spi, NULL); return 0; } diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index 721fdb3597ca..ae4b6b903629 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c @@ -146,12 +146,10 @@ enum { #define USB_DEVICE_HID_CLASS(vend, prod) \ .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS \ - | USB_DEVICE_ID_MATCH_INT_PROTOCOL \ | USB_DEVICE_ID_MATCH_DEVICE, \ .idVendor = (vend), \ .idProduct = (prod), \ - .bInterfaceClass = USB_INTERFACE_CLASS_HID, \ - .bInterfaceProtocol = USB_INTERFACE_PROTOCOL_MOUSE + .bInterfaceClass = USB_INTERFACE_CLASS_HID static const struct usb_device_id usbtouch_devices[] = { #ifdef CONFIG_TOUCHSCREEN_USB_EGALAX diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c new file mode 100644 index 000000000000..75762d6ff3ba --- /dev/null +++ b/drivers/input/touchscreen/zforce_ts.c @@ -0,0 +1,836 @@ +/* + * Copyright (C) 2012-2013 MundoReader S.L. + * Author: Heiko Stuebner <heiko@sntech.de> + * + * based in parts on Nook zforce driver + * + * Copyright (C) 2010 Barnes & Noble, Inc. + * Author: Pieter Truter<ptruter@intrinsyc.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/module.h> +#include <linux/hrtimer.h> +#include <linux/slab.h> +#include <linux/input.h> +#include <linux/interrupt.h> +#include <linux/i2c.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/device.h> +#include <linux/sysfs.h> +#include <linux/input/mt.h> +#include <linux/platform_data/zforce_ts.h> + +#define WAIT_TIMEOUT msecs_to_jiffies(1000) + +#define FRAME_START 0xee + +/* Offsets of the different parts of the payload the controller sends */ +#define PAYLOAD_HEADER 0 +#define PAYLOAD_LENGTH 1 +#define PAYLOAD_BODY 2 + +/* Response offsets */ +#define RESPONSE_ID 0 +#define RESPONSE_DATA 1 + +/* Commands */ +#define COMMAND_DEACTIVATE 0x00 +#define COMMAND_INITIALIZE 0x01 +#define COMMAND_RESOLUTION 0x02 +#define COMMAND_SETCONFIG 0x03 +#define COMMAND_DATAREQUEST 0x04 +#define COMMAND_SCANFREQ 0x08 +#define COMMAND_STATUS 0X1e + +/* + * Responses the controller sends as a result of + * command requests + */ +#define RESPONSE_DEACTIVATE 0x00 +#define RESPONSE_INITIALIZE 0x01 +#define RESPONSE_RESOLUTION 0x02 +#define RESPONSE_SETCONFIG 0x03 +#define RESPONSE_SCANFREQ 0x08 +#define RESPONSE_STATUS 0X1e + +/* + * Notifications are send by the touch controller without + * being requested by the driver and include for example + * touch indications + */ +#define NOTIFICATION_TOUCH 0x04 +#define NOTIFICATION_BOOTCOMPLETE 0x07 +#define NOTIFICATION_OVERRUN 0x25 +#define NOTIFICATION_PROXIMITY 0x26 +#define NOTIFICATION_INVALID_COMMAND 0xfe + +#define ZFORCE_REPORT_POINTS 2 +#define ZFORCE_MAX_AREA 0xff + +#define STATE_DOWN 0 +#define STATE_MOVE 1 +#define STATE_UP 2 + +#define SETCONFIG_DUALTOUCH (1 << 0) + +struct zforce_point { + int coord_x; + int coord_y; + int state; + int id; + int area_major; + int area_minor; + int orientation; + int pressure; + int prblty; +}; + +/* + * @client the i2c_client + * @input the input device + * @suspending in the process of going to suspend (don't emit wakeup + * events for commands executed to suspend the device) + * @suspended device suspended + * @access_mutex serialize i2c-access, to keep multipart reads together + * @command_done completion to wait for the command result + * @command_mutex serialize commands send to the ic + * @command_waiting the id of the command that that is currently waiting + * for a result + * @command_result returned result of the command + */ +struct zforce_ts { + struct i2c_client *client; + struct input_dev *input; + const struct zforce_ts_platdata *pdata; + char phys[32]; + + bool suspending; + bool suspended; + bool boot_complete; + + /* Firmware version information */ + u16 version_major; + u16 version_minor; + u16 version_build; + u16 version_rev; + + struct mutex access_mutex; + + struct completion command_done; + struct mutex command_mutex; + int command_waiting; + int command_result; +}; + +static int zforce_command(struct zforce_ts *ts, u8 cmd) +{ + struct i2c_client *client = ts->client; + char buf[3]; + int ret; + + dev_dbg(&client->dev, "%s: 0x%x\n", __func__, cmd); + + buf[0] = FRAME_START; + buf[1] = 1; /* data size, command only */ + buf[2] = cmd; + + mutex_lock(&ts->access_mutex); + ret = i2c_master_send(client, &buf[0], ARRAY_SIZE(buf)); + mutex_unlock(&ts->access_mutex); + if (ret < 0) { + dev_err(&client->dev, 
"i2c send data request error: %d\n", ret); + return ret; + } + + return 0; +} + +static int zforce_send_wait(struct zforce_ts *ts, const char *buf, int len) +{ + struct i2c_client *client = ts->client; + int ret; + + ret = mutex_trylock(&ts->command_mutex); + if (!ret) { + dev_err(&client->dev, "already waiting for a command\n"); + return -EBUSY; + } + + dev_dbg(&client->dev, "sending %d bytes for command 0x%x\n", + buf[1], buf[2]); + + ts->command_waiting = buf[2]; + + mutex_lock(&ts->access_mutex); + ret = i2c_master_send(client, buf, len); + mutex_unlock(&ts->access_mutex); + if (ret < 0) { + dev_err(&client->dev, "i2c send data request error: %d\n", ret); + goto unlock; + } + + dev_dbg(&client->dev, "waiting for result for command 0x%x\n", buf[2]); + + if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0) { + ret = -ETIME; + goto unlock; + } + + ret = ts->command_result; + +unlock: + mutex_unlock(&ts->command_mutex); + return ret; +} + +static int zforce_command_wait(struct zforce_ts *ts, u8 cmd) +{ + struct i2c_client *client = ts->client; + char buf[3]; + int ret; + + dev_dbg(&client->dev, "%s: 0x%x\n", __func__, cmd); + + buf[0] = FRAME_START; + buf[1] = 1; /* data size, command only */ + buf[2] = cmd; + + ret = zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf)); + if (ret < 0) { + dev_err(&client->dev, "i2c send data request error: %d\n", ret); + return ret; + } + + return 0; +} + +static int zforce_resolution(struct zforce_ts *ts, u16 x, u16 y) +{ + struct i2c_client *client = ts->client; + char buf[7] = { FRAME_START, 5, COMMAND_RESOLUTION, + (x & 0xff), ((x >> 8) & 0xff), + (y & 0xff), ((y >> 8) & 0xff) }; + + dev_dbg(&client->dev, "set resolution to (%d,%d)\n", x, y); + + return zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf)); +} + +static int zforce_scan_frequency(struct zforce_ts *ts, u16 idle, u16 finger, + u16 stylus) +{ + struct i2c_client *client = ts->client; + char buf[9] = { FRAME_START, 7, COMMAND_SCANFREQ, + (idle & 0xff), ((idle >> 8) & 0xff), + (finger & 0xff), ((finger >> 8) & 0xff), + (stylus & 0xff), ((stylus >> 8) & 0xff) }; + + dev_dbg(&client->dev, "set scan frequency to (idle: %d, finger: %d, stylus: %d)\n", + idle, finger, stylus); + + return zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf)); +} + +static int zforce_setconfig(struct zforce_ts *ts, char b1) +{ + struct i2c_client *client = ts->client; + char buf[7] = { FRAME_START, 5, COMMAND_SETCONFIG, + b1, 0, 0, 0 }; + + dev_dbg(&client->dev, "set config to (%d)\n", b1); + + return zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf)); +} + +static int zforce_start(struct zforce_ts *ts) +{ + struct i2c_client *client = ts->client; + const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev); + int ret; + + dev_dbg(&client->dev, "starting device\n"); + + ret = zforce_command_wait(ts, COMMAND_INITIALIZE); + if (ret) { + dev_err(&client->dev, "Unable to initialize, %d\n", ret); + return ret; + } + + ret = zforce_resolution(ts, pdata->x_max, pdata->y_max); + if (ret) { + dev_err(&client->dev, "Unable to set resolution, %d\n", ret); + goto error; + } + + ret = zforce_scan_frequency(ts, 10, 50, 50); + if (ret) { + dev_err(&client->dev, "Unable to set scan frequency, %d\n", + ret); + goto error; + } + + if (zforce_setconfig(ts, SETCONFIG_DUALTOUCH)) { + dev_err(&client->dev, "Unable to set config\n"); + goto error; + } + + /* start sending touch events */ + ret = zforce_command(ts, COMMAND_DATAREQUEST); + if (ret) { + dev_err(&client->dev, "Unable to request data\n"); + goto error; + } + + /* + * 
Per NN, initial cal. take max. of 200msec. + * Allow time to complete this calibration + */ + msleep(200); + + return 0; + +error: + zforce_command_wait(ts, COMMAND_DEACTIVATE); + return ret; +} + +static int zforce_stop(struct zforce_ts *ts) +{ + struct i2c_client *client = ts->client; + int ret; + + dev_dbg(&client->dev, "stopping device\n"); + + /* Deactivates touch sensing and puts the device into sleep. */ + ret = zforce_command_wait(ts, COMMAND_DEACTIVATE); + if (ret != 0) { + dev_err(&client->dev, "could not deactivate device, %d\n", + ret); + return ret; + } + + return 0; +} + +static int zforce_touch_event(struct zforce_ts *ts, u8 *payload) +{ + struct i2c_client *client = ts->client; + const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev); + struct zforce_point point; + int count, i, num = 0; + + count = payload[0]; + if (count > ZFORCE_REPORT_POINTS) { + dev_warn(&client->dev, "to many coordinates %d, expected max %d\n", + count, ZFORCE_REPORT_POINTS); + count = ZFORCE_REPORT_POINTS; + } + + for (i = 0; i < count; i++) { + point.coord_x = + payload[9 * i + 2] << 8 | payload[9 * i + 1]; + point.coord_y = + payload[9 * i + 4] << 8 | payload[9 * i + 3]; + + if (point.coord_x > pdata->x_max || + point.coord_y > pdata->y_max) { + dev_warn(&client->dev, "coordinates (%d,%d) invalid\n", + point.coord_x, point.coord_y); + point.coord_x = point.coord_y = 0; + } + + point.state = payload[9 * i + 5] & 0x03; + point.id = (payload[9 * i + 5] & 0xfc) >> 2; + + /* determine touch major, minor and orientation */ + point.area_major = max(payload[9 * i + 6], + payload[9 * i + 7]); + point.area_minor = min(payload[9 * i + 6], + payload[9 * i + 7]); + point.orientation = payload[9 * i + 6] > payload[9 * i + 7]; + + point.pressure = payload[9 * i + 8]; + point.prblty = payload[9 * i + 9]; + + dev_dbg(&client->dev, + "point %d/%d: state %d, id %d, pressure %d, prblty %d, x %d, y %d, amajor %d, aminor %d, ori %d\n", + i, count, point.state, point.id, + point.pressure, point.prblty, + point.coord_x, point.coord_y, + point.area_major, point.area_minor, + point.orientation); + + /* the zforce id starts with "1", so needs to be decreased */ + input_mt_slot(ts->input, point.id - 1); + + input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, + point.state != STATE_UP); + + if (point.state != STATE_UP) { + input_report_abs(ts->input, ABS_MT_POSITION_X, + point.coord_x); + input_report_abs(ts->input, ABS_MT_POSITION_Y, + point.coord_y); + input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR, + point.area_major); + input_report_abs(ts->input, ABS_MT_TOUCH_MINOR, + point.area_minor); + input_report_abs(ts->input, ABS_MT_ORIENTATION, + point.orientation); + num++; + } + } + + input_mt_sync_frame(ts->input); + + input_mt_report_finger_count(ts->input, num); + + input_sync(ts->input); + + return 0; +} + +static int zforce_read_packet(struct zforce_ts *ts, u8 *buf) +{ + struct i2c_client *client = ts->client; + int ret; + + mutex_lock(&ts->access_mutex); + + /* read 2 byte message header */ + ret = i2c_master_recv(client, buf, 2); + if (ret < 0) { + dev_err(&client->dev, "error reading header: %d\n", ret); + goto unlock; + } + + if (buf[PAYLOAD_HEADER] != FRAME_START) { + dev_err(&client->dev, "invalid frame start: %d\n", buf[0]); + ret = -EIO; + goto unlock; + } + + if (buf[PAYLOAD_LENGTH] <= 0 || buf[PAYLOAD_LENGTH] > 255) { + dev_err(&client->dev, "invalid payload length: %d\n", + buf[PAYLOAD_LENGTH]); + ret = -EIO; + goto unlock; + } + + /* read the message */ + ret = i2c_master_recv(client, 
&buf[PAYLOAD_BODY], buf[PAYLOAD_LENGTH]); + if (ret < 0) { + dev_err(&client->dev, "error reading payload: %d\n", ret); + goto unlock; + } + + dev_dbg(&client->dev, "read %d bytes for response command 0x%x\n", + buf[PAYLOAD_LENGTH], buf[PAYLOAD_BODY]); + +unlock: + mutex_unlock(&ts->access_mutex); + return ret; +} + +static void zforce_complete(struct zforce_ts *ts, int cmd, int result) +{ + struct i2c_client *client = ts->client; + + if (ts->command_waiting == cmd) { + dev_dbg(&client->dev, "completing command 0x%x\n", cmd); + ts->command_result = result; + complete(&ts->command_done); + } else { + dev_dbg(&client->dev, "command %d not for us\n", cmd); + } +} + +static irqreturn_t zforce_interrupt(int irq, void *dev_id) +{ + struct zforce_ts *ts = dev_id; + struct i2c_client *client = ts->client; + const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev); + int ret; + u8 payload_buffer[512]; + u8 *payload; + + /* + * When suspended, emit a wakeup signal if necessary and return. + * Due to the level-interrupt we will get re-triggered later. + */ + if (ts->suspended) { + if (device_may_wakeup(&client->dev)) + pm_wakeup_event(&client->dev, 500); + msleep(20); + return IRQ_HANDLED; + } + + dev_dbg(&client->dev, "handling interrupt\n"); + + /* Don't emit wakeup events from commands run by zforce_suspend */ + if (!ts->suspending && device_may_wakeup(&client->dev)) + pm_stay_awake(&client->dev); + + while (!gpio_get_value(pdata->gpio_int)) { + ret = zforce_read_packet(ts, payload_buffer); + if (ret < 0) { + dev_err(&client->dev, "could not read packet, ret: %d\n", + ret); + break; + } + + payload = &payload_buffer[PAYLOAD_BODY]; + + switch (payload[RESPONSE_ID]) { + case NOTIFICATION_TOUCH: + /* + * Always report touch-events received while + * suspending, when being a wakeup source + */ + if (ts->suspending && device_may_wakeup(&client->dev)) + pm_wakeup_event(&client->dev, 500); + zforce_touch_event(ts, &payload[RESPONSE_DATA]); + break; + + case NOTIFICATION_BOOTCOMPLETE: + ts->boot_complete = payload[RESPONSE_DATA]; + zforce_complete(ts, payload[RESPONSE_ID], 0); + break; + + case RESPONSE_INITIALIZE: + case RESPONSE_DEACTIVATE: + case RESPONSE_SETCONFIG: + case RESPONSE_RESOLUTION: + case RESPONSE_SCANFREQ: + zforce_complete(ts, payload[RESPONSE_ID], + payload[RESPONSE_DATA]); + break; + + case RESPONSE_STATUS: + /* + * Version Payload Results + * [2:major] [2:minor] [2:build] [2:rev] + */ + ts->version_major = (payload[RESPONSE_DATA + 1] << 8) | + payload[RESPONSE_DATA]; + ts->version_minor = (payload[RESPONSE_DATA + 3] << 8) | + payload[RESPONSE_DATA + 2]; + ts->version_build = (payload[RESPONSE_DATA + 5] << 8) | + payload[RESPONSE_DATA + 4]; + ts->version_rev = (payload[RESPONSE_DATA + 7] << 8) | + payload[RESPONSE_DATA + 6]; + dev_dbg(&ts->client->dev, "Firmware Version %04x:%04x %04x:%04x\n", + ts->version_major, ts->version_minor, + ts->version_build, ts->version_rev); + + zforce_complete(ts, payload[RESPONSE_ID], 0); + break; + + case NOTIFICATION_INVALID_COMMAND: + dev_err(&ts->client->dev, "invalid command: 0x%x\n", + payload[RESPONSE_DATA]); + break; + + default: + dev_err(&ts->client->dev, "unrecognized response id: 0x%x\n", + payload[RESPONSE_ID]); + break; + } + } + + if (!ts->suspending && device_may_wakeup(&client->dev)) + pm_relax(&client->dev); + + dev_dbg(&client->dev, "finished interrupt\n"); + + return IRQ_HANDLED; +} + +static int zforce_input_open(struct input_dev *dev) +{ + struct zforce_ts *ts = input_get_drvdata(dev); + int ret; + + ret = 
zforce_start(ts); + if (ret) + return ret; + + return 0; +} + +static void zforce_input_close(struct input_dev *dev) +{ + struct zforce_ts *ts = input_get_drvdata(dev); + struct i2c_client *client = ts->client; + int ret; + + ret = zforce_stop(ts); + if (ret) + dev_warn(&client->dev, "stopping zforce failed\n"); + + return; +} + +#ifdef CONFIG_PM_SLEEP +static int zforce_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct zforce_ts *ts = i2c_get_clientdata(client); + struct input_dev *input = ts->input; + int ret = 0; + + mutex_lock(&input->mutex); + ts->suspending = true; + + /* + * When configured as a wakeup source device should always wake + * the system, therefore start device if necessary. + */ + if (device_may_wakeup(&client->dev)) { + dev_dbg(&client->dev, "suspend while being a wakeup source\n"); + + /* Need to start device, if not open, to be a wakeup source. */ + if (!input->users) { + ret = zforce_start(ts); + if (ret) + goto unlock; + } + + enable_irq_wake(client->irq); + } else if (input->users) { + dev_dbg(&client->dev, "suspend without being a wakeup source\n"); + + ret = zforce_stop(ts); + if (ret) + goto unlock; + + disable_irq(client->irq); + } + + ts->suspended = true; + +unlock: + ts->suspending = false; + mutex_unlock(&input->mutex); + + return ret; +} + +static int zforce_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct zforce_ts *ts = i2c_get_clientdata(client); + struct input_dev *input = ts->input; + int ret = 0; + + mutex_lock(&input->mutex); + + ts->suspended = false; + + if (device_may_wakeup(&client->dev)) { + dev_dbg(&client->dev, "resume from being a wakeup source\n"); + + disable_irq_wake(client->irq); + + /* need to stop device if it was not open on suspend */ + if (!input->users) { + ret = zforce_stop(ts); + if (ret) + goto unlock; + } + } else if (input->users) { + dev_dbg(&client->dev, "resume without being a wakeup source\n"); + + enable_irq(client->irq); + + ret = zforce_start(ts); + if (ret < 0) + goto unlock; + } + +unlock: + mutex_unlock(&input->mutex); + + return ret; +} +#endif + +static SIMPLE_DEV_PM_OPS(zforce_pm_ops, zforce_suspend, zforce_resume); + +static void zforce_reset(void *data) +{ + struct zforce_ts *ts = data; + + gpio_set_value(ts->pdata->gpio_rst, 0); +} + +static int zforce_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev); + struct zforce_ts *ts; + struct input_dev *input_dev; + int ret; + + if (!pdata) + return -EINVAL; + + ts = devm_kzalloc(&client->dev, sizeof(struct zforce_ts), GFP_KERNEL); + if (!ts) + return -ENOMEM; + + ret = devm_gpio_request_one(&client->dev, pdata->gpio_int, GPIOF_IN, + "zforce_ts_int"); + if (ret) { + dev_err(&client->dev, "request of gpio %d failed, %d\n", + pdata->gpio_int, ret); + return ret; + } + + ret = devm_gpio_request_one(&client->dev, pdata->gpio_rst, + GPIOF_OUT_INIT_LOW, "zforce_ts_rst"); + if (ret) { + dev_err(&client->dev, "request of gpio %d failed, %d\n", + pdata->gpio_rst, ret); + return ret; + } + + ret = devm_add_action(&client->dev, zforce_reset, ts); + if (ret) { + dev_err(&client->dev, "failed to register reset action, %d\n", + ret); + return ret; + } + + snprintf(ts->phys, sizeof(ts->phys), + "%s/input0", dev_name(&client->dev)); + + input_dev = devm_input_allocate_device(&client->dev); + if (!input_dev) { + dev_err(&client->dev, "could not allocate input device\n"); + return -ENOMEM; + } + + 
mutex_init(&ts->access_mutex); + mutex_init(&ts->command_mutex); + + ts->pdata = pdata; + ts->client = client; + ts->input = input_dev; + + input_dev->name = "Neonode zForce touchscreen"; + input_dev->phys = ts->phys; + input_dev->id.bustype = BUS_I2C; + + input_dev->open = zforce_input_open; + input_dev->close = zforce_input_close; + + __set_bit(EV_KEY, input_dev->evbit); + __set_bit(EV_SYN, input_dev->evbit); + __set_bit(EV_ABS, input_dev->evbit); + + /* For multi touch */ + input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, + pdata->x_max, 0, 0); + input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, + pdata->y_max, 0, 0); + + input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, + ZFORCE_MAX_AREA, 0, 0); + input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0, + ZFORCE_MAX_AREA, 0, 0); + input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0); + input_mt_init_slots(input_dev, ZFORCE_REPORT_POINTS, INPUT_MT_DIRECT); + + input_set_drvdata(ts->input, ts); + + init_completion(&ts->command_done); + + /* + * The zforce pulls the interrupt low when it has data ready. + * After it is triggered the isr thread runs until all the available + * packets have been read and the interrupt is high again. + * Therefore we can trigger the interrupt anytime it is low and do + * not need to limit it to the interrupt edge. + */ + ret = devm_request_threaded_irq(&client->dev, client->irq, NULL, + zforce_interrupt, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + input_dev->name, ts); + if (ret) { + dev_err(&client->dev, "irq %d request failed\n", client->irq); + return ret; + } + + i2c_set_clientdata(client, ts); + + /* let the controller boot */ + gpio_set_value(pdata->gpio_rst, 1); + + ts->command_waiting = NOTIFICATION_BOOTCOMPLETE; + if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0) + dev_warn(&client->dev, "bootcomplete timed out\n"); + + /* need to start device to get version information */ + ret = zforce_command_wait(ts, COMMAND_INITIALIZE); + if (ret) { + dev_err(&client->dev, "unable to initialize, %d\n", ret); + return ret; + } + + /* this gets the firmware version among other informations */ + ret = zforce_command_wait(ts, COMMAND_STATUS); + if (ret < 0) { + dev_err(&client->dev, "couldn't get status, %d\n", ret); + zforce_stop(ts); + return ret; + } + + /* stop device and put it into sleep until it is opened */ + ret = zforce_stop(ts); + if (ret < 0) + return ret; + + device_set_wakeup_capable(&client->dev, true); + + ret = input_register_device(input_dev); + if (ret) { + dev_err(&client->dev, "could not register input device, %d\n", + ret); + return ret; + } + + return 0; +} + +static struct i2c_device_id zforce_idtable[] = { + { "zforce-ts", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, zforce_idtable); + +static struct i2c_driver zforce_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "zforce-ts", + .pm = &zforce_pm_ops, + }, + .probe = zforce_probe, + .id_table = zforce_idtable, +}; + +module_i2c_driver(zforce_driver); + +MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>"); +MODULE_DESCRIPTION("zForce TouchScreen Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 9fd51e51e78b..3e7fdbb4916b 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -206,7 +206,7 @@ config SHMOBILE_IPMMU_TLB config SHMOBILE_IOMMU bool "IOMMU for Renesas IPMMU/IPMMUI" default n - depends on ARM || COMPILE_TEST + depends on ARM select IOMMU_API select ARM_DMA_USE_IOMMU select SHMOBILE_IPMMU diff --git 
a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c index baf2686aa8eb..02125e6a9109 100644 --- a/drivers/isdn/isdnloop/isdnloop.c +++ b/drivers/isdn/isdnloop/isdnloop.c @@ -1083,8 +1083,10 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp) spin_unlock_irqrestore(&card->isdnloop_lock, flags); return -ENOMEM; } - for (i = 0; i < 3; i++) - strcpy(card->s0num[i], sdef.num[i]); + for (i = 0; i < 3; i++) { + strlcpy(card->s0num[i], sdef.num[i], + sizeof(card->s0num[0])); + } break; case ISDN_PTYPE_1TR6: if (isdnloop_fake(card, "DRV1.04TC-1TR6-CAPI-CNS-BASIS-29.11.95", @@ -1097,7 +1099,7 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp) spin_unlock_irqrestore(&card->isdnloop_lock, flags); return -ENOMEM; } - strcpy(card->s0num[0], sdef.num[0]); + strlcpy(card->s0num[0], sdef.num[0], sizeof(card->s0num[0])); card->s0num[1][0] = '\0'; card->s0num[2][0] = '\0'; break; diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig index f950c9d29f3e..2638417b19aa 100644 --- a/drivers/md/bcache/Kconfig +++ b/drivers/md/bcache/Kconfig @@ -13,15 +13,8 @@ config BCACHE_DEBUG ---help--- Don't select this option unless you're a developer - Enables extra debugging tools (primarily a fuzz tester) - -config BCACHE_EDEBUG - bool "Extended runtime checks" - depends on BCACHE - ---help--- - Don't select this option unless you're a developer - - Enables extra runtime checks which significantly affect performance + Enables extra debugging tools, allows expensive runtime checks to be + turned on. config BCACHE_CLOSURES_DEBUG bool "Debug closures" diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index e45f5575fd4d..2b46bf1d7e40 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -63,13 +63,12 @@ #include "bcache.h" #include "btree.h" +#include <linux/blkdev.h> #include <linux/freezer.h> #include <linux/kthread.h> #include <linux/random.h> #include <trace/events/bcache.h> -#define MAX_IN_FLIGHT_DISCARDS 8U - /* Bucket heap / gen */ uint8_t bch_inc_gen(struct cache *ca, struct bucket *b) @@ -121,75 +120,6 @@ void bch_rescale_priorities(struct cache_set *c, int sectors) mutex_unlock(&c->bucket_lock); } -/* Discard/TRIM */ - -struct discard { - struct list_head list; - struct work_struct work; - struct cache *ca; - long bucket; - - struct bio bio; - struct bio_vec bv; -}; - -static void discard_finish(struct work_struct *w) -{ - struct discard *d = container_of(w, struct discard, work); - struct cache *ca = d->ca; - char buf[BDEVNAME_SIZE]; - - if (!test_bit(BIO_UPTODATE, &d->bio.bi_flags)) { - pr_notice("discard error on %s, disabling", - bdevname(ca->bdev, buf)); - d->ca->discard = 0; - } - - mutex_lock(&ca->set->bucket_lock); - - fifo_push(&ca->free, d->bucket); - list_add(&d->list, &ca->discards); - atomic_dec(&ca->discards_in_flight); - - mutex_unlock(&ca->set->bucket_lock); - - closure_wake_up(&ca->set->bucket_wait); - wake_up_process(ca->alloc_thread); - - closure_put(&ca->set->cl); -} - -static void discard_endio(struct bio *bio, int error) -{ - struct discard *d = container_of(bio, struct discard, bio); - schedule_work(&d->work); -} - -static void do_discard(struct cache *ca, long bucket) -{ - struct discard *d = list_first_entry(&ca->discards, - struct discard, list); - - list_del(&d->list); - d->bucket = bucket; - - atomic_inc(&ca->discards_in_flight); - closure_get(&ca->set->cl); - - bio_init(&d->bio); - - d->bio.bi_sector = bucket_to_sector(ca->set, d->bucket); - d->bio.bi_bdev = ca->bdev; - d->bio.bi_rw = 
REQ_WRITE|REQ_DISCARD; - d->bio.bi_max_vecs = 1; - d->bio.bi_io_vec = d->bio.bi_inline_vecs; - d->bio.bi_size = bucket_bytes(ca); - d->bio.bi_end_io = discard_endio; - bio_set_prio(&d->bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); - - submit_bio(0, &d->bio); -} - /* Allocation */ static inline bool can_inc_bucket_gen(struct bucket *b) @@ -280,7 +210,7 @@ static void invalidate_buckets_lru(struct cache *ca) * multiple times when it can't do anything */ ca->invalidate_needs_gc = 1; - bch_queue_gc(ca->set); + wake_up_gc(ca->set); return; } @@ -305,7 +235,7 @@ static void invalidate_buckets_fifo(struct cache *ca) if (++checked >= ca->sb.nbuckets) { ca->invalidate_needs_gc = 1; - bch_queue_gc(ca->set); + wake_up_gc(ca->set); return; } } @@ -330,7 +260,7 @@ static void invalidate_buckets_random(struct cache *ca) if (++checked >= ca->sb.nbuckets / 2) { ca->invalidate_needs_gc = 1; - bch_queue_gc(ca->set); + wake_up_gc(ca->set); return; } } @@ -398,16 +328,18 @@ static int bch_allocator_thread(void *arg) else break; - allocator_wait(ca, (int) fifo_free(&ca->free) > - atomic_read(&ca->discards_in_flight)); - if (ca->discard) { - allocator_wait(ca, !list_empty(&ca->discards)); - do_discard(ca, bucket); - } else { - fifo_push(&ca->free, bucket); - closure_wake_up(&ca->set->bucket_wait); + mutex_unlock(&ca->set->bucket_lock); + blkdev_issue_discard(ca->bdev, + bucket_to_sector(ca->set, bucket), + ca->sb.block_size, GFP_KERNEL, 0); + mutex_lock(&ca->set->bucket_lock); } + + allocator_wait(ca, !fifo_full(&ca->free)); + + fifo_push(&ca->free, bucket); + wake_up(&ca->set->bucket_wait); } /* @@ -433,16 +365,40 @@ static int bch_allocator_thread(void *arg) } } -long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl) +long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait) { - long r = -1; -again: + DEFINE_WAIT(w); + struct bucket *b; + long r; + + /* fastpath */ + if (fifo_used(&ca->free) > ca->watermark[watermark]) { + fifo_pop(&ca->free, r); + goto out; + } + + if (!wait) + return -1; + + while (1) { + if (fifo_used(&ca->free) > ca->watermark[watermark]) { + fifo_pop(&ca->free, r); + break; + } + + prepare_to_wait(&ca->set->bucket_wait, &w, + TASK_UNINTERRUPTIBLE); + + mutex_unlock(&ca->set->bucket_lock); + schedule(); + mutex_lock(&ca->set->bucket_lock); + } + + finish_wait(&ca->set->bucket_wait, &w); +out: wake_up_process(ca->alloc_thread); - if (fifo_used(&ca->free) > ca->watermark[watermark] && - fifo_pop(&ca->free, r)) { - struct bucket *b = ca->buckets + r; -#ifdef CONFIG_BCACHE_EDEBUG + if (expensive_debug_checks(ca->set)) { size_t iter; long i; @@ -455,36 +411,23 @@ again: BUG_ON(i == r); fifo_for_each(i, &ca->unused, iter) BUG_ON(i == r); -#endif - BUG_ON(atomic_read(&b->pin) != 1); - - SET_GC_SECTORS_USED(b, ca->sb.bucket_size); - - if (watermark <= WATERMARK_METADATA) { - SET_GC_MARK(b, GC_MARK_METADATA); - b->prio = BTREE_PRIO; - } else { - SET_GC_MARK(b, GC_MARK_RECLAIMABLE); - b->prio = INITIAL_PRIO; - } - - return r; } - trace_bcache_alloc_fail(ca); + b = ca->buckets + r; - if (cl) { - closure_wait(&ca->set->bucket_wait, cl); + BUG_ON(atomic_read(&b->pin) != 1); - if (closure_blocking(cl)) { - mutex_unlock(&ca->set->bucket_lock); - closure_sync(cl); - mutex_lock(&ca->set->bucket_lock); - goto again; - } + SET_GC_SECTORS_USED(b, ca->sb.bucket_size); + + if (watermark <= WATERMARK_METADATA) { + SET_GC_MARK(b, GC_MARK_METADATA); + b->prio = BTREE_PRIO; + } else { + SET_GC_MARK(b, GC_MARK_RECLAIMABLE); + b->prio = INITIAL_PRIO; } - return -1; + return r; 
} void bch_bucket_free(struct cache_set *c, struct bkey *k) @@ -501,7 +444,7 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k) } int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark, - struct bkey *k, int n, struct closure *cl) + struct bkey *k, int n, bool wait) { int i; @@ -514,7 +457,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark, for (i = 0; i < n; i++) { struct cache *ca = c->cache_by_alloc[i]; - long b = bch_bucket_alloc(ca, watermark, cl); + long b = bch_bucket_alloc(ca, watermark, wait); if (b == -1) goto err; @@ -529,22 +472,202 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark, return 0; err: bch_bucket_free(c, k); - __bkey_put(c, k); + bkey_put(c, k); return -1; } int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark, - struct bkey *k, int n, struct closure *cl) + struct bkey *k, int n, bool wait) { int ret; mutex_lock(&c->bucket_lock); - ret = __bch_bucket_alloc_set(c, watermark, k, n, cl); + ret = __bch_bucket_alloc_set(c, watermark, k, n, wait); mutex_unlock(&c->bucket_lock); return ret; } +/* Sector allocator */ + +struct open_bucket { + struct list_head list; + unsigned last_write_point; + unsigned sectors_free; + BKEY_PADDED(key); +}; + +/* + * We keep multiple buckets open for writes, and try to segregate different + * write streams for better cache utilization: first we look for a bucket where + * the last write to it was sequential with the current write, and failing that + * we look for a bucket that was last used by the same task. + * + * The ideas is if you've got multiple tasks pulling data into the cache at the + * same time, you'll get better cache utilization if you try to segregate their + * data and preserve locality. + * + * For example, say you've starting Firefox at the same time you're copying a + * bunch of files. Firefox will likely end up being fairly hot and stay in the + * cache awhile, but the data you copied might not be; if you wrote all that + * data to the same buckets it'd get invalidated at the same time. + * + * Both of those tasks will be doing fairly random IO so we can't rely on + * detecting sequential IO to segregate their data, but going off of the task + * should be a sane heuristic. + */ +static struct open_bucket *pick_data_bucket(struct cache_set *c, + const struct bkey *search, + unsigned write_point, + struct bkey *alloc) +{ + struct open_bucket *ret, *ret_task = NULL; + + list_for_each_entry_reverse(ret, &c->data_buckets, list) + if (!bkey_cmp(&ret->key, search)) + goto found; + else if (ret->last_write_point == write_point) + ret_task = ret; + + ret = ret_task ?: list_first_entry(&c->data_buckets, + struct open_bucket, list); +found: + if (!ret->sectors_free && KEY_PTRS(alloc)) { + ret->sectors_free = c->sb.bucket_size; + bkey_copy(&ret->key, alloc); + bkey_init(alloc); + } + + if (!ret->sectors_free) + ret = NULL; + + return ret; +} + +/* + * Allocates some space in the cache to write to, and k to point to the newly + * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the + * end of the newly allocated space). + * + * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many + * sectors were actually allocated. + * + * If s->writeback is true, will not fail. 
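Not part of the patch: a rough, user-space C sketch of the write-point segregation heuristic that the pick_data_bucket() comment above describes - prefer the bucket whose last write was sequential with this one, then one last used by the same write point, then any bucket with room. The sketch_bucket/pick_bucket names and the offset-matching test are stand-ins invented for the illustration, not the kernel's open_bucket code.

#include <stdio.h>

struct sketch_bucket {
	unsigned long long last_offset;	/* where the last write placed here ended */
	unsigned last_write_point;	/* task/stream that used this bucket last */
	unsigned sectors_free;
};

/*
 * Prefer a bucket whose last write ends exactly where the new write
 * starts (sequential), then one last used by the same write point,
 * and finally fall back to the first bucket with room.
 */
static struct sketch_bucket *pick_bucket(struct sketch_bucket *b, int n,
					 unsigned long long offset,
					 unsigned write_point)
{
	struct sketch_bucket *same_task = NULL, *any = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (!b[i].sectors_free)
			continue;
		if (b[i].last_offset == offset)
			return &b[i];
		if (b[i].last_write_point == write_point && !same_task)
			same_task = &b[i];
		if (!any)
			any = &b[i];
	}
	return same_task ? same_task : any;
}

int main(void)
{
	struct sketch_bucket buckets[2] = {
		{ .last_offset = 100, .last_write_point = 1, .sectors_free = 64 },
		{ .last_offset = 500, .last_write_point = 2, .sectors_free = 64 },
	};
	struct sketch_bucket *b = pick_bucket(buckets, 2, 500, 1);

	/* the sequential match wins over the same-write-point match */
	printf("picked bucket with last_offset=%llu\n", b->last_offset);
	return 0;
}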
+ */ +bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors, + unsigned write_point, unsigned write_prio, bool wait) +{ + struct open_bucket *b; + BKEY_PADDED(key) alloc; + unsigned i; + + /* + * We might have to allocate a new bucket, which we can't do with a + * spinlock held. So if we have to allocate, we drop the lock, allocate + * and then retry. KEY_PTRS() indicates whether alloc points to + * allocated bucket(s). + */ + + bkey_init(&alloc.key); + spin_lock(&c->data_bucket_lock); + + while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) { + unsigned watermark = write_prio + ? WATERMARK_MOVINGGC + : WATERMARK_NONE; + + spin_unlock(&c->data_bucket_lock); + + if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait)) + return false; + + spin_lock(&c->data_bucket_lock); + } + + /* + * If we had to allocate, we might race and not need to allocate the + * second time we call find_data_bucket(). If we allocated a bucket but + * didn't use it, drop the refcount bch_bucket_alloc_set() took: + */ + if (KEY_PTRS(&alloc.key)) + bkey_put(c, &alloc.key); + + for (i = 0; i < KEY_PTRS(&b->key); i++) + EBUG_ON(ptr_stale(c, &b->key, i)); + + /* Set up the pointer to the space we're allocating: */ + + for (i = 0; i < KEY_PTRS(&b->key); i++) + k->ptr[i] = b->key.ptr[i]; + + sectors = min(sectors, b->sectors_free); + + SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors); + SET_KEY_SIZE(k, sectors); + SET_KEY_PTRS(k, KEY_PTRS(&b->key)); + + /* + * Move b to the end of the lru, and keep track of what this bucket was + * last used for: + */ + list_move_tail(&b->list, &c->data_buckets); + bkey_copy_key(&b->key, k); + b->last_write_point = write_point; + + b->sectors_free -= sectors; + + for (i = 0; i < KEY_PTRS(&b->key); i++) { + SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors); + + atomic_long_add(sectors, + &PTR_CACHE(c, &b->key, i)->sectors_written); + } + + if (b->sectors_free < c->sb.block_size) + b->sectors_free = 0; + + /* + * k takes refcounts on the buckets it points to until it's inserted + * into the btree, but if we're done with this bucket we just transfer + * get_data_bucket()'s refcount. 
+ */ + if (b->sectors_free) + for (i = 0; i < KEY_PTRS(&b->key); i++) + atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin); + + spin_unlock(&c->data_bucket_lock); + return true; +} + /* Init */ +void bch_open_buckets_free(struct cache_set *c) +{ + struct open_bucket *b; + + while (!list_empty(&c->data_buckets)) { + b = list_first_entry(&c->data_buckets, + struct open_bucket, list); + list_del(&b->list); + kfree(b); + } +} + +int bch_open_buckets_alloc(struct cache_set *c) +{ + int i; + + spin_lock_init(&c->data_bucket_lock); + + for (i = 0; i < 6; i++) { + struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL); + if (!b) + return -ENOMEM; + + list_add(&b->list, &c->data_buckets); + } + + return 0; +} + int bch_cache_allocator_start(struct cache *ca) { struct task_struct *k = kthread_run(bch_allocator_thread, @@ -556,22 +679,8 @@ int bch_cache_allocator_start(struct cache *ca) return 0; } -void bch_cache_allocator_exit(struct cache *ca) -{ - struct discard *d; - - while (!list_empty(&ca->discards)) { - d = list_first_entry(&ca->discards, struct discard, list); - cancel_work_sync(&d->work); - list_del(&d->list); - kfree(d); - } -} - int bch_cache_allocator_init(struct cache *ca) { - unsigned i; - /* * Reserve: * Prio/gen writes first @@ -589,15 +698,5 @@ int bch_cache_allocator_init(struct cache *ca) ca->watermark[WATERMARK_NONE] = ca->free.size / 2 + ca->watermark[WATERMARK_MOVINGGC]; - for (i = 0; i < MAX_IN_FLIGHT_DISCARDS; i++) { - struct discard *d = kzalloc(sizeof(*d), GFP_KERNEL); - if (!d) - return -ENOMEM; - - d->ca = ca; - INIT_WORK(&d->work, discard_finish); - list_add(&d->list, &ca->discards); - } - return 0; } diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 0f12382aa35d..4beb55a0ff30 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -177,6 +177,7 @@ #define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__ +#include <linux/bcache.h> #include <linux/bio.h> #include <linux/kobject.h> #include <linux/list.h> @@ -210,168 +211,6 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2); #define GC_MARK_METADATA 2 BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14); -struct bkey { - uint64_t high; - uint64_t low; - uint64_t ptr[]; -}; - -/* Enough for a key with 6 pointers */ -#define BKEY_PAD 8 - -#define BKEY_PADDED(key) \ - union { struct bkey key; uint64_t key ## _pad[BKEY_PAD]; } - -/* Version 0: Cache device - * Version 1: Backing device - * Version 2: Seed pointer into btree node checksum - * Version 3: Cache device with new UUID format - * Version 4: Backing device with data offset - */ -#define BCACHE_SB_VERSION_CDEV 0 -#define BCACHE_SB_VERSION_BDEV 1 -#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3 -#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4 -#define BCACHE_SB_MAX_VERSION 4 - -#define SB_SECTOR 8 -#define SB_SIZE 4096 -#define SB_LABEL_SIZE 32 -#define SB_JOURNAL_BUCKETS 256U -/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */ -#define MAX_CACHES_PER_SET 8 - -#define BDEV_DATA_START_DEFAULT 16 /* sectors */ - -struct cache_sb { - uint64_t csum; - uint64_t offset; /* sector where this sb was written */ - uint64_t version; - - uint8_t magic[16]; - - uint8_t uuid[16]; - union { - uint8_t set_uuid[16]; - uint64_t set_magic; - }; - uint8_t label[SB_LABEL_SIZE]; - - uint64_t flags; - uint64_t seq; - uint64_t pad[8]; - - union { - struct { - /* Cache devices */ - uint64_t nbuckets; /* device size */ - - uint16_t block_size; /* sectors */ - uint16_t bucket_size; /* sectors */ - - uint16_t nr_in_set; - uint16_t nr_this_dev; - }; - struct 
{ - /* Backing devices */ - uint64_t data_offset; - - /* - * block_size from the cache device section is still used by - * backing devices, so don't add anything here until we fix - * things to not need it for backing devices anymore - */ - }; - }; - - uint32_t last_mount; /* time_t */ - - uint16_t first_bucket; - union { - uint16_t njournal_buckets; - uint16_t keys; - }; - uint64_t d[SB_JOURNAL_BUCKETS]; /* journal buckets */ -}; - -BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1); -BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1); -BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3); -#define CACHE_REPLACEMENT_LRU 0U -#define CACHE_REPLACEMENT_FIFO 1U -#define CACHE_REPLACEMENT_RANDOM 2U - -BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4); -#define CACHE_MODE_WRITETHROUGH 0U -#define CACHE_MODE_WRITEBACK 1U -#define CACHE_MODE_WRITEAROUND 2U -#define CACHE_MODE_NONE 3U -BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2); -#define BDEV_STATE_NONE 0U -#define BDEV_STATE_CLEAN 1U -#define BDEV_STATE_DIRTY 2U -#define BDEV_STATE_STALE 3U - -/* Version 1: Seed pointer into btree node checksum - */ -#define BCACHE_BSET_VERSION 1 - -/* - * This is the on disk format for btree nodes - a btree node on disk is a list - * of these; within each set the keys are sorted - */ -struct bset { - uint64_t csum; - uint64_t magic; - uint64_t seq; - uint32_t version; - uint32_t keys; - - union { - struct bkey start[0]; - uint64_t d[0]; - }; -}; - -/* - * On disk format for priorities and gens - see super.c near prio_write() for - * more. - */ -struct prio_set { - uint64_t csum; - uint64_t magic; - uint64_t seq; - uint32_t version; - uint32_t pad; - - uint64_t next_bucket; - - struct bucket_disk { - uint16_t prio; - uint8_t gen; - } __attribute((packed)) data[]; -}; - -struct uuid_entry { - union { - struct { - uint8_t uuid[16]; - uint8_t label[32]; - uint32_t first_reg; - uint32_t last_reg; - uint32_t invalidated; - - uint32_t flags; - /* Size of flash only volumes */ - uint64_t sectors; - }; - - uint8_t pad[128]; - }; -}; - -BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1); - #include "journal.h" #include "stats.h" struct search; @@ -384,8 +223,6 @@ struct keybuf_key { void *private; }; -typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *); - struct keybuf { struct bkey last_scanned; spinlock_t lock; @@ -400,7 +237,7 @@ struct keybuf { struct rb_root keys; -#define KEYBUF_NR 100 +#define KEYBUF_NR 500 DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR); }; @@ -429,16 +266,15 @@ struct bcache_device { struct gendisk *disk; - /* If nonzero, we're closing */ - atomic_t closing; - - /* If nonzero, we're detaching/unregistering from cache set */ - atomic_t detaching; - int flush_done; + unsigned long flags; +#define BCACHE_DEV_CLOSING 0 +#define BCACHE_DEV_DETACHING 1 +#define BCACHE_DEV_UNLINK_DONE 2 - uint64_t nr_stripes; - unsigned stripe_size_bits; + unsigned nr_stripes; + unsigned stripe_size; atomic_t *stripe_sectors_dirty; + unsigned long *full_dirty_stripes; unsigned long sectors_dirty_last; long sectors_dirty_derivative; @@ -509,7 +345,7 @@ struct cached_dev { /* Limit number of writeback bios in flight */ struct semaphore in_flight; - struct closure_with_timer writeback; + struct task_struct *writeback_thread; struct keybuf writeback_keys; @@ -527,8 +363,8 @@ struct cached_dev { unsigned sequential_cutoff; unsigned readahead; - unsigned sequential_merge:1; unsigned verify:1; + unsigned bypass_torture_test:1; unsigned partial_stripes_expensive:1; unsigned 
writeback_metadata:1; @@ -620,15 +456,6 @@ struct cache { bool discard; /* Get rid of? */ - /* - * We preallocate structs for issuing discards to buckets, and keep them - * on this list when they're not in use; do_discard() issues discards - * whenever there's work to do and is called by free_some_buckets() and - * when a discard finishes. - */ - atomic_t discards_in_flight; - struct list_head discards; - struct journal_device journal; /* The rest of this all shows up in sysfs */ @@ -649,7 +476,6 @@ struct gc_stat { size_t nkeys; uint64_t data; /* sectors */ - uint64_t dirty; /* sectors */ unsigned in_use; /* percent */ }; @@ -744,8 +570,8 @@ struct cache_set { * basically a lock for this that we can wait on asynchronously. The * btree_root() macro releases the lock when it returns. */ - struct closure *try_harder; - struct closure_waitlist try_wait; + struct task_struct *try_harder; + wait_queue_head_t try_wait; uint64_t try_harder_start; /* @@ -759,7 +585,7 @@ struct cache_set { * written. */ atomic_t prio_blocked; - struct closure_waitlist bucket_wait; + wait_queue_head_t bucket_wait; /* * For any bio we don't skip we subtract the number of sectors from @@ -782,7 +608,7 @@ struct cache_set { struct gc_stat gc_stats; size_t nbuckets; - struct closure_with_waitlist gc; + struct task_struct *gc_thread; /* Where in the btree gc currently is */ struct bkey gc_done; @@ -795,11 +621,10 @@ struct cache_set { /* Counts how many sectors bio_insert has added to the cache */ atomic_t sectors_to_gc; - struct closure moving_gc; - struct closure_waitlist moving_gc_wait; + wait_queue_head_t moving_gc_wait; struct keybuf moving_gc_keys; /* Number of moving GC bios in flight */ - atomic_t in_flight; + struct semaphore moving_in_flight; struct btree *root; @@ -841,22 +666,27 @@ struct cache_set { unsigned congested_read_threshold_us; unsigned congested_write_threshold_us; - spinlock_t sort_time_lock; struct time_stats sort_time; struct time_stats btree_gc_time; struct time_stats btree_split_time; - spinlock_t btree_read_time_lock; struct time_stats btree_read_time; struct time_stats try_harder_time; atomic_long_t cache_read_races; atomic_long_t writeback_keys_done; atomic_long_t writeback_keys_failed; + + enum { + ON_ERROR_UNREGISTER, + ON_ERROR_PANIC, + } on_error; unsigned error_limit; unsigned error_decay; + unsigned short journal_delay_ms; unsigned verify:1; unsigned key_merging_disabled:1; + unsigned expensive_debug_checks:1; unsigned gc_always_rewrite:1; unsigned shrinker_disabled:1; unsigned copy_gc_enabled:1; @@ -865,21 +695,6 @@ struct cache_set { struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS]; }; -static inline bool key_merging_disabled(struct cache_set *c) -{ -#ifdef CONFIG_BCACHE_DEBUG - return c->key_merging_disabled; -#else - return 0; -#endif -} - -static inline bool SB_IS_BDEV(const struct cache_sb *sb) -{ - return sb->version == BCACHE_SB_VERSION_BDEV - || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET; -} - struct bbio { unsigned submit_time_us; union { @@ -933,59 +748,6 @@ static inline unsigned local_clock_us(void) #define prio_buckets(c) \ DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c)) -#define JSET_MAGIC 0x245235c1a3625032ULL -#define PSET_MAGIC 0x6750e15f87337f91ULL -#define BSET_MAGIC 0x90135c78b99e07f5ULL - -#define jset_magic(c) ((c)->sb.set_magic ^ JSET_MAGIC) -#define pset_magic(c) ((c)->sb.set_magic ^ PSET_MAGIC) -#define bset_magic(c) ((c)->sb.set_magic ^ BSET_MAGIC) - -/* Bkey fields: all units are in sectors */ - -#define KEY_FIELD(name, field, 
offset, size) \ - BITMASK(name, struct bkey, field, offset, size) - -#define PTR_FIELD(name, offset, size) \ - static inline uint64_t name(const struct bkey *k, unsigned i) \ - { return (k->ptr[i] >> offset) & ~(((uint64_t) ~0) << size); } \ - \ - static inline void SET_##name(struct bkey *k, unsigned i, uint64_t v)\ - { \ - k->ptr[i] &= ~(~((uint64_t) ~0 << size) << offset); \ - k->ptr[i] |= v << offset; \ - } - -KEY_FIELD(KEY_PTRS, high, 60, 3) -KEY_FIELD(HEADER_SIZE, high, 58, 2) -KEY_FIELD(KEY_CSUM, high, 56, 2) -KEY_FIELD(KEY_PINNED, high, 55, 1) -KEY_FIELD(KEY_DIRTY, high, 36, 1) - -KEY_FIELD(KEY_SIZE, high, 20, 16) -KEY_FIELD(KEY_INODE, high, 0, 20) - -/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */ - -static inline uint64_t KEY_OFFSET(const struct bkey *k) -{ - return k->low; -} - -static inline void SET_KEY_OFFSET(struct bkey *k, uint64_t v) -{ - k->low = v; -} - -PTR_FIELD(PTR_DEV, 51, 12) -PTR_FIELD(PTR_OFFSET, 8, 43) -PTR_FIELD(PTR_GEN, 0, 8) - -#define PTR_CHECK_DEV ((1 << 12) - 1) - -#define PTR(gen, offset, dev) \ - ((((uint64_t) dev) << 51) | ((uint64_t) offset) << 8 | gen) - static inline size_t sector_to_bucket(struct cache_set *c, sector_t s) { return s >> c->bucket_bits; @@ -1024,27 +786,11 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c, /* Btree key macros */ -/* - * The high bit being set is a relic from when we used it to do binary - * searches - it told you where a key started. It's not used anymore, - * and can probably be safely dropped. - */ -#define KEY(dev, sector, len) \ -((struct bkey) { \ - .high = (1ULL << 63) | ((uint64_t) (len) << 20) | (dev), \ - .low = (sector) \ -}) - static inline void bkey_init(struct bkey *k) { - *k = KEY(0, 0, 0); + *k = ZERO_KEY; } -#define KEY_START(k) (KEY_OFFSET(k) - KEY_SIZE(k)) -#define START_KEY(k) KEY(KEY_INODE(k), KEY_START(k), 0) -#define MAX_KEY KEY(~(~0 << 20), ((uint64_t) ~0) >> 1, 0) -#define ZERO_KEY KEY(0, 0, 0) - /* * This is used for various on disk data structures - cache_sb, prio_set, bset, * jset: The checksum is _always_ the first 8 bytes of these structs @@ -1094,14 +840,6 @@ do { \ for (b = (ca)->buckets + (ca)->sb.first_bucket; \ b < (ca)->buckets + (ca)->sb.nbuckets; b++) -static inline void __bkey_put(struct cache_set *c, struct bkey *k) -{ - unsigned i; - - for (i = 0; i < KEY_PTRS(k); i++) - atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin); -} - static inline void cached_dev_put(struct cached_dev *dc) { if (atomic_dec_and_test(&dc->count)) @@ -1173,13 +911,15 @@ uint8_t bch_inc_gen(struct cache *, struct bucket *); void bch_rescale_priorities(struct cache_set *, int); bool bch_bucket_add_unused(struct cache *, struct bucket *); -long bch_bucket_alloc(struct cache *, unsigned, struct closure *); +long bch_bucket_alloc(struct cache *, unsigned, bool); void bch_bucket_free(struct cache_set *, struct bkey *); int __bch_bucket_alloc_set(struct cache_set *, unsigned, - struct bkey *, int, struct closure *); + struct bkey *, int, bool); int bch_bucket_alloc_set(struct cache_set *, unsigned, - struct bkey *, int, struct closure *); + struct bkey *, int, bool); +bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned, + unsigned, unsigned, bool); __printf(2, 3) bool bch_cache_set_error(struct cache_set *, const char *, ...); @@ -1187,7 +927,7 @@ bool bch_cache_set_error(struct cache_set *, const char *, ...); void bch_prio_write(struct cache *); void bch_write_bdev_super(struct cached_dev *, struct closure *); -extern struct workqueue_struct *bcache_wq, *bch_gc_wq; 
+extern struct workqueue_struct *bcache_wq; extern const char * const bch_cache_modes[]; extern struct mutex bch_register_lock; extern struct list_head bch_cache_sets; @@ -1220,15 +960,14 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *); void bch_btree_cache_free(struct cache_set *); int bch_btree_cache_alloc(struct cache_set *); void bch_moving_init_cache_set(struct cache_set *); +int bch_open_buckets_alloc(struct cache_set *); +void bch_open_buckets_free(struct cache_set *); int bch_cache_allocator_start(struct cache *ca); -void bch_cache_allocator_exit(struct cache *ca); int bch_cache_allocator_init(struct cache *ca); void bch_debug_exit(void); int bch_debug_init(struct kobject *); -void bch_writeback_exit(void); -int bch_writeback_init(void); void bch_request_exit(void); int bch_request_init(void); void bch_btree_exit(void); diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 22d1ae72c282..7d388b8bb50e 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -14,22 +14,12 @@ /* Keylists */ -void bch_keylist_copy(struct keylist *dest, struct keylist *src) -{ - *dest = *src; - - if (src->list == src->d) { - size_t n = (uint64_t *) src->top - src->d; - dest->top = (struct bkey *) &dest->d[n]; - dest->list = dest->d; - } -} - int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c) { - unsigned oldsize = (uint64_t *) l->top - l->list; - unsigned newsize = oldsize + 2 + nptrs; - uint64_t *new; + size_t oldsize = bch_keylist_nkeys(l); + size_t newsize = oldsize + 2 + nptrs; + uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p; + uint64_t *new_keys; /* The journalling code doesn't handle the case where the keys to insert * is bigger than an empty write: If we just return -ENOMEM here, @@ -45,24 +35,23 @@ int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c) roundup_pow_of_two(oldsize) == newsize) return 0; - new = krealloc(l->list == l->d ? 
NULL : l->list, - sizeof(uint64_t) * newsize, GFP_NOIO); + new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO); - if (!new) + if (!new_keys) return -ENOMEM; - if (l->list == l->d) - memcpy(new, l->list, sizeof(uint64_t) * KEYLIST_INLINE); + if (!old_keys) + memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize); - l->list = new; - l->top = (struct bkey *) (&l->list[oldsize]); + l->keys_p = new_keys; + l->top_p = new_keys + oldsize; return 0; } struct bkey *bch_keylist_pop(struct keylist *l) { - struct bkey *k = l->bottom; + struct bkey *k = l->keys; if (k == l->top) return NULL; @@ -73,21 +62,20 @@ struct bkey *bch_keylist_pop(struct keylist *l) return l->top = k; } -/* Pointer validation */ - -bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k) +void bch_keylist_pop_front(struct keylist *l) { - unsigned i; - char buf[80]; + l->top_p -= bkey_u64s(l->keys); - if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))) - goto bad; + memmove(l->keys, + bkey_next(l->keys), + bch_keylist_bytes(l)); +} - if (!level && KEY_SIZE(k) > KEY_OFFSET(k)) - goto bad; +/* Pointer validation */ - if (!KEY_SIZE(k)) - return true; +static bool __ptr_invalid(struct cache_set *c, const struct bkey *k) +{ + unsigned i; for (i = 0; i < KEY_PTRS(k); i++) if (ptr_available(c, k, i)) { @@ -98,13 +86,83 @@ bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k) if (KEY_SIZE(k) + r > c->sb.bucket_size || bucket < ca->sb.first_bucket || bucket >= ca->sb.nbuckets) - goto bad; + return true; } return false; +} + +bool bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k) +{ + char buf[80]; + + if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)) + goto bad; + + if (__ptr_invalid(c, k)) + goto bad; + + return false; +bad: + bch_bkey_to_text(buf, sizeof(buf), k); + cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k)); + return true; +} + +bool bch_extent_ptr_invalid(struct cache_set *c, const struct bkey *k) +{ + char buf[80]; + + if (!KEY_SIZE(k)) + return true; + + if (KEY_SIZE(k) > KEY_OFFSET(k)) + goto bad; + + if (__ptr_invalid(c, k)) + goto bad; + + return false; bad: bch_bkey_to_text(buf, sizeof(buf), k); - cache_bug(c, "spotted bad key %s: %s", buf, bch_ptr_status(c, k)); + cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k)); + return true; +} + +static bool ptr_bad_expensive_checks(struct btree *b, const struct bkey *k, + unsigned ptr) +{ + struct bucket *g = PTR_BUCKET(b->c, k, ptr); + char buf[80]; + + if (mutex_trylock(&b->c->bucket_lock)) { + if (b->level) { + if (KEY_DIRTY(k) || + g->prio != BTREE_PRIO || + (b->c->gc_mark_valid && + GC_MARK(g) != GC_MARK_METADATA)) + goto err; + + } else { + if (g->prio == BTREE_PRIO) + goto err; + + if (KEY_DIRTY(k) && + b->c->gc_mark_valid && + GC_MARK(g) != GC_MARK_DIRTY) + goto err; + } + mutex_unlock(&b->c->bucket_lock); + } + + return false; +err: + mutex_unlock(&b->c->bucket_lock); + bch_bkey_to_text(buf, sizeof(buf), k); + btree_bug(b, +"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i", + buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin), + g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen); return true; } @@ -118,64 +176,29 @@ bool bch_ptr_bad(struct btree *b, const struct bkey *k) bch_ptr_invalid(b, k)) return true; - if (KEY_PTRS(k) && PTR_DEV(k, 0) == PTR_CHECK_DEV) - return true; + for (i = 0; i < KEY_PTRS(k); i++) { + if (!ptr_available(b->c, k, i)) + return true; - for (i = 0; i < KEY_PTRS(k); i++) - if 
(ptr_available(b->c, k, i)) { - g = PTR_BUCKET(b->c, k, i); - stale = ptr_stale(b->c, k, i); + g = PTR_BUCKET(b->c, k, i); + stale = ptr_stale(b->c, k, i); - btree_bug_on(stale > 96, b, - "key too stale: %i, need_gc %u", - stale, b->c->need_gc); + btree_bug_on(stale > 96, b, + "key too stale: %i, need_gc %u", + stale, b->c->need_gc); - btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k), - b, "stale dirty pointer"); + btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k), + b, "stale dirty pointer"); - if (stale) - return true; + if (stale) + return true; -#ifdef CONFIG_BCACHE_EDEBUG - if (!mutex_trylock(&b->c->bucket_lock)) - continue; - - if (b->level) { - if (KEY_DIRTY(k) || - g->prio != BTREE_PRIO || - (b->c->gc_mark_valid && - GC_MARK(g) != GC_MARK_METADATA)) - goto bug; - - } else { - if (g->prio == BTREE_PRIO) - goto bug; - - if (KEY_DIRTY(k) && - b->c->gc_mark_valid && - GC_MARK(g) != GC_MARK_DIRTY) - goto bug; - } - mutex_unlock(&b->c->bucket_lock); -#endif - } + if (expensive_debug_checks(b->c) && + ptr_bad_expensive_checks(b, k, i)) + return true; + } return false; -#ifdef CONFIG_BCACHE_EDEBUG -bug: - mutex_unlock(&b->c->bucket_lock); - - { - char buf[80]; - - bch_bkey_to_text(buf, sizeof(buf), k); - btree_bug(b, -"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i", - buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin), - g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen); - } - return true; -#endif } /* Key/pointer manipulation */ @@ -458,16 +481,8 @@ static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline) static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift) { -#ifdef CONFIG_X86_64 - asm("shrd %[shift],%[high],%[low]" - : [low] "+Rm" (low) - : [high] "R" (high), - [shift] "ci" (shift) - : "cc"); -#else low >>= shift; low |= (high << 1) << (63U - shift); -#endif return low; } @@ -686,7 +701,7 @@ void bch_bset_init_next(struct btree *b) } else get_random_bytes(&i->seq, sizeof(uint64_t)); - i->magic = bset_magic(b->c); + i->magic = bset_magic(&b->c->sb); i->version = 0; i->keys = 0; @@ -824,16 +839,16 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t, } else i = bset_search_write_set(b, t, search); -#ifdef CONFIG_BCACHE_EDEBUG - BUG_ON(bset_written(b, t) && - i.l != t->data->start && - bkey_cmp(tree_to_prev_bkey(t, - inorder_to_tree(bkey_to_cacheline(t, i.l), t)), - search) > 0); + if (expensive_debug_checks(b->c)) { + BUG_ON(bset_written(b, t) && + i.l != t->data->start && + bkey_cmp(tree_to_prev_bkey(t, + inorder_to_tree(bkey_to_cacheline(t, i.l), t)), + search) > 0); - BUG_ON(i.r != end(t->data) && - bkey_cmp(i.r, search) <= 0); -#endif + BUG_ON(i.r != end(t->data) && + bkey_cmp(i.r, search) <= 0); + } while (likely(i.l != i.r) && bkey_cmp(i.l, search) <= 0) @@ -844,6 +859,13 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t, /* Btree iterator */ +/* + * Returns true if l > r - unless l == r, in which case returns true if l is + * older than r. + * + * Necessary for btree_sort_fixup() - if there are multiple keys that compare + * equal in different sets, we have to process them newest to oldest. 
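Not part of the patch: a small stand-alone comparator showing the tie-break rule spelled out in the btree_iter_cmp() comment above - with strictly different keys the larger one sorts after, and with equal keys the older set sorts after, so a heap ordered this way yields the newer of two equal keys first. struct iter_set and its fields are invented stand-ins for the real iterator sets.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct iter_set {
	uint64_t key;	/* stand-in for a bkey */
	unsigned age;	/* higher = older set */
};

/* "l sorts after r": strictly greater key, or equal keys and l is older */
static bool iter_set_cmp(struct iter_set l, struct iter_set r)
{
	return l.key == r.key ? l.age > r.age : l.key > r.key;
}

int main(void)
{
	struct iter_set newer = { .key = 42, .age = 0 };
	struct iter_set older = { .key = 42, .age = 3 };

	/* equal keys: the older set compares as "greater", so it sinks and
	 * the newer set is handed out first */
	printf("older sorts after newer: %d\n", iter_set_cmp(older, newer));
	return 0;
}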
+ */ static inline bool btree_iter_cmp(struct btree_iter_set l, struct btree_iter_set r) { @@ -867,12 +889,16 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k, } struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter, - struct bkey *search, struct bset_tree *start) + struct bkey *search, struct bset_tree *start) { struct bkey *ret = NULL; iter->size = ARRAY_SIZE(iter->data); iter->used = 0; +#ifdef CONFIG_BCACHE_DEBUG + iter->b = b; +#endif + for (; start <= &b->sets[b->nsets]; start++) { ret = bch_bset_search(b, start, search); bch_btree_iter_push(iter, ret, end(start->data)); @@ -887,6 +913,8 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter) struct bkey *ret = NULL; if (!btree_iter_end(iter)) { + bch_btree_iter_next_check(iter); + ret = iter->data->k; iter->data->k = bkey_next(iter->data->k); @@ -916,14 +944,6 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter, return ret; } -struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search) -{ - struct btree_iter iter; - - bch_btree_iter_init(b, &iter, search); - return bch_btree_iter_next_filter(&iter, b, bch_ptr_bad); -} - /* Mergesort */ static void sort_key_next(struct btree_iter *iter, @@ -998,7 +1018,6 @@ static void btree_mergesort(struct btree *b, struct bset *out, out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0; pr_debug("sorted %i keys", out->keys); - bch_check_key_order(b, out); } static void __btree_sort(struct btree *b, struct btree_iter *iter, @@ -1029,7 +1048,7 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter, * memcpy() */ - out->magic = bset_magic(b->c); + out->magic = bset_magic(&b->c->sb); out->seq = b->sets[0].data->seq; out->version = b->sets[0].data->version; swap(out, b->sets[0].data); @@ -1050,24 +1069,21 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter, if (b->written) bset_build_written_tree(b); - if (!start) { - spin_lock(&b->c->sort_time_lock); + if (!start) bch_time_stats_update(&b->c->sort_time, start_time); - spin_unlock(&b->c->sort_time_lock); - } } void bch_btree_sort_partial(struct btree *b, unsigned start) { - size_t oldsize = 0, order = b->page_order, keys = 0; + size_t order = b->page_order, keys = 0; struct btree_iter iter; + int oldsize = bch_count_data(b); + __bch_btree_iter_init(b, &iter, NULL, &b->sets[start]); BUG_ON(b->sets[b->nsets].data == write_block(b) && (b->sets[b->nsets].size || b->nsets)); - if (b->written) - oldsize = bch_count_data(b); if (start) { unsigned i; @@ -1083,7 +1099,7 @@ void bch_btree_sort_partial(struct btree *b, unsigned start) __btree_sort(b, &iter, start, order, false); - EBUG_ON(b->written && bch_count_data(b) != oldsize); + EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize); } void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter) @@ -1101,9 +1117,7 @@ void bch_btree_sort_into(struct btree *b, struct btree *new) btree_mergesort(b, new->sets->data, &iter, false, true); - spin_lock(&b->c->sort_time_lock); bch_time_stats_update(&b->c->sort_time, start_time); - spin_unlock(&b->c->sort_time_lock); bkey_copy_key(&new->key, &b->key); new->sets->size = 0; @@ -1148,16 +1162,16 @@ out: /* Sysfs stuff */ struct bset_stats { + struct btree_op op; size_t nodes; size_t sets_written, sets_unwritten; size_t bytes_written, bytes_unwritten; size_t floats, failed; }; -static int bch_btree_bset_stats(struct btree *b, struct btree_op *op, - struct bset_stats *stats) +static int btree_bset_stats(struct btree_op *op, struct 
btree *b) { - struct bkey *k; + struct bset_stats *stats = container_of(op, struct bset_stats, op); unsigned i; stats->nodes++; @@ -1182,30 +1196,19 @@ static int bch_btree_bset_stats(struct btree *b, struct btree_op *op, } } - if (b->level) { - struct btree_iter iter; - - for_each_key_filter(b, k, &iter, bch_ptr_bad) { - int ret = btree(bset_stats, k, b, op, stats); - if (ret) - return ret; - } - } - - return 0; + return MAP_CONTINUE; } int bch_bset_print_stats(struct cache_set *c, char *buf) { - struct btree_op op; struct bset_stats t; int ret; - bch_btree_op_init_stack(&op); memset(&t, 0, sizeof(struct bset_stats)); + bch_btree_op_init(&t.op, -1); - ret = btree_root(bset_stats, c, &op, &t); - if (ret) + ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats); + if (ret < 0) return ret; return snprintf(buf, PAGE_SIZE, diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index ae115a253d73..1d3c24f9fa0e 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h @@ -148,6 +148,9 @@ struct btree_iter { size_t size, used; +#ifdef CONFIG_BCACHE_DEBUG + struct btree *b; +#endif struct btree_iter_set { struct bkey *k, *end; } data[MAX_BSETS]; @@ -193,54 +196,26 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l, : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r); } -static inline size_t bkey_u64s(const struct bkey *k) -{ - BUG_ON(KEY_CSUM(k) > 1); - return 2 + KEY_PTRS(k) + (KEY_CSUM(k) ? 1 : 0); -} - -static inline size_t bkey_bytes(const struct bkey *k) -{ - return bkey_u64s(k) * sizeof(uint64_t); -} - -static inline void bkey_copy(struct bkey *dest, const struct bkey *src) -{ - memcpy(dest, src, bkey_bytes(src)); -} - -static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src) -{ - if (!src) - src = &KEY(0, 0, 0); - - SET_KEY_INODE(dest, KEY_INODE(src)); - SET_KEY_OFFSET(dest, KEY_OFFSET(src)); -} - -static inline struct bkey *bkey_next(const struct bkey *k) -{ - uint64_t *d = (void *) k; - return (struct bkey *) (d + bkey_u64s(k)); -} - /* Keylists */ struct keylist { - struct bkey *top; union { - uint64_t *list; - struct bkey *bottom; + struct bkey *keys; + uint64_t *keys_p; + }; + union { + struct bkey *top; + uint64_t *top_p; }; /* Enough room for btree_split's keys without realloc */ #define KEYLIST_INLINE 16 - uint64_t d[KEYLIST_INLINE]; + uint64_t inline_keys[KEYLIST_INLINE]; }; static inline void bch_keylist_init(struct keylist *l) { - l->top = (void *) (l->list = l->d); + l->top_p = l->keys_p = l->inline_keys; } static inline void bch_keylist_push(struct keylist *l) @@ -256,17 +231,32 @@ static inline void bch_keylist_add(struct keylist *l, struct bkey *k) static inline bool bch_keylist_empty(struct keylist *l) { - return l->top == (void *) l->list; + return l->top == l->keys; +} + +static inline void bch_keylist_reset(struct keylist *l) +{ + l->top = l->keys; } static inline void bch_keylist_free(struct keylist *l) { - if (l->list != l->d) - kfree(l->list); + if (l->keys_p != l->inline_keys) + kfree(l->keys_p); +} + +static inline size_t bch_keylist_nkeys(struct keylist *l) +{ + return l->top_p - l->keys_p; +} + +static inline size_t bch_keylist_bytes(struct keylist *l) +{ + return bch_keylist_nkeys(l) * sizeof(uint64_t); } -void bch_keylist_copy(struct keylist *, struct keylist *); struct bkey *bch_keylist_pop(struct keylist *); +void bch_keylist_pop_front(struct keylist *); int bch_keylist_realloc(struct keylist *, int, struct cache_set *); void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *, @@ -287,7 
+277,9 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k) } const char *bch_ptr_status(struct cache_set *, const struct bkey *); -bool __bch_ptr_invalid(struct cache_set *, int level, const struct bkey *); +bool bch_btree_ptr_invalid(struct cache_set *, const struct bkey *); +bool bch_extent_ptr_invalid(struct cache_set *, const struct bkey *); + bool bch_ptr_bad(struct btree *, const struct bkey *); static inline uint8_t gen_after(uint8_t a, uint8_t b) @@ -311,7 +303,6 @@ static inline bool ptr_available(struct cache_set *c, const struct bkey *k, typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *); -struct bkey *bch_next_recurse_key(struct btree *, struct bkey *); struct bkey *bch_btree_iter_next(struct btree_iter *); struct bkey *bch_btree_iter_next_filter(struct btree_iter *, struct btree *, ptr_filter_fn); @@ -361,12 +352,30 @@ void bch_bset_fix_lookup_table(struct btree *, struct bkey *); struct bkey *__bch_bset_search(struct btree *, struct bset_tree *, const struct bkey *); +/* + * Returns the first key that is strictly greater than search + */ static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t, const struct bkey *search) { return search ? __bch_bset_search(b, t, search) : t->data->start; } +#define PRECEDING_KEY(_k) \ +({ \ + struct bkey *_ret = NULL; \ + \ + if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \ + _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \ + \ + if (!_ret->low) \ + _ret->high--; \ + _ret->low--; \ + } \ + \ + _ret; \ +}) + bool bch_bkey_try_merge(struct btree *, struct bkey *, struct bkey *); void bch_btree_sort_lazy(struct btree *); void bch_btree_sort_into(struct btree *, struct btree *); diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index f42fc7ed9cd6..5e2765aadce1 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -23,12 +23,13 @@ #include "bcache.h" #include "btree.h" #include "debug.h" -#include "request.h" #include "writeback.h" #include <linux/slab.h> #include <linux/bitops.h> +#include <linux/freezer.h> #include <linux/hash.h> +#include <linux/kthread.h> #include <linux/prefetch.h> #include <linux/random.h> #include <linux/rcupdate.h> @@ -88,15 +89,13 @@ * Test module load/unload */ -static const char * const op_types[] = { - "insert", "replace" +enum { + BTREE_INSERT_STATUS_INSERT, + BTREE_INSERT_STATUS_BACK_MERGE, + BTREE_INSERT_STATUS_OVERWROTE, + BTREE_INSERT_STATUS_FRONT_MERGE, }; -static const char *op_type(struct btree_op *op) -{ - return op_types[op->type]; -} - #define MAX_NEED_GC 64 #define MAX_SAVE_PRIO 72 @@ -105,23 +104,89 @@ static const char *op_type(struct btree_op *op) #define PTR_HASH(c, k) \ (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0)) -struct workqueue_struct *bch_gc_wq; static struct workqueue_struct *btree_io_wq; -void bch_btree_op_init_stack(struct btree_op *op) +static inline bool should_split(struct btree *b) { - memset(op, 0, sizeof(struct btree_op)); - closure_init_stack(&op->cl); - op->lock = -1; - bch_keylist_init(&op->keys); + struct bset *i = write_block(b); + return b->written >= btree_blocks(b) || + (b->written + __set_blocks(i, i->keys + 15, b->c) + > btree_blocks(b)); } +#define insert_lock(s, b) ((b)->level <= (s)->lock) + +/* + * These macros are for recursing down the btree - they handle the details of + * locking and looking up nodes in the cache for you. They're best treated as + * mere syntax when reading code that uses them. 
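
The PRECEDING_KEY() macro added to bset.h above exists because bch_bset_search() returns the first key strictly greater than its search argument, while the insert path wants to start from the key just before the one being inserted (btree_insert_key() further down now passes PRECEDING_KEY(&START_KEY(k)) instead of hand-rolling the "offset - 1" adjustment). The macro decrements the packed 128-bit key value, borrowing from the high word when the low word is zero, and yields NULL for the all-zero key. Below is a minimal userspace sketch of that decrement on a simplified two-word key; the struct and function names are invented and only mirror the high/low layout assumed from struct bkey (offset in the low word, inode and flags in the high word).

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the high/low words of struct bkey. */
struct demo_key {
	uint64_t high;
	uint64_t low;
};

/* Largest key that sorts strictly before k, assuming k is not 0:0. */
static struct demo_key preceding_key(struct demo_key k)
{
	if (!k.low)
		k.high--;	/* borrow across the 64-bit boundary */
	k.low--;
	return k;
}

int main(void)
{
	struct demo_key k = { .high = 5, .low = 0 };
	struct demo_key p = preceding_key(k);

	printf("preceding of 5:0 is %llu:%llu\n",
	       (unsigned long long)p.high,
	       (unsigned long long)p.low);	/* 4:18446744073709551615 */
	return 0;
}
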
+ * + * op->lock determines whether we take a read or a write lock at a given depth. + * If you've got a read lock and find that you need a write lock (i.e. you're + * going to have to split), set op->lock and return -EINTR; btree_root() will + * call you again and you'll have the correct lock. + */ + +/** + * btree - recurse down the btree on a specified key + * @fn: function to call, which will be passed the child node + * @key: key to recurse on + * @b: parent btree node + * @op: pointer to struct btree_op + */ +#define btree(fn, key, b, op, ...) \ +({ \ + int _r, l = (b)->level - 1; \ + bool _w = l <= (op)->lock; \ + struct btree *_child = bch_btree_node_get((b)->c, key, l, _w); \ + if (!IS_ERR(_child)) { \ + _child->parent = (b); \ + _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \ + rw_unlock(_w, _child); \ + } else \ + _r = PTR_ERR(_child); \ + _r; \ +}) + +/** + * btree_root - call a function on the root of the btree + * @fn: function to call, which will be passed the child node + * @c: cache set + * @op: pointer to struct btree_op + */ +#define btree_root(fn, c, op, ...) \ +({ \ + int _r = -EINTR; \ + do { \ + struct btree *_b = (c)->root; \ + bool _w = insert_lock(op, _b); \ + rw_lock(_w, _b, _b->level); \ + if (_b == (c)->root && \ + _w == insert_lock(op, _b)) { \ + _b->parent = NULL; \ + _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \ + } \ + rw_unlock(_w, _b); \ + bch_cannibalize_unlock(c); \ + if (_r == -ENOSPC) { \ + wait_event((c)->try_wait, \ + !(c)->try_harder); \ + _r = -EINTR; \ + } \ + } while (_r == -EINTR); \ + \ + _r; \ +}) + /* Btree key manipulation */ -static void bkey_put(struct cache_set *c, struct bkey *k, int level) +void bkey_put(struct cache_set *c, struct bkey *k) { - if ((level && KEY_OFFSET(k)) || !level) - __bkey_put(c, k); + unsigned i; + + for (i = 0; i < KEY_PTRS(k); i++) + if (ptr_available(c, k, i)) + atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin); } /* Btree IO */ @@ -145,6 +210,10 @@ static void bch_btree_node_read_done(struct btree *b) iter->size = b->c->sb.bucket_size / b->c->sb.block_size; iter->used = 0; +#ifdef CONFIG_BCACHE_DEBUG + iter->b = b; +#endif + if (!i->seq) goto err; @@ -160,7 +229,7 @@ static void bch_btree_node_read_done(struct btree *b) goto err; err = "bad magic"; - if (i->magic != bset_magic(b->c)) + if (i->magic != bset_magic(&b->c->sb)) goto err; err = "bad checksum"; @@ -248,10 +317,7 @@ void bch_btree_node_read(struct btree *b) goto err; bch_btree_node_read_done(b); - - spin_lock(&b->c->btree_read_time_lock); bch_time_stats_update(&b->c->btree_read_time, start_time); - spin_unlock(&b->c->btree_read_time_lock); return; err: @@ -327,7 +393,7 @@ static void do_btree_node_write(struct btree *b) b->bio = bch_bbio_alloc(b->c); b->bio->bi_end_io = btree_node_write_endio; - b->bio->bi_private = &b->io.cl; + b->bio->bi_private = cl; b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA; b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c); bch_bio_map(b->bio, i); @@ -383,7 +449,7 @@ void bch_btree_node_write(struct btree *b, struct closure *parent) BUG_ON(b->written >= btree_blocks(b)); BUG_ON(b->written && !i->keys); BUG_ON(b->sets->data->seq != i->seq); - bch_check_key_order(b, i); + bch_check_keys(b, "writing"); cancel_delayed_work(&b->work); @@ -405,6 +471,15 @@ void bch_btree_node_write(struct btree *b, struct closure *parent) bch_bset_init_next(b); } +static void bch_btree_node_write_sync(struct btree *b) +{ + struct closure cl; + + closure_init_stack(&cl); + bch_btree_node_write(b, &cl); + closure_sync(&cl); +} + static void 
btree_node_write_work(struct work_struct *w) { struct btree *b = container_of(to_delayed_work(w), struct btree, work); @@ -416,7 +491,7 @@ static void btree_node_write_work(struct work_struct *w) rw_unlock(true, b); } -static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op) +static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref) { struct bset *i = b->sets[b->nsets].data; struct btree_write *w = btree_current_write(b); @@ -429,15 +504,15 @@ static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op) set_btree_node_dirty(b); - if (op && op->journal) { + if (journal_ref) { if (w->journal && - journal_pin_cmp(b->c, w, op)) { + journal_pin_cmp(b->c, w->journal, journal_ref)) { atomic_dec_bug(w->journal); w->journal = NULL; } if (!w->journal) { - w->journal = op->journal; + w->journal = journal_ref; atomic_inc(w->journal); } } @@ -566,33 +641,32 @@ static struct btree *mca_bucket_alloc(struct cache_set *c, return b; } -static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order) +static int mca_reap(struct btree *b, unsigned min_order, bool flush) { + struct closure cl; + + closure_init_stack(&cl); lockdep_assert_held(&b->c->bucket_lock); if (!down_write_trylock(&b->lock)) return -ENOMEM; - if (b->page_order < min_order) { + BUG_ON(btree_node_dirty(b) && !b->sets[0].data); + + if (b->page_order < min_order || + (!flush && + (btree_node_dirty(b) || + atomic_read(&b->io.cl.remaining) != -1))) { rw_unlock(true, b); return -ENOMEM; } - BUG_ON(btree_node_dirty(b) && !b->sets[0].data); - - if (cl && btree_node_dirty(b)) - bch_btree_node_write(b, NULL); - - if (cl) - closure_wait_event_async(&b->io.wait, cl, - atomic_read(&b->io.cl.remaining) == -1); + if (btree_node_dirty(b)) + bch_btree_node_write_sync(b); - if (btree_node_dirty(b) || - !closure_is_unlocked(&b->io.cl) || - work_pending(&b->work.work)) { - rw_unlock(true, b); - return -EAGAIN; - } + /* wait for any in flight btree write */ + closure_wait_event(&b->io.wait, &cl, + atomic_read(&b->io.cl.remaining) == -1); return 0; } @@ -633,7 +707,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, break; if (++i > 3 && - !mca_reap(b, NULL, 0)) { + !mca_reap(b, 0, false)) { mca_data_free(b); rw_unlock(true, b); freed++; @@ -652,7 +726,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, list_rotate_left(&c->btree_cache); if (!b->accessed && - !mca_reap(b, NULL, 0)) { + !mca_reap(b, 0, false)) { mca_bucket_free(b); mca_data_free(b); rw_unlock(true, b); @@ -723,12 +797,9 @@ int bch_btree_cache_alloc(struct cache_set *c) { unsigned i; - /* XXX: doesn't check for errors */ - - closure_init_unlocked(&c->gc); - for (i = 0; i < mca_reserve(c); i++) - mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL); + if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL)) + return -ENOMEM; list_splice_init(&c->btree_cache, &c->btree_cache_freeable); @@ -775,52 +846,27 @@ out: return b; } -static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k, - int level, struct closure *cl) +static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k) { - int ret = -ENOMEM; - struct btree *i; + struct btree *b; trace_bcache_btree_cache_cannibalize(c); - if (!cl) - return ERR_PTR(-ENOMEM); - - /* - * Trying to free up some memory - i.e. reuse some btree nodes - may - * require initiating IO to flush the dirty part of the node. If we're - * running under generic_make_request(), that IO will never finish and - * we would deadlock. 
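
The btree()/btree_root() macros shown a little earlier encode the whole retry protocol for descending the tree: a traversal that discovers it needs a write lock, or whose iterator was invalidated by a split or coalesce, returns -EINTR and is restarted from the root; -ENOSPC from the cannibalize path makes btree_root() wait on c->try_wait before retrying; and -EAGAIN is reserved for callers that cannot block under generic_make_request(). A rough, self-contained sketch of that loop follows; the helper names are invented and the kernel primitives are reduced to comments, so only the error-code convention matches the patch.

#include <errno.h>
#include <stdio.h>

/* Stand-in for bch_btree_<fn>() invoked on the root node.  Pretend the
 * first pass only held a read lock and had to bail out with -EINTR. */
static int traverse_once(int lock_level)
{
	static int attempts;

	if (attempts++ == 0) {
		printf("read lock was not enough, retrying at level %d\n",
		       lock_level + 1);
		return -EINTR;
	}
	return 0;
}

static void wait_for_cannibalize_owner(void)
{
	/* wait_event(c->try_wait, !c->try_harder) in the kernel */
}

static int run_from_root(void)
{
	int lock_level = -1;
	int ret = -EINTR;

	while (ret == -EINTR) {
		ret = traverse_once(lock_level);
		/* bch_cannibalize_unlock(c) runs here on every pass */
		if (ret == -EINTR)
			lock_level++;	/* take write locks deeper next time */
		if (ret == -ENOSPC) {
			wait_for_cannibalize_owner();
			ret = -EINTR;
		}
	}
	return ret;
}

int main(void)
{
	printf("traversal finished with %d\n", run_from_root());
	return 0;
}
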
Returning -EAGAIN causes the cache lookup code to - * punt to workqueue and retry. - */ - if (current->bio_list) - return ERR_PTR(-EAGAIN); - - if (c->try_harder && c->try_harder != cl) { - closure_wait_event_async(&c->try_wait, cl, !c->try_harder); - return ERR_PTR(-EAGAIN); - } + if (!c->try_harder) { + c->try_harder = current; + c->try_harder_start = local_clock(); + } else if (c->try_harder != current) + return ERR_PTR(-ENOSPC); - c->try_harder = cl; - c->try_harder_start = local_clock(); -retry: - list_for_each_entry_reverse(i, &c->btree_cache, list) { - int r = mca_reap(i, cl, btree_order(k)); - if (!r) - return i; - if (r != -ENOMEM) - ret = r; - } + list_for_each_entry_reverse(b, &c->btree_cache, list) + if (!mca_reap(b, btree_order(k), false)) + return b; - if (ret == -EAGAIN && - closure_blocking(cl)) { - mutex_unlock(&c->bucket_lock); - closure_sync(cl); - mutex_lock(&c->bucket_lock); - goto retry; - } + list_for_each_entry_reverse(b, &c->btree_cache, list) + if (!mca_reap(b, btree_order(k), true)) + return b; - return ERR_PTR(ret); + return ERR_PTR(-ENOMEM); } /* @@ -829,20 +875,21 @@ retry: * cannibalize_bucket() will take. This means every time we unlock the root of * the btree, we need to release this lock if we have it held. */ -void bch_cannibalize_unlock(struct cache_set *c, struct closure *cl) +static void bch_cannibalize_unlock(struct cache_set *c) { - if (c->try_harder == cl) { + if (c->try_harder == current) { bch_time_stats_update(&c->try_harder_time, c->try_harder_start); c->try_harder = NULL; - __closure_wake_up(&c->try_wait); + wake_up(&c->try_wait); } } -static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, - int level, struct closure *cl) +static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level) { struct btree *b; + BUG_ON(current->bio_list); + lockdep_assert_held(&c->bucket_lock); if (mca_find(c, k)) @@ -852,14 +899,14 @@ static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, * the list. Check if there's any freed nodes there: */ list_for_each_entry(b, &c->btree_cache_freeable, list) - if (!mca_reap(b, NULL, btree_order(k))) + if (!mca_reap(b, btree_order(k), false)) goto out; /* We never free struct btree itself, just the memory that holds the on * disk node. Check the freed list before allocating a new one: */ list_for_each_entry(b, &c->btree_cache_freed, list) - if (!mca_reap(b, NULL, 0)) { + if (!mca_reap(b, 0, false)) { mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO); if (!b->sets[0].data) goto err; @@ -884,6 +931,7 @@ out: lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_); b->level = level; + b->parent = (void *) ~0UL; mca_reinit(b); @@ -892,7 +940,7 @@ err: if (b) rw_unlock(true, b); - b = mca_cannibalize(c, k, level, cl); + b = mca_cannibalize(c, k); if (!IS_ERR(b)) goto out; @@ -903,17 +951,15 @@ err: * bch_btree_node_get - find a btree node in the cache and lock it, reading it * in from disk if necessary. * - * If IO is necessary, it uses the closure embedded in struct btree_op to wait; - * if that closure is in non blocking mode, will return -EAGAIN. + * If IO is necessary and running under generic_make_request, returns -EAGAIN. * * The btree node will have either a read or a write lock held, depending on * level and op->lock. 
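
With the big struct btree_op slimmed down (see the btree.h hunk near the end of this file's diff), per-operation state now lives in caller-defined structs that embed a struct btree_op and recover it in the map callback with container_of(); btree_bset_stats() above and btree_insert_fn()/refill_keybuf_fn() further down all follow this shape, driven by the new bch_btree_map_nodes()/bch_btree_map_keys() walkers and their MAP_DONE/MAP_CONTINUE return codes. A compact userspace sketch of the idiom, with invented names and a stubbed walker, follows.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Minimal stand-ins for the kernel types and return codes. */
struct btree_op { int lock; };
struct btree { int level; };

#define MAP_DONE	0
#define MAP_CONTINUE	1

typedef int (map_fn)(struct btree_op *, struct btree *);

/* Caller-private state wrapping the generic op, as bset_stats does. */
struct node_counter {
	struct btree_op op;	/* embedded op; container_of() recovers the wrapper */
	size_t nodes;
};

static int count_node_fn(struct btree_op *op, struct btree *b)
{
	struct node_counter *c = container_of(op, struct node_counter, op);

	c->nodes++;
	return MAP_CONTINUE;
}

/* Stub walker: visits three fake nodes, stops early on MAP_DONE. */
static int map_nodes(struct btree_op *op, map_fn *fn)
{
	struct btree fake = { .level = 0 };
	int i, ret = MAP_CONTINUE;

	for (i = 0; i < 3 && ret == MAP_CONTINUE; i++)
		ret = fn(op, &fake);
	return ret;
}

int main(void)
{
	struct node_counter c = { .op = { .lock = -1 }, .nodes = 0 };

	map_nodes(&c.op, count_node_fn);
	printf("visited %zu nodes\n", c.nodes);
	return 0;
}
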
*/ struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k, - int level, struct btree_op *op) + int level, bool write) { int i = 0; - bool write = level <= op->lock; struct btree *b; BUG_ON(level < 0); @@ -925,7 +971,7 @@ retry: return ERR_PTR(-EAGAIN); mutex_lock(&c->bucket_lock); - b = mca_alloc(c, k, level, &op->cl); + b = mca_alloc(c, k, level); mutex_unlock(&c->bucket_lock); if (!b) @@ -971,7 +1017,7 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level) struct btree *b; mutex_lock(&c->bucket_lock); - b = mca_alloc(c, k, level, NULL); + b = mca_alloc(c, k, level); mutex_unlock(&c->bucket_lock); if (!IS_ERR_OR_NULL(b)) { @@ -982,17 +1028,12 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level) /* Btree alloc */ -static void btree_node_free(struct btree *b, struct btree_op *op) +static void btree_node_free(struct btree *b) { unsigned i; trace_bcache_btree_node_free(b); - /* - * The BUG_ON() in btree_node_get() implies that we must have a write - * lock on parent to free or even invalidate a node - */ - BUG_ON(op->lock <= b->level); BUG_ON(b == b->c->root); if (btree_node_dirty(b)) @@ -1015,27 +1056,26 @@ static void btree_node_free(struct btree *b, struct btree_op *op) mutex_unlock(&b->c->bucket_lock); } -struct btree *bch_btree_node_alloc(struct cache_set *c, int level, - struct closure *cl) +struct btree *bch_btree_node_alloc(struct cache_set *c, int level, bool wait) { BKEY_PADDED(key) k; struct btree *b = ERR_PTR(-EAGAIN); mutex_lock(&c->bucket_lock); retry: - if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, cl)) + if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, wait)) goto err; + bkey_put(c, &k.key); SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS); - b = mca_alloc(c, &k.key, level, cl); + b = mca_alloc(c, &k.key, level); if (IS_ERR(b)) goto err_free; if (!b) { cache_bug(c, "Tried to allocate bucket that was in btree cache"); - __bkey_put(c, &k.key); goto retry; } @@ -1048,7 +1088,6 @@ retry: return b; err_free: bch_bucket_free(c, &k.key); - __bkey_put(c, &k.key); err: mutex_unlock(&c->bucket_lock); @@ -1056,16 +1095,31 @@ err: return b; } -static struct btree *btree_node_alloc_replacement(struct btree *b, - struct closure *cl) +static struct btree *btree_node_alloc_replacement(struct btree *b, bool wait) { - struct btree *n = bch_btree_node_alloc(b->c, b->level, cl); + struct btree *n = bch_btree_node_alloc(b->c, b->level, wait); if (!IS_ERR_OR_NULL(n)) bch_btree_sort_into(b, n); return n; } +static void make_btree_freeing_key(struct btree *b, struct bkey *k) +{ + unsigned i; + + bkey_copy(k, &b->key); + bkey_copy_key(k, &ZERO_KEY); + + for (i = 0; i < KEY_PTRS(k); i++) { + uint8_t g = PTR_BUCKET(b->c, k, i)->gen + 1; + + SET_PTR_GEN(k, i, g); + } + + atomic_inc(&b->c->prio_blocked); +} + /* Garbage collection */ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k) @@ -1119,12 +1173,10 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k) #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k) -static int btree_gc_mark_node(struct btree *b, unsigned *keys, - struct gc_stat *gc) +static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc) { uint8_t stale = 0; - unsigned last_dev = -1; - struct bcache_device *d = NULL; + unsigned keys = 0, good_keys = 0; struct bkey *k; struct btree_iter iter; struct bset_tree *t; @@ -1132,27 +1184,17 @@ static int btree_gc_mark_node(struct btree *b, unsigned *keys, gc->nodes++; 
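
make_btree_freeing_key() above packages everything needed to retire a node that is being replaced: it copies the node's pointers, zeroes the comparison part of the key (bkey_copy_key(k, &ZERO_KEY)), bumps each pointer's generation by one, and takes a reference on c->prio_blocked. The result is pushed onto the same keylist as the replacement nodes' keys (see btree_gc_coalesce() and btree_split() below), and the out_nocoalesce error path recognizes such keys by their zeroed key field in order to drop prio_blocked again. A toy sketch of the construction step, on invented simplified types, follows.

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_PTRS 3

/* Invented, simplified stand-ins for struct bkey / struct bucket. */
struct demo_key {
	uint64_t inode, offset;		/* the "comparison" part */
	unsigned nr_ptrs;
	uint8_t gen[DEMO_MAX_PTRS];	/* generation carried by each pointer */
};

struct demo_bucket { uint8_t gen; };

static void make_freeing_key(const struct demo_key *node_key,
			     const struct demo_bucket *buckets,
			     struct demo_key *out)
{
	unsigned i;

	*out = *node_key;		/* bkey_copy(k, &b->key) */
	out->inode = 0;			/* bkey_copy_key(k, &ZERO_KEY) */
	out->offset = 0;

	for (i = 0; i < out->nr_ptrs; i++)
		out->gen[i] = buckets[i].gen + 1;	/* SET_PTR_GEN(k, i, g) */
}

int main(void)
{
	struct demo_bucket buckets[1] = { { .gen = 7 } };
	struct demo_key node = { .inode = 1, .offset = 42, .nr_ptrs = 1,
				 .gen = { 7 } };
	struct demo_key freeing;

	make_freeing_key(&node, buckets, &freeing);
	printf("freeing key %llu:%llu gen %u\n",
	       (unsigned long long)freeing.inode,
	       (unsigned long long)freeing.offset, freeing.gen[0]);
	return 0;
}
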
for_each_key_filter(b, k, &iter, bch_ptr_invalid) { - if (last_dev != KEY_INODE(k)) { - last_dev = KEY_INODE(k); - - d = KEY_INODE(k) < b->c->nr_uuids - ? b->c->devices[last_dev] - : NULL; - } - stale = max(stale, btree_mark_key(b, k)); + keys++; if (bch_ptr_bad(b, k)) continue; - *keys += bkey_u64s(k); - gc->key_bytes += bkey_u64s(k); gc->nkeys++; + good_keys++; gc->data += KEY_SIZE(k); - if (KEY_DIRTY(k)) - gc->dirty += KEY_SIZE(k); } for (t = b->sets; t <= &b->sets[b->nsets]; t++) @@ -1161,78 +1203,74 @@ static int btree_gc_mark_node(struct btree *b, unsigned *keys, bkey_cmp(&b->key, &t->end) < 0, b, "found short btree key in gc"); - return stale; -} - -static struct btree *btree_gc_alloc(struct btree *b, struct bkey *k, - struct btree_op *op) -{ - /* - * We block priorities from being written for the duration of garbage - * collection, so we can't sleep in btree_alloc() -> - * bch_bucket_alloc_set(), or we'd risk deadlock - so we don't pass it - * our closure. - */ - struct btree *n = btree_node_alloc_replacement(b, NULL); - - if (!IS_ERR_OR_NULL(n)) { - swap(b, n); - __bkey_put(b->c, &b->key); + if (b->c->gc_always_rewrite) + return true; - memcpy(k->ptr, b->key.ptr, - sizeof(uint64_t) * KEY_PTRS(&b->key)); + if (stale > 10) + return true; - btree_node_free(n, op); - up_write(&n->lock); - } + if ((keys - good_keys) * 2 > keys) + return true; - return b; + return false; } -/* - * Leaving this at 2 until we've got incremental garbage collection done; it - * could be higher (and has been tested with 4) except that garbage collection - * could take much longer, adversely affecting latency. - */ -#define GC_MERGE_NODES 2U +#define GC_MERGE_NODES 4U struct gc_merge_info { struct btree *b; - struct bkey *k; unsigned keys; }; -static void btree_gc_coalesce(struct btree *b, struct btree_op *op, - struct gc_stat *gc, struct gc_merge_info *r) +static int bch_btree_insert_node(struct btree *, struct btree_op *, + struct keylist *, atomic_t *, struct bkey *); + +static int btree_gc_coalesce(struct btree *b, struct btree_op *op, + struct keylist *keylist, struct gc_stat *gc, + struct gc_merge_info *r) { - unsigned nodes = 0, keys = 0, blocks; - int i; + unsigned i, nodes = 0, keys = 0, blocks; + struct btree *new_nodes[GC_MERGE_NODES]; + struct closure cl; + struct bkey *k; + + memset(new_nodes, 0, sizeof(new_nodes)); + closure_init_stack(&cl); - while (nodes < GC_MERGE_NODES && r[nodes].b) + while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b)) keys += r[nodes++].keys; blocks = btree_default_blocks(b->c) * 2 / 3; if (nodes < 2 || __set_blocks(b->sets[0].data, keys, b->c) > blocks * (nodes - 1)) - return; - - for (i = nodes - 1; i >= 0; --i) { - if (r[i].b->written) - r[i].b = btree_gc_alloc(r[i].b, r[i].k, op); + return 0; - if (r[i].b->written) - return; + for (i = 0; i < nodes; i++) { + new_nodes[i] = btree_node_alloc_replacement(r[i].b, false); + if (IS_ERR_OR_NULL(new_nodes[i])) + goto out_nocoalesce; } for (i = nodes - 1; i > 0; --i) { - struct bset *n1 = r[i].b->sets->data; - struct bset *n2 = r[i - 1].b->sets->data; + struct bset *n1 = new_nodes[i]->sets->data; + struct bset *n2 = new_nodes[i - 1]->sets->data; struct bkey *k, *last = NULL; keys = 0; - if (i == 1) { + if (i > 1) { + for (k = n2->start; + k < end(n2); + k = bkey_next(k)) { + if (__set_blocks(n1, n1->keys + keys + + bkey_u64s(k), b->c) > blocks) + break; + + last = k; + keys += bkey_u64s(k); + } + } else { /* * Last node we're not getting rid of - we're getting * rid of the node at r[0]. 
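
btree_gc_mark_node() now returns a boolean rewrite decision instead of a raw staleness count: a node is rewritten if the cache set has gc_always_rewrite set, if the maximum pointer staleness exceeds 10, or if more than half of its keys were filtered out as bad. A tiny self-contained rendering of that heuristic, with invented names, follows.

#include <stdbool.h>
#include <stdio.h>

/* Invented helper mirroring the decision btree_gc_mark_node() returns. */
static bool should_rewrite_node(unsigned stale, unsigned keys,
				unsigned good_keys, bool always_rewrite)
{
	if (always_rewrite)
		return true;

	if (stale > 10)			/* pointer generations too far behind */
		return true;

	/* more than half of the keys were filtered out as bad */
	return (keys - good_keys) * 2 > keys;
}

int main(void)
{
	/* 100 keys, only 40 good: 60 bad keys, so rewrite */
	printf("%d\n", should_rewrite_node(2, 100, 40, false));
	/* 100 keys, 80 good: keep the node as is */
	printf("%d\n", should_rewrite_node(2, 100, 80, false));
	return 0;
}
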
Have to try and fit all of @@ -1241,37 +1279,27 @@ static void btree_gc_coalesce(struct btree *b, struct btree_op *op, * length keys (shouldn't be possible in practice, * though) */ - if (__set_blocks(n1, n1->keys + r->keys, - b->c) > btree_blocks(r[i].b)) - return; + if (__set_blocks(n1, n1->keys + n2->keys, + b->c) > btree_blocks(new_nodes[i])) + goto out_nocoalesce; keys = n2->keys; + /* Take the key of the node we're getting rid of */ last = &r->b->key; - } else - for (k = n2->start; - k < end(n2); - k = bkey_next(k)) { - if (__set_blocks(n1, n1->keys + keys + - bkey_u64s(k), b->c) > blocks) - break; - - last = k; - keys += bkey_u64s(k); - } + } BUG_ON(__set_blocks(n1, n1->keys + keys, - b->c) > btree_blocks(r[i].b)); + b->c) > btree_blocks(new_nodes[i])); - if (last) { - bkey_copy_key(&r[i].b->key, last); - bkey_copy_key(r[i].k, last); - } + if (last) + bkey_copy_key(&new_nodes[i]->key, last); memcpy(end(n1), n2->start, (void *) node(n2, keys) - (void *) n2->start); n1->keys += keys; + r[i].keys = n1->keys; memmove(n2->start, node(n2, keys), @@ -1279,95 +1307,176 @@ static void btree_gc_coalesce(struct btree *b, struct btree_op *op, n2->keys -= keys; - r[i].keys = n1->keys; - r[i - 1].keys = n2->keys; + if (bch_keylist_realloc(keylist, + KEY_PTRS(&new_nodes[i]->key), b->c)) + goto out_nocoalesce; + + bch_btree_node_write(new_nodes[i], &cl); + bch_keylist_add(keylist, &new_nodes[i]->key); } - btree_node_free(r->b, op); - up_write(&r->b->lock); + for (i = 0; i < nodes; i++) { + if (bch_keylist_realloc(keylist, KEY_PTRS(&r[i].b->key), b->c)) + goto out_nocoalesce; - trace_bcache_btree_gc_coalesce(nodes); + make_btree_freeing_key(r[i].b, keylist->top); + bch_keylist_push(keylist); + } + + /* We emptied out this node */ + BUG_ON(new_nodes[0]->sets->data->keys); + btree_node_free(new_nodes[0]); + rw_unlock(true, new_nodes[0]); + + closure_sync(&cl); + + for (i = 0; i < nodes; i++) { + btree_node_free(r[i].b); + rw_unlock(true, r[i].b); + + r[i].b = new_nodes[i]; + } + + bch_btree_insert_node(b, op, keylist, NULL, NULL); + BUG_ON(!bch_keylist_empty(keylist)); + + memmove(r, r + 1, sizeof(r[0]) * (nodes - 1)); + r[nodes - 1].b = ERR_PTR(-EINTR); + trace_bcache_btree_gc_coalesce(nodes); gc->nodes--; - nodes--; - memmove(&r[0], &r[1], sizeof(struct gc_merge_info) * nodes); - memset(&r[nodes], 0, sizeof(struct gc_merge_info)); + /* Invalidated our iterator */ + return -EINTR; + +out_nocoalesce: + closure_sync(&cl); + + while ((k = bch_keylist_pop(keylist))) + if (!bkey_cmp(k, &ZERO_KEY)) + atomic_dec(&b->c->prio_blocked); + + for (i = 0; i < nodes; i++) + if (!IS_ERR_OR_NULL(new_nodes[i])) { + btree_node_free(new_nodes[i]); + rw_unlock(true, new_nodes[i]); + } + return 0; } -static int btree_gc_recurse(struct btree *b, struct btree_op *op, - struct closure *writes, struct gc_stat *gc) +static unsigned btree_gc_count_keys(struct btree *b) { - void write(struct btree *r) - { - if (!r->written) - bch_btree_node_write(r, &op->cl); - else if (btree_node_dirty(r)) - bch_btree_node_write(r, writes); + struct bkey *k; + struct btree_iter iter; + unsigned ret = 0; - up_write(&r->lock); - } + for_each_key_filter(b, k, &iter, bch_ptr_bad) + ret += bkey_u64s(k); + + return ret; +} - int ret = 0, stale; +static int btree_gc_recurse(struct btree *b, struct btree_op *op, + struct closure *writes, struct gc_stat *gc) +{ unsigned i; + int ret = 0; + bool should_rewrite; + struct btree *n; + struct bkey *k; + struct keylist keys; + struct btree_iter iter; struct gc_merge_info r[GC_MERGE_NODES]; + struct 
gc_merge_info *last = r + GC_MERGE_NODES - 1; - memset(r, 0, sizeof(r)); + bch_keylist_init(&keys); + bch_btree_iter_init(b, &iter, &b->c->gc_done); - while ((r->k = bch_next_recurse_key(b, &b->c->gc_done))) { - r->b = bch_btree_node_get(b->c, r->k, b->level - 1, op); + for (i = 0; i < GC_MERGE_NODES; i++) + r[i].b = ERR_PTR(-EINTR); - if (IS_ERR(r->b)) { - ret = PTR_ERR(r->b); - break; + while (1) { + k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad); + if (k) { + r->b = bch_btree_node_get(b->c, k, b->level - 1, true); + if (IS_ERR(r->b)) { + ret = PTR_ERR(r->b); + break; + } + + r->keys = btree_gc_count_keys(r->b); + + ret = btree_gc_coalesce(b, op, &keys, gc, r); + if (ret) + break; } - r->keys = 0; - stale = btree_gc_mark_node(r->b, &r->keys, gc); + if (!last->b) + break; - if (!b->written && - (r->b->level || stale > 10 || - b->c->gc_always_rewrite)) - r->b = btree_gc_alloc(r->b, r->k, op); + if (!IS_ERR(last->b)) { + should_rewrite = btree_gc_mark_node(last->b, gc); + if (should_rewrite) { + n = btree_node_alloc_replacement(last->b, + false); - if (r->b->level) - ret = btree_gc_recurse(r->b, op, writes, gc); + if (!IS_ERR_OR_NULL(n)) { + bch_btree_node_write_sync(n); + bch_keylist_add(&keys, &n->key); - if (ret) { - write(r->b); - break; - } + make_btree_freeing_key(last->b, + keys.top); + bch_keylist_push(&keys); + + btree_node_free(last->b); + + bch_btree_insert_node(b, op, &keys, + NULL, NULL); + BUG_ON(!bch_keylist_empty(&keys)); - bkey_copy_key(&b->c->gc_done, r->k); + rw_unlock(true, last->b); + last->b = n; - if (!b->written) - btree_gc_coalesce(b, op, gc, r); + /* Invalidated our iterator */ + ret = -EINTR; + break; + } + } - if (r[GC_MERGE_NODES - 1].b) - write(r[GC_MERGE_NODES - 1].b); + if (last->b->level) { + ret = btree_gc_recurse(last->b, op, writes, gc); + if (ret) + break; + } - memmove(&r[1], &r[0], - sizeof(struct gc_merge_info) * (GC_MERGE_NODES - 1)); + bkey_copy_key(&b->c->gc_done, &last->b->key); + + /* + * Must flush leaf nodes before gc ends, since replace + * operations aren't journalled + */ + if (btree_node_dirty(last->b)) + bch_btree_node_write(last->b, writes); + rw_unlock(true, last->b); + } + + memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1)); + r->b = NULL; - /* When we've got incremental GC working, we'll want to do - * if (should_resched()) - * return -EAGAIN; - */ - cond_resched(); -#if 0 if (need_resched()) { ret = -EAGAIN; break; } -#endif } - for (i = 1; i < GC_MERGE_NODES && r[i].b; i++) - write(r[i].b); + for (i = 0; i < GC_MERGE_NODES; i++) + if (!IS_ERR_OR_NULL(r[i].b)) { + if (btree_node_dirty(r[i].b)) + bch_btree_node_write(r[i].b, writes); + rw_unlock(true, r[i].b); + } - /* Might have freed some children, must remove their keys */ - if (!b->written) - bch_btree_sort(b); + bch_keylist_free(&keys); return ret; } @@ -1376,29 +1485,31 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op, struct closure *writes, struct gc_stat *gc) { struct btree *n = NULL; - unsigned keys = 0; - int ret = 0, stale = btree_gc_mark_node(b, &keys, gc); - - if (b->level || stale > 10) - n = btree_node_alloc_replacement(b, NULL); + int ret = 0; + bool should_rewrite; - if (!IS_ERR_OR_NULL(n)) - swap(b, n); + should_rewrite = btree_gc_mark_node(b, gc); + if (should_rewrite) { + n = btree_node_alloc_replacement(b, false); - if (b->level) - ret = btree_gc_recurse(b, op, writes, gc); + if (!IS_ERR_OR_NULL(n)) { + bch_btree_node_write_sync(n); + bch_btree_set_root(n); + btree_node_free(b); + rw_unlock(true, n); - if (!b->written || 
btree_node_dirty(b)) { - bch_btree_node_write(b, n ? &op->cl : NULL); + return -EINTR; + } } - if (!IS_ERR_OR_NULL(n)) { - closure_sync(&op->cl); - bch_btree_set_root(b); - btree_node_free(n, op); - rw_unlock(true, b); + if (b->level) { + ret = btree_gc_recurse(b, op, writes, gc); + if (ret) + return ret; } + bkey_copy_key(&b->c->gc_done, &b->key); + return ret; } @@ -1479,9 +1590,8 @@ size_t bch_btree_gc_finish(struct cache_set *c) return available; } -static void bch_btree_gc(struct closure *cl) +static void bch_btree_gc(struct cache_set *c) { - struct cache_set *c = container_of(cl, struct cache_set, gc.cl); int ret; unsigned long available; struct gc_stat stats; @@ -1493,47 +1603,73 @@ static void bch_btree_gc(struct closure *cl) memset(&stats, 0, sizeof(struct gc_stat)); closure_init_stack(&writes); - bch_btree_op_init_stack(&op); - op.lock = SHRT_MAX; + bch_btree_op_init(&op, SHRT_MAX); btree_gc_start(c); - atomic_inc(&c->prio_blocked); - - ret = btree_root(gc_root, c, &op, &writes, &stats); - closure_sync(&op.cl); - closure_sync(&writes); - - if (ret) { - pr_warn("gc failed!"); - continue_at(cl, bch_btree_gc, bch_gc_wq); - } + do { + ret = btree_root(gc_root, c, &op, &writes, &stats); + closure_sync(&writes); - /* Possibly wait for new UUIDs or whatever to hit disk */ - bch_journal_meta(c, &op.cl); - closure_sync(&op.cl); + if (ret && ret != -EAGAIN) + pr_warn("gc failed!"); + } while (ret); available = bch_btree_gc_finish(c); - - atomic_dec(&c->prio_blocked); wake_up_allocators(c); bch_time_stats_update(&c->btree_gc_time, start_time); stats.key_bytes *= sizeof(uint64_t); - stats.dirty <<= 9; stats.data <<= 9; stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets; memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat)); trace_bcache_gc_end(c); - continue_at(cl, bch_moving_gc, bch_gc_wq); + bch_moving_gc(c); +} + +static int bch_gc_thread(void *arg) +{ + struct cache_set *c = arg; + struct cache *ca; + unsigned i; + + while (1) { +again: + bch_btree_gc(c); + + set_current_state(TASK_INTERRUPTIBLE); + if (kthread_should_stop()) + break; + + mutex_lock(&c->bucket_lock); + + for_each_cache(ca, c, i) + if (ca->invalidate_needs_gc) { + mutex_unlock(&c->bucket_lock); + set_current_state(TASK_RUNNING); + goto again; + } + + mutex_unlock(&c->bucket_lock); + + try_to_freeze(); + schedule(); + } + + return 0; } -void bch_queue_gc(struct cache_set *c) +int bch_gc_thread_start(struct cache_set *c) { - closure_trylock_call(&c->gc.cl, bch_btree_gc, bch_gc_wq, &c->cl); + c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc"); + if (IS_ERR(c->gc_thread)) + return PTR_ERR(c->gc_thread); + + set_task_state(c->gc_thread, TASK_INTERRUPTIBLE); + return 0; } /* Initial partial gc */ @@ -1541,9 +1677,9 @@ void bch_queue_gc(struct cache_set *c) static int bch_btree_check_recurse(struct btree *b, struct btree_op *op, unsigned long **seen) { - int ret; + int ret = 0; unsigned i; - struct bkey *k; + struct bkey *k, *p = NULL; struct bucket *g; struct btree_iter iter; @@ -1570,31 +1706,32 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op, } if (b->level) { - k = bch_next_recurse_key(b, &ZERO_KEY); + bch_btree_iter_init(b, &iter, NULL); - while (k) { - struct bkey *p = bch_next_recurse_key(b, k); - if (p) - btree_node_prefetch(b->c, p, b->level - 1); + do { + k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad); + if (k) + btree_node_prefetch(b->c, k, b->level - 1); - ret = btree(check_recurse, k, b, op, seen); - if (ret) - return ret; + if (p) + ret = btree(check_recurse, p, 
b, op, seen); - k = p; - } + p = k; + } while (p && !ret); } return 0; } -int bch_btree_check(struct cache_set *c, struct btree_op *op) +int bch_btree_check(struct cache_set *c) { int ret = -ENOMEM; unsigned i; unsigned long *seen[MAX_CACHES_PER_SET]; + struct btree_op op; memset(seen, 0, sizeof(seen)); + bch_btree_op_init(&op, SHRT_MAX); for (i = 0; c->cache[i]; i++) { size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8); @@ -1606,7 +1743,7 @@ int bch_btree_check(struct cache_set *c, struct btree_op *op) memset(seen[i], 0xFF, n); } - ret = btree_root(check_recurse, c, op, seen); + ret = btree_root(check_recurse, c, &op, seen); err: for (i = 0; i < MAX_CACHES_PER_SET; i++) kfree(seen[i]); @@ -1628,10 +1765,9 @@ static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert) bch_bset_fix_lookup_table(b, where); } -static bool fix_overlapping_extents(struct btree *b, - struct bkey *insert, +static bool fix_overlapping_extents(struct btree *b, struct bkey *insert, struct btree_iter *iter, - struct btree_op *op) + struct bkey *replace_key) { void subtract_dirty(struct bkey *k, uint64_t offset, int sectors) { @@ -1659,39 +1795,38 @@ static bool fix_overlapping_extents(struct btree *b, * We might overlap with 0 size extents; we can't skip these * because if they're in the set we're inserting to we have to * adjust them so they don't overlap with the key we're - * inserting. But we don't want to check them for BTREE_REPLACE + * inserting. But we don't want to check them for replace * operations. */ - if (op->type == BTREE_REPLACE && - KEY_SIZE(k)) { + if (replace_key && KEY_SIZE(k)) { /* * k might have been split since we inserted/found the * key we're replacing */ unsigned i; uint64_t offset = KEY_START(k) - - KEY_START(&op->replace); + KEY_START(replace_key); /* But it must be a subset of the replace key */ - if (KEY_START(k) < KEY_START(&op->replace) || - KEY_OFFSET(k) > KEY_OFFSET(&op->replace)) + if (KEY_START(k) < KEY_START(replace_key) || + KEY_OFFSET(k) > KEY_OFFSET(replace_key)) goto check_failed; /* We didn't find a key that we were supposed to */ if (KEY_START(k) > KEY_START(insert) + sectors_found) goto check_failed; - if (KEY_PTRS(&op->replace) != KEY_PTRS(k)) + if (KEY_PTRS(replace_key) != KEY_PTRS(k)) goto check_failed; /* skip past gen */ offset <<= 8; - BUG_ON(!KEY_PTRS(&op->replace)); + BUG_ON(!KEY_PTRS(replace_key)); - for (i = 0; i < KEY_PTRS(&op->replace); i++) - if (k->ptr[i] != op->replace.ptr[i] + offset) + for (i = 0; i < KEY_PTRS(replace_key); i++) + if (k->ptr[i] != replace_key->ptr[i] + offset) goto check_failed; sectors_found = KEY_OFFSET(k) - KEY_START(insert); @@ -1742,6 +1877,9 @@ static bool fix_overlapping_extents(struct btree *b, if (bkey_cmp(insert, k) < 0) { bch_cut_front(insert, k); } else { + if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) + old_offset = KEY_START(insert); + if (bkey_written(b, k) && bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) { /* @@ -1759,9 +1897,8 @@ static bool fix_overlapping_extents(struct btree *b, } check_failed: - if (op->type == BTREE_REPLACE) { + if (replace_key) { if (!sectors_found) { - op->insert_collision = true; return true; } else if (sectors_found < KEY_SIZE(insert)) { SET_KEY_OFFSET(insert, KEY_OFFSET(insert) - @@ -1774,7 +1911,7 @@ check_failed: } static bool btree_insert_key(struct btree *b, struct btree_op *op, - struct bkey *k) + struct bkey *k, struct bkey *replace_key) { struct bset *i = b->sets[b->nsets].data; struct bkey *m, *prev; @@ -1786,22 +1923,23 @@ static bool 
btree_insert_key(struct btree *b, struct btree_op *op, if (!b->level) { struct btree_iter iter; - struct bkey search = KEY(KEY_INODE(k), KEY_START(k), 0); /* * bset_search() returns the first key that is strictly greater * than the search key - but for back merging, we want to find - * the first key that is greater than or equal to KEY_START(k) - - * unless KEY_START(k) is 0. + * the previous key. */ - if (KEY_OFFSET(&search)) - SET_KEY_OFFSET(&search, KEY_OFFSET(&search) - 1); - prev = NULL; - m = bch_btree_iter_init(b, &iter, &search); + m = bch_btree_iter_init(b, &iter, PRECEDING_KEY(&START_KEY(k))); - if (fix_overlapping_extents(b, k, &iter, op)) + if (fix_overlapping_extents(b, k, &iter, replace_key)) { + op->insert_collision = true; return false; + } + + if (KEY_DIRTY(k)) + bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k), + KEY_START(k), KEY_SIZE(k)); while (m != end(i) && bkey_cmp(k, &START_KEY(m)) > 0) @@ -1825,84 +1963,80 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op, if (m != end(i) && bch_bkey_try_merge(b, k, m)) goto copy; - } else + } else { + BUG_ON(replace_key); m = bch_bset_search(b, &b->sets[b->nsets], k); + } insert: shift_keys(b, m, k); copy: bkey_copy(m, k); merged: - if (KEY_DIRTY(k)) - bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k), - KEY_START(k), KEY_SIZE(k)); - - bch_check_keys(b, "%u for %s", status, op_type(op)); + bch_check_keys(b, "%u for %s", status, + replace_key ? "replace" : "insert"); if (b->level && !KEY_OFFSET(k)) btree_current_write(b)->prio_blocked++; - trace_bcache_btree_insert_key(b, k, op->type, status); + trace_bcache_btree_insert_key(b, k, replace_key != NULL, status); return true; } -static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op) +static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op, + struct keylist *insert_keys, + struct bkey *replace_key) { bool ret = false; - struct bkey *k; - unsigned oldsize = bch_count_data(b); - - while ((k = bch_keylist_pop(&op->keys))) { - bkey_put(b->c, k, b->level); - ret |= btree_insert_key(b, op, k); - } - - BUG_ON(bch_count_data(b) < oldsize); - return ret; -} + int oldsize = bch_count_data(b); -bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op, - struct bio *bio) -{ - bool ret = false; - uint64_t btree_ptr = b->key.ptr[0]; - unsigned long seq = b->seq; - BKEY_PADDED(k) tmp; + while (!bch_keylist_empty(insert_keys)) { + struct bset *i = write_block(b); + struct bkey *k = insert_keys->keys; - rw_unlock(false, b); - rw_lock(true, b, b->level); + if (b->written + __set_blocks(i, i->keys + bkey_u64s(k), b->c) + > btree_blocks(b)) + break; - if (b->key.ptr[0] != btree_ptr || - b->seq != seq + 1 || - should_split(b)) - goto out; + if (bkey_cmp(k, &b->key) <= 0) { + if (!b->level) + bkey_put(b->c, k); - op->replace = KEY(op->inode, bio_end_sector(bio), bio_sectors(bio)); + ret |= btree_insert_key(b, op, k, replace_key); + bch_keylist_pop_front(insert_keys); + } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) { + BKEY_PADDED(key) temp; + bkey_copy(&temp.key, insert_keys->keys); - SET_KEY_PTRS(&op->replace, 1); - get_random_bytes(&op->replace.ptr[0], sizeof(uint64_t)); + bch_cut_back(&b->key, &temp.key); + bch_cut_front(&b->key, insert_keys->keys); - SET_PTR_DEV(&op->replace, 0, PTR_CHECK_DEV); + ret |= btree_insert_key(b, op, &temp.key, replace_key); + break; + } else { + break; + } + } - bkey_copy(&tmp.k, &op->replace); + BUG_ON(!bch_keylist_empty(insert_keys) && b->level); - BUG_ON(op->type != BTREE_INSERT); - BUG_ON(!btree_insert_key(b, 
op, &tmp.k)); - ret = true; -out: - downgrade_write(&b->lock); + BUG_ON(bch_count_data(b) < oldsize); return ret; } -static int btree_split(struct btree *b, struct btree_op *op) +static int btree_split(struct btree *b, struct btree_op *op, + struct keylist *insert_keys, + struct bkey *replace_key) { - bool split, root = b == b->c->root; + bool split; struct btree *n1, *n2 = NULL, *n3 = NULL; uint64_t start_time = local_clock(); + struct closure cl; + struct keylist parent_keys; - if (b->level) - set_closure_blocking(&op->cl); + closure_init_stack(&cl); + bch_keylist_init(&parent_keys); - n1 = btree_node_alloc_replacement(b, &op->cl); + n1 = btree_node_alloc_replacement(b, true); if (IS_ERR(n1)) goto err; @@ -1913,19 +2047,20 @@ static int btree_split(struct btree *b, struct btree_op *op) trace_bcache_btree_node_split(b, n1->sets[0].data->keys); - n2 = bch_btree_node_alloc(b->c, b->level, &op->cl); + n2 = bch_btree_node_alloc(b->c, b->level, true); if (IS_ERR(n2)) goto err_free1; - if (root) { - n3 = bch_btree_node_alloc(b->c, b->level + 1, &op->cl); + if (!b->parent) { + n3 = bch_btree_node_alloc(b->c, b->level + 1, true); if (IS_ERR(n3)) goto err_free2; } - bch_btree_insert_keys(n1, op); + bch_btree_insert_keys(n1, op, insert_keys, replace_key); - /* Has to be a linear search because we don't have an auxiliary + /* + * Has to be a linear search because we don't have an auxiliary * search tree yet */ @@ -1944,60 +2079,57 @@ static int btree_split(struct btree *b, struct btree_op *op) bkey_copy_key(&n2->key, &b->key); - bch_keylist_add(&op->keys, &n2->key); - bch_btree_node_write(n2, &op->cl); + bch_keylist_add(&parent_keys, &n2->key); + bch_btree_node_write(n2, &cl); rw_unlock(true, n2); } else { trace_bcache_btree_node_compact(b, n1->sets[0].data->keys); - bch_btree_insert_keys(n1, op); + bch_btree_insert_keys(n1, op, insert_keys, replace_key); } - bch_keylist_add(&op->keys, &n1->key); - bch_btree_node_write(n1, &op->cl); + bch_keylist_add(&parent_keys, &n1->key); + bch_btree_node_write(n1, &cl); if (n3) { + /* Depth increases, make a new root */ bkey_copy_key(&n3->key, &MAX_KEY); - bch_btree_insert_keys(n3, op); - bch_btree_node_write(n3, &op->cl); + bch_btree_insert_keys(n3, op, &parent_keys, NULL); + bch_btree_node_write(n3, &cl); - closure_sync(&op->cl); + closure_sync(&cl); bch_btree_set_root(n3); rw_unlock(true, n3); - } else if (root) { - op->keys.top = op->keys.bottom; - closure_sync(&op->cl); - bch_btree_set_root(n1); - } else { - unsigned i; - bkey_copy(op->keys.top, &b->key); - bkey_copy_key(op->keys.top, &ZERO_KEY); + btree_node_free(b); + } else if (!b->parent) { + /* Root filled up but didn't need to be split */ + closure_sync(&cl); + bch_btree_set_root(n1); - for (i = 0; i < KEY_PTRS(&b->key); i++) { - uint8_t g = PTR_BUCKET(b->c, &b->key, i)->gen + 1; + btree_node_free(b); + } else { + /* Split a non root node */ + closure_sync(&cl); + make_btree_freeing_key(b, parent_keys.top); + bch_keylist_push(&parent_keys); - SET_PTR_GEN(op->keys.top, i, g); - } + btree_node_free(b); - bch_keylist_push(&op->keys); - closure_sync(&op->cl); - atomic_inc(&b->c->prio_blocked); + bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL); + BUG_ON(!bch_keylist_empty(&parent_keys)); } rw_unlock(true, n1); - btree_node_free(b, op); bch_time_stats_update(&b->c->btree_split_time, start_time); return 0; err_free2: - __bkey_put(n2->c, &n2->key); - btree_node_free(n2, op); + btree_node_free(n2); rw_unlock(true, n2); err_free1: - __bkey_put(n1->c, &n1->key); - btree_node_free(n1, op); + 
btree_node_free(n1); rw_unlock(true, n1); err: if (n3 == ERR_PTR(-EAGAIN) || @@ -2009,116 +2141,126 @@ err: return -ENOMEM; } -static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op, - struct keylist *stack_keys) +static int bch_btree_insert_node(struct btree *b, struct btree_op *op, + struct keylist *insert_keys, + atomic_t *journal_ref, + struct bkey *replace_key) { - if (b->level) { - int ret; - struct bkey *insert = op->keys.bottom; - struct bkey *k = bch_next_recurse_key(b, &START_KEY(insert)); - - if (!k) { - btree_bug(b, "no key to recurse on at level %i/%i", - b->level, b->c->root->level); + BUG_ON(b->level && replace_key); - op->keys.top = op->keys.bottom; - return -EIO; + if (should_split(b)) { + if (current->bio_list) { + op->lock = b->c->root->level + 1; + return -EAGAIN; + } else if (op->lock <= b->c->root->level) { + op->lock = b->c->root->level + 1; + return -EINTR; + } else { + /* Invalidated all iterators */ + return btree_split(b, op, insert_keys, replace_key) ?: + -EINTR; } + } else { + BUG_ON(write_block(b) != b->sets[b->nsets].data); - if (bkey_cmp(insert, k) > 0) { - unsigned i; - - if (op->type == BTREE_REPLACE) { - __bkey_put(b->c, insert); - op->keys.top = op->keys.bottom; - op->insert_collision = true; - return 0; - } + if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) { + if (!b->level) + bch_btree_leaf_dirty(b, journal_ref); + else + bch_btree_node_write_sync(b); + } - for (i = 0; i < KEY_PTRS(insert); i++) - atomic_inc(&PTR_BUCKET(b->c, insert, i)->pin); + return 0; + } +} - bkey_copy(stack_keys->top, insert); +int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, + struct bkey *check_key) +{ + int ret = -EINTR; + uint64_t btree_ptr = b->key.ptr[0]; + unsigned long seq = b->seq; + struct keylist insert; + bool upgrade = op->lock == -1; - bch_cut_back(k, insert); - bch_cut_front(k, stack_keys->top); + bch_keylist_init(&insert); - bch_keylist_push(stack_keys); - } + if (upgrade) { + rw_unlock(false, b); + rw_lock(true, b, b->level); - ret = btree(insert_recurse, k, b, op, stack_keys); - if (ret) - return ret; + if (b->key.ptr[0] != btree_ptr || + b->seq != seq + 1) + goto out; } - if (!bch_keylist_empty(&op->keys)) { - if (should_split(b)) { - if (op->lock <= b->c->root->level) { - BUG_ON(b->level); - op->lock = b->c->root->level + 1; - return -EINTR; - } - return btree_split(b, op); - } + SET_KEY_PTRS(check_key, 1); + get_random_bytes(&check_key->ptr[0], sizeof(uint64_t)); - BUG_ON(write_block(b) != b->sets[b->nsets].data); + SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV); - if (bch_btree_insert_keys(b, op)) { - if (!b->level) - bch_btree_leaf_dirty(b, op); - else - bch_btree_node_write(b, &op->cl); - } - } + bch_keylist_add(&insert, check_key); - return 0; + ret = bch_btree_insert_node(b, op, &insert, NULL, NULL); + + BUG_ON(!ret && !bch_keylist_empty(&insert)); +out: + if (upgrade) + downgrade_write(&b->lock); + return ret; } -int bch_btree_insert(struct btree_op *op, struct cache_set *c) +struct btree_insert_op { + struct btree_op op; + struct keylist *keys; + atomic_t *journal_ref; + struct bkey *replace_key; +}; + +int btree_insert_fn(struct btree_op *b_op, struct btree *b) { - int ret = 0; - struct keylist stack_keys; + struct btree_insert_op *op = container_of(b_op, + struct btree_insert_op, op); - /* - * Don't want to block with the btree locked unless we have to, - * otherwise we get deadlocks with try_harder and between split/gc - */ - clear_closure_blocking(&op->cl); - - BUG_ON(bch_keylist_empty(&op->keys)); - 
bch_keylist_copy(&stack_keys, &op->keys); - bch_keylist_init(&op->keys); - - while (!bch_keylist_empty(&stack_keys) || - !bch_keylist_empty(&op->keys)) { - if (bch_keylist_empty(&op->keys)) { - bch_keylist_add(&op->keys, - bch_keylist_pop(&stack_keys)); - op->lock = 0; - } + int ret = bch_btree_insert_node(b, &op->op, op->keys, + op->journal_ref, op->replace_key); + if (ret && !bch_keylist_empty(op->keys)) + return ret; + else + return MAP_DONE; +} - ret = btree_root(insert_recurse, c, op, &stack_keys); +int bch_btree_insert(struct cache_set *c, struct keylist *keys, + atomic_t *journal_ref, struct bkey *replace_key) +{ + struct btree_insert_op op; + int ret = 0; - if (ret == -EAGAIN) { - ret = 0; - closure_sync(&op->cl); - } else if (ret) { - struct bkey *k; + BUG_ON(current->bio_list); + BUG_ON(bch_keylist_empty(keys)); + + bch_btree_op_init(&op.op, 0); + op.keys = keys; + op.journal_ref = journal_ref; + op.replace_key = replace_key; + + while (!ret && !bch_keylist_empty(keys)) { + op.op.lock = 0; + ret = bch_btree_map_leaf_nodes(&op.op, c, + &START_KEY(keys->keys), + btree_insert_fn); + } - pr_err("error %i trying to insert key for %s", - ret, op_type(op)); + if (ret) { + struct bkey *k; - while ((k = bch_keylist_pop(&stack_keys) ?: - bch_keylist_pop(&op->keys))) - bkey_put(c, k, 0); - } - } + pr_err("error %i", ret); - bch_keylist_free(&stack_keys); + while ((k = bch_keylist_pop(keys))) + bkey_put(c, k); + } else if (op.op.insert_collision) + ret = -ESRCH; - if (op->journal) - atomic_dec_bug(op->journal); - op->journal = NULL; return ret; } @@ -2141,132 +2283,81 @@ void bch_btree_set_root(struct btree *b) mutex_unlock(&b->c->bucket_lock); b->c->root = b; - __bkey_put(b->c, &b->key); bch_journal_meta(b->c, &cl); closure_sync(&cl); } -/* Cache lookup */ +/* Map across nodes or keys */ -static int submit_partial_cache_miss(struct btree *b, struct btree_op *op, - struct bkey *k) +static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op, + struct bkey *from, + btree_map_nodes_fn *fn, int flags) { - struct search *s = container_of(op, struct search, op); - struct bio *bio = &s->bio.bio; - int ret = 0; + int ret = MAP_CONTINUE; + + if (b->level) { + struct bkey *k; + struct btree_iter iter; - while (!ret && - !op->lookup_done) { - unsigned sectors = INT_MAX; + bch_btree_iter_init(b, &iter, from); - if (KEY_INODE(k) == op->inode) { - if (KEY_START(k) <= bio->bi_sector) - break; + while ((k = bch_btree_iter_next_filter(&iter, b, + bch_ptr_bad))) { + ret = btree(map_nodes_recurse, k, b, + op, from, fn, flags); + from = NULL; - sectors = min_t(uint64_t, sectors, - KEY_START(k) - bio->bi_sector); + if (ret != MAP_CONTINUE) + return ret; } - - ret = s->d->cache_miss(b, s, bio, sectors); } + if (!b->level || flags == MAP_ALL_NODES) + ret = fn(op, b); + return ret; } -/* - * Read from a single key, handling the initial cache miss if the key starts in - * the middle of the bio - */ -static int submit_partial_cache_hit(struct btree *b, struct btree_op *op, - struct bkey *k) +int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, + struct bkey *from, btree_map_nodes_fn *fn, int flags) { - struct search *s = container_of(op, struct search, op); - struct bio *bio = &s->bio.bio; - unsigned ptr; - struct bio *n; - - int ret = submit_partial_cache_miss(b, op, k); - if (ret || op->lookup_done) - return ret; - - /* XXX: figure out best pointer - for multiple cache devices */ - ptr = 0; - - PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO; - - while (!op->lookup_done && - 
KEY_INODE(k) == op->inode && - bio->bi_sector < KEY_OFFSET(k)) { - struct bkey *bio_key; - sector_t sector = PTR_OFFSET(k, ptr) + - (bio->bi_sector - KEY_START(k)); - unsigned sectors = min_t(uint64_t, INT_MAX, - KEY_OFFSET(k) - bio->bi_sector); - - n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); - if (n == bio) - op->lookup_done = true; - - bio_key = &container_of(n, struct bbio, bio)->key; - - /* - * The bucket we're reading from might be reused while our bio - * is in flight, and we could then end up reading the wrong - * data. - * - * We guard against this by checking (in cache_read_endio()) if - * the pointer is stale again; if so, we treat it as an error - * and reread from the backing device (but we don't pass that - * error up anywhere). - */ - - bch_bkey_copy_single_ptr(bio_key, k, ptr); - SET_PTR_OFFSET(bio_key, 0, sector); - - n->bi_end_io = bch_cache_read_endio; - n->bi_private = &s->cl; - - __bch_submit_bbio(n, b->c); - } - - return 0; + return btree_root(map_nodes_recurse, c, op, from, fn, flags); } -int bch_btree_search_recurse(struct btree *b, struct btree_op *op) +static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, + struct bkey *from, btree_map_keys_fn *fn, + int flags) { - struct search *s = container_of(op, struct search, op); - struct bio *bio = &s->bio.bio; - - int ret = 0; + int ret = MAP_CONTINUE; struct bkey *k; struct btree_iter iter; - bch_btree_iter_init(b, &iter, &KEY(op->inode, bio->bi_sector, 0)); - do { - k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad); - if (!k) { - /* - * b->key would be exactly what we want, except that - * pointers to btree nodes have nonzero size - we - * wouldn't go far enough - */ + bch_btree_iter_init(b, &iter, from); - ret = submit_partial_cache_miss(b, op, - &KEY(KEY_INODE(&b->key), - KEY_OFFSET(&b->key), 0)); - break; - } + while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) { + ret = !b->level + ? fn(op, b, k) + : btree(map_keys_recurse, k, b, op, from, fn, flags); + from = NULL; + + if (ret != MAP_CONTINUE) + return ret; + } - ret = b->level - ? 
btree(search_recurse, k, b, op) - : submit_partial_cache_hit(b, op, k); - } while (!ret && - !op->lookup_done); + if (!b->level && (flags & MAP_END_KEY)) + ret = fn(op, b, &KEY(KEY_INODE(&b->key), + KEY_OFFSET(&b->key), 0)); return ret; } +int bch_btree_map_keys(struct btree_op *op, struct cache_set *c, + struct bkey *from, btree_map_keys_fn *fn, int flags) +{ + return btree_root(map_keys_recurse, c, op, from, fn, flags); +} + /* Keybuf code */ static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r) @@ -2285,80 +2376,79 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l, return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1); } -static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op, - struct keybuf *buf, struct bkey *end, - keybuf_pred_fn *pred) -{ - struct btree_iter iter; - bch_btree_iter_init(b, &iter, &buf->last_scanned); - - while (!array_freelist_empty(&buf->freelist)) { - struct bkey *k = bch_btree_iter_next_filter(&iter, b, - bch_ptr_bad); - - if (!b->level) { - if (!k) { - buf->last_scanned = b->key; - break; - } +struct refill { + struct btree_op op; + unsigned nr_found; + struct keybuf *buf; + struct bkey *end; + keybuf_pred_fn *pred; +}; - buf->last_scanned = *k; - if (bkey_cmp(&buf->last_scanned, end) >= 0) - break; +static int refill_keybuf_fn(struct btree_op *op, struct btree *b, + struct bkey *k) +{ + struct refill *refill = container_of(op, struct refill, op); + struct keybuf *buf = refill->buf; + int ret = MAP_CONTINUE; - if (pred(buf, k)) { - struct keybuf_key *w; + if (bkey_cmp(k, refill->end) >= 0) { + ret = MAP_DONE; + goto out; + } - spin_lock(&buf->lock); + if (!KEY_SIZE(k)) /* end key */ + goto out; - w = array_alloc(&buf->freelist); + if (refill->pred(buf, k)) { + struct keybuf_key *w; - w->private = NULL; - bkey_copy(&w->key, k); + spin_lock(&buf->lock); - if (RB_INSERT(&buf->keys, w, node, keybuf_cmp)) - array_free(&buf->freelist, w); + w = array_alloc(&buf->freelist); + if (!w) { + spin_unlock(&buf->lock); + return MAP_DONE; + } - spin_unlock(&buf->lock); - } - } else { - if (!k) - break; + w->private = NULL; + bkey_copy(&w->key, k); - btree(refill_keybuf, k, b, op, buf, end, pred); - /* - * Might get an error here, but can't really do anything - * and it'll get logged elsewhere. Just read what we - * can. - */ + if (RB_INSERT(&buf->keys, w, node, keybuf_cmp)) + array_free(&buf->freelist, w); + else + refill->nr_found++; - if (bkey_cmp(&buf->last_scanned, end) >= 0) - break; + if (array_freelist_empty(&buf->freelist)) + ret = MAP_DONE; - cond_resched(); - } + spin_unlock(&buf->lock); } - - return 0; +out: + buf->last_scanned = *k; + return ret; } void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf, struct bkey *end, keybuf_pred_fn *pred) { struct bkey start = buf->last_scanned; - struct btree_op op; - bch_btree_op_init_stack(&op); + struct refill refill; cond_resched(); - btree_root(refill_keybuf, c, &op, buf, end, pred); - closure_sync(&op.cl); + bch_btree_op_init(&refill.op, -1); + refill.nr_found = 0; + refill.buf = buf; + refill.end = end; + refill.pred = pred; + + bch_btree_map_keys(&refill.op, c, &buf->last_scanned, + refill_keybuf_fn, MAP_END_KEY); - pr_debug("found %s keys from %llu:%llu to %llu:%llu", - RB_EMPTY_ROOT(&buf->keys) ? "no" : - array_freelist_empty(&buf->freelist) ? 
"some" : "a few", - KEY_INODE(&start), KEY_OFFSET(&start), - KEY_INODE(&buf->last_scanned), KEY_OFFSET(&buf->last_scanned)); + trace_bcache_keyscan(refill.nr_found, + KEY_INODE(&start), KEY_OFFSET(&start), + KEY_INODE(&buf->last_scanned), + KEY_OFFSET(&buf->last_scanned)); spin_lock(&buf->lock); @@ -2436,9 +2526,9 @@ struct keybuf_key *bch_keybuf_next(struct keybuf *buf) } struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, - struct keybuf *buf, - struct bkey *end, - keybuf_pred_fn *pred) + struct keybuf *buf, + struct bkey *end, + keybuf_pred_fn *pred) { struct keybuf_key *ret; @@ -2471,14 +2561,12 @@ void bch_btree_exit(void) { if (btree_io_wq) destroy_workqueue(btree_io_wq); - if (bch_gc_wq) - destroy_workqueue(bch_gc_wq); } int __init bch_btree_init(void) { - if (!(bch_gc_wq = create_singlethread_workqueue("bch_btree_gc")) || - !(btree_io_wq = create_singlethread_workqueue("bch_btree_io"))) + btree_io_wq = create_singlethread_workqueue("bch_btree_io"); + if (!btree_io_wq) return -ENOMEM; return 0; diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index 3333d3723633..767e75570896 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -125,6 +125,7 @@ struct btree { unsigned long seq; struct rw_semaphore lock; struct cache_set *c; + struct btree *parent; unsigned long flags; uint16_t written; /* would be nice to kill */ @@ -200,12 +201,7 @@ static inline bool bkey_written(struct btree *b, struct bkey *k) static inline void set_gc_sectors(struct cache_set *c) { - atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 8); -} - -static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k) -{ - return __bch_ptr_invalid(b->c, b->level, k); + atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16); } static inline struct bkey *bch_btree_iter_init(struct btree *b, @@ -215,6 +211,16 @@ static inline struct bkey *bch_btree_iter_init(struct btree *b, return __bch_btree_iter_init(b, iter, search, b->sets); } +static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k) +{ + if (b->level) + return bch_btree_ptr_invalid(b->c, k); + else + return bch_extent_ptr_invalid(b->c, k); +} + +void bkey_put(struct cache_set *c, struct bkey *k); + /* Looping macros */ #define for_each_cached_btree(b, c, iter) \ @@ -234,51 +240,17 @@ static inline struct bkey *bch_btree_iter_init(struct btree *b, /* Recursing down the btree */ struct btree_op { - struct closure cl; - struct cache_set *c; - - /* Journal entry we have a refcount on */ - atomic_t *journal; - - /* Bio to be inserted into the cache */ - struct bio *cache_bio; - - unsigned inode; - - uint16_t write_prio; - /* Btree level at which we start taking write locks */ short lock; - /* Btree insertion type */ - enum { - BTREE_INSERT, - BTREE_REPLACE - } type:8; - - unsigned csum:1; - unsigned skip:1; - unsigned flush_journal:1; - - unsigned insert_data_done:1; - unsigned lookup_done:1; unsigned insert_collision:1; - - /* Anything after this point won't get zeroed in do_bio_hook() */ - - /* Keys to be inserted */ - struct keylist keys; - BKEY_PADDED(replace); }; -enum { - BTREE_INSERT_STATUS_INSERT, - BTREE_INSERT_STATUS_BACK_MERGE, - BTREE_INSERT_STATUS_OVERWROTE, - BTREE_INSERT_STATUS_FRONT_MERGE, -}; - -void bch_btree_op_init_stack(struct btree_op *); +static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level) +{ + memset(op, 0, sizeof(struct btree_op)); + op->lock = write_lock_level; +} static inline void rw_lock(bool w, struct btree *b, int 
level) { @@ -290,108 +262,71 @@ static inline void rw_lock(bool w, struct btree *b, int level) static inline void rw_unlock(bool w, struct btree *b) { -#ifdef CONFIG_BCACHE_EDEBUG - unsigned i; - - if (w && b->key.ptr[0]) - for (i = 0; i <= b->nsets; i++) - bch_check_key_order(b, b->sets[i].data); -#endif - if (w) b->seq++; (w ? up_write : up_read)(&b->lock); } -#define insert_lock(s, b) ((b)->level <= (s)->lock) +void bch_btree_node_read(struct btree *); +void bch_btree_node_write(struct btree *, struct closure *); -/* - * These macros are for recursing down the btree - they handle the details of - * locking and looking up nodes in the cache for you. They're best treated as - * mere syntax when reading code that uses them. - * - * op->lock determines whether we take a read or a write lock at a given depth. - * If you've got a read lock and find that you need a write lock (i.e. you're - * going to have to split), set op->lock and return -EINTR; btree_root() will - * call you again and you'll have the correct lock. - */ +void bch_btree_set_root(struct btree *); +struct btree *bch_btree_node_alloc(struct cache_set *, int, bool); +struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool); -/** - * btree - recurse down the btree on a specified key - * @fn: function to call, which will be passed the child node - * @key: key to recurse on - * @b: parent btree node - * @op: pointer to struct btree_op - */ -#define btree(fn, key, b, op, ...) \ -({ \ - int _r, l = (b)->level - 1; \ - bool _w = l <= (op)->lock; \ - struct btree *_b = bch_btree_node_get((b)->c, key, l, op); \ - if (!IS_ERR(_b)) { \ - _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \ - rw_unlock(_w, _b); \ - } else \ - _r = PTR_ERR(_b); \ - _r; \ -}) - -/** - * btree_root - call a function on the root of the btree - * @fn: function to call, which will be passed the child node - * @c: cache set - * @op: pointer to struct btree_op - */ -#define btree_root(fn, c, op, ...) 
\ -({ \ - int _r = -EINTR; \ - do { \ - struct btree *_b = (c)->root; \ - bool _w = insert_lock(op, _b); \ - rw_lock(_w, _b, _b->level); \ - if (_b == (c)->root && \ - _w == insert_lock(op, _b)) \ - _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \ - rw_unlock(_w, _b); \ - bch_cannibalize_unlock(c, &(op)->cl); \ - } while (_r == -EINTR); \ - \ - _r; \ -}) +int bch_btree_insert_check_key(struct btree *, struct btree_op *, + struct bkey *); +int bch_btree_insert(struct cache_set *, struct keylist *, + atomic_t *, struct bkey *); + +int bch_gc_thread_start(struct cache_set *); +size_t bch_btree_gc_finish(struct cache_set *); +void bch_moving_gc(struct cache_set *); +int bch_btree_check(struct cache_set *); +uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *); -static inline bool should_split(struct btree *b) +static inline void wake_up_gc(struct cache_set *c) { - struct bset *i = write_block(b); - return b->written >= btree_blocks(b) || - (i->seq == b->sets[0].data->seq && - b->written + __set_blocks(i, i->keys + 15, b->c) - > btree_blocks(b)); + if (c->gc_thread) + wake_up_process(c->gc_thread); } -void bch_btree_node_read(struct btree *); -void bch_btree_node_write(struct btree *, struct closure *); +#define MAP_DONE 0 +#define MAP_CONTINUE 1 -void bch_cannibalize_unlock(struct cache_set *, struct closure *); -void bch_btree_set_root(struct btree *); -struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *); -struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, - int, struct btree_op *); +#define MAP_ALL_NODES 0 +#define MAP_LEAF_NODES 1 -bool bch_btree_insert_check_key(struct btree *, struct btree_op *, - struct bio *); -int bch_btree_insert(struct btree_op *, struct cache_set *); +#define MAP_END_KEY 1 -int bch_btree_search_recurse(struct btree *, struct btree_op *); +typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *); +int __bch_btree_map_nodes(struct btree_op *, struct cache_set *, + struct bkey *, btree_map_nodes_fn *, int); -void bch_queue_gc(struct cache_set *); -size_t bch_btree_gc_finish(struct cache_set *); -void bch_moving_gc(struct closure *); -int bch_btree_check(struct cache_set *, struct btree_op *); -uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *); +static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, + struct bkey *from, btree_map_nodes_fn *fn) +{ + return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES); +} + +static inline int bch_btree_map_leaf_nodes(struct btree_op *op, + struct cache_set *c, + struct bkey *from, + btree_map_nodes_fn *fn) +{ + return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES); +} + +typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *, + struct bkey *); +int bch_btree_map_keys(struct btree_op *, struct cache_set *, + struct bkey *, btree_map_keys_fn *, int); + +typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *); void bch_keybuf_init(struct keybuf *); -void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *, - keybuf_pred_fn *); +void bch_refill_keybuf(struct cache_set *, struct keybuf *, + struct bkey *, keybuf_pred_fn *); bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *, struct bkey *); void bch_keybuf_del(struct keybuf *, struct keybuf_key *); diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c index 9aba2017f0d1..dfff2410322e 100644 --- a/drivers/md/bcache/closure.c +++ b/drivers/md/bcache/closure.c @@ -11,17 +11,6 @@ #include "closure.h" -void 
closure_queue(struct closure *cl) -{ - struct workqueue_struct *wq = cl->wq; - if (wq) { - INIT_WORK(&cl->work, cl->work.func); - BUG_ON(!queue_work(wq, &cl->work)); - } else - cl->fn(cl); -} -EXPORT_SYMBOL_GPL(closure_queue); - #define CL_FIELD(type, field) \ case TYPE_ ## type: \ return &container_of(cl, struct type, cl)->field @@ -30,17 +19,6 @@ static struct closure_waitlist *closure_waitlist(struct closure *cl) { switch (cl->type) { CL_FIELD(closure_with_waitlist, wait); - CL_FIELD(closure_with_waitlist_and_timer, wait); - default: - return NULL; - } -} - -static struct timer_list *closure_timer(struct closure *cl) -{ - switch (cl->type) { - CL_FIELD(closure_with_timer, timer); - CL_FIELD(closure_with_waitlist_and_timer, timer); default: return NULL; } @@ -51,7 +29,7 @@ static inline void closure_put_after_sub(struct closure *cl, int flags) int r = flags & CLOSURE_REMAINING_MASK; BUG_ON(flags & CLOSURE_GUARD_MASK); - BUG_ON(!r && (flags & ~(CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING))); + BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR)); /* Must deliver precisely one wakeup */ if (r == 1 && (flags & CLOSURE_SLEEPING)) @@ -59,7 +37,6 @@ static inline void closure_put_after_sub(struct closure *cl, int flags) if (!r) { if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) { - /* CLOSURE_BLOCKING might be set - clear it */ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER); closure_queue(cl); @@ -90,13 +67,13 @@ void closure_sub(struct closure *cl, int v) { closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining)); } -EXPORT_SYMBOL_GPL(closure_sub); +EXPORT_SYMBOL(closure_sub); void closure_put(struct closure *cl) { closure_put_after_sub(cl, atomic_dec_return(&cl->remaining)); } -EXPORT_SYMBOL_GPL(closure_put); +EXPORT_SYMBOL(closure_put); static void set_waiting(struct closure *cl, unsigned long f) { @@ -133,7 +110,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list) closure_sub(cl, CLOSURE_WAITING + 1); } } -EXPORT_SYMBOL_GPL(__closure_wake_up); +EXPORT_SYMBOL(__closure_wake_up); bool closure_wait(struct closure_waitlist *list, struct closure *cl) { @@ -146,7 +123,7 @@ bool closure_wait(struct closure_waitlist *list, struct closure *cl) return true; } -EXPORT_SYMBOL_GPL(closure_wait); +EXPORT_SYMBOL(closure_wait); /** * closure_sync() - sleep until a closure a closure has nothing left to wait on @@ -169,7 +146,7 @@ void closure_sync(struct closure *cl) __closure_end_sleep(cl); } -EXPORT_SYMBOL_GPL(closure_sync); +EXPORT_SYMBOL(closure_sync); /** * closure_trylock() - try to acquire the closure, without waiting @@ -183,17 +160,17 @@ bool closure_trylock(struct closure *cl, struct closure *parent) CLOSURE_REMAINING_INITIALIZER) != -1) return false; - closure_set_ret_ip(cl); - smp_mb(); + cl->parent = parent; if (parent) closure_get(parent); + closure_set_ret_ip(cl); closure_debug_create(cl); return true; } -EXPORT_SYMBOL_GPL(closure_trylock); +EXPORT_SYMBOL(closure_trylock); void __closure_lock(struct closure *cl, struct closure *parent, struct closure_waitlist *wait_list) @@ -205,57 +182,11 @@ void __closure_lock(struct closure *cl, struct closure *parent, if (closure_trylock(cl, parent)) return; - closure_wait_event_sync(wait_list, &wait, - atomic_read(&cl->remaining) == -1); + closure_wait_event(wait_list, &wait, + atomic_read(&cl->remaining) == -1); } } -EXPORT_SYMBOL_GPL(__closure_lock); - -static void closure_delay_timer_fn(unsigned long data) -{ - struct closure *cl = (struct closure *) data; - closure_sub(cl, CLOSURE_TIMER + 1); -} - -void do_closure_timer_init(struct closure 
*cl) -{ - struct timer_list *timer = closure_timer(cl); - - init_timer(timer); - timer->data = (unsigned long) cl; - timer->function = closure_delay_timer_fn; -} -EXPORT_SYMBOL_GPL(do_closure_timer_init); - -bool __closure_delay(struct closure *cl, unsigned long delay, - struct timer_list *timer) -{ - if (atomic_read(&cl->remaining) & CLOSURE_TIMER) - return false; - - BUG_ON(timer_pending(timer)); - - timer->expires = jiffies + delay; - - atomic_add(CLOSURE_TIMER + 1, &cl->remaining); - add_timer(timer); - return true; -} -EXPORT_SYMBOL_GPL(__closure_delay); - -void __closure_flush(struct closure *cl, struct timer_list *timer) -{ - if (del_timer(timer)) - closure_sub(cl, CLOSURE_TIMER + 1); -} -EXPORT_SYMBOL_GPL(__closure_flush); - -void __closure_flush_sync(struct closure *cl, struct timer_list *timer) -{ - if (del_timer_sync(timer)) - closure_sub(cl, CLOSURE_TIMER + 1); -} -EXPORT_SYMBOL_GPL(__closure_flush_sync); +EXPORT_SYMBOL(__closure_lock); #ifdef CONFIG_BCACHE_CLOSURES_DEBUG @@ -273,7 +204,7 @@ void closure_debug_create(struct closure *cl) list_add(&cl->all, &closure_list); spin_unlock_irqrestore(&closure_list_lock, flags); } -EXPORT_SYMBOL_GPL(closure_debug_create); +EXPORT_SYMBOL(closure_debug_create); void closure_debug_destroy(struct closure *cl) { @@ -286,7 +217,7 @@ void closure_debug_destroy(struct closure *cl) list_del(&cl->all); spin_unlock_irqrestore(&closure_list_lock, flags); } -EXPORT_SYMBOL_GPL(closure_debug_destroy); +EXPORT_SYMBOL(closure_debug_destroy); static struct dentry *debug; @@ -304,14 +235,12 @@ static int debug_seq_show(struct seq_file *f, void *data) cl, (void *) cl->ip, cl->fn, cl->parent, r & CLOSURE_REMAINING_MASK); - seq_printf(f, "%s%s%s%s%s%s\n", + seq_printf(f, "%s%s%s%s\n", test_bit(WORK_STRUCT_PENDING, work_data_bits(&cl->work)) ? "Q" : "", r & CLOSURE_RUNNING ? "R" : "", - r & CLOSURE_BLOCKING ? "B" : "", r & CLOSURE_STACK ? "S" : "", - r & CLOSURE_SLEEPING ? "Sl" : "", - r & CLOSURE_TIMER ? "T" : ""); + r & CLOSURE_SLEEPING ? "Sl" : ""); if (r & CLOSURE_WAITING) seq_printf(f, " W %pF\n", diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h index 00039924ea9d..9762f1be3304 100644 --- a/drivers/md/bcache/closure.h +++ b/drivers/md/bcache/closure.h @@ -155,21 +155,6 @@ * delayed_work embeds a work item and a timer_list. The important thing is, use * it exactly like you would a regular closure and closure_put() will magically * handle everything for you. - * - * We've got closures that embed timers, too. They're called, appropriately - * enough: - * struct closure_with_timer; - * - * This gives you access to closure_delay(). It takes a refcount for a specified - * number of jiffies - you could then call closure_sync() (for a slightly - * convoluted version of msleep()) or continue_at() - which gives you the same - * effect as using a delayed work item, except you can reuse the work_struct - * already embedded in struct closure. - * - * Lastly, there's struct closure_with_waitlist_and_timer. It does what you - * probably expect, if you happen to need the features of both. (You don't - * really want to know how all this is implemented, but if I've done my job - * right you shouldn't have to care). 
*/ struct closure; @@ -182,16 +167,11 @@ struct closure_waitlist { enum closure_type { TYPE_closure = 0, TYPE_closure_with_waitlist = 1, - TYPE_closure_with_timer = 2, - TYPE_closure_with_waitlist_and_timer = 3, - MAX_CLOSURE_TYPE = 3, + MAX_CLOSURE_TYPE = 1, }; enum closure_state { /* - * CLOSURE_BLOCKING: Causes closure_wait_event() to block, instead of - * waiting asynchronously - * * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by * the thread that owns the closure, and cleared by the thread that's * waking up the closure. @@ -200,10 +180,6 @@ enum closure_state { * - indicates that cl->task is valid and closure_put() may wake it up. * Only set or cleared by the thread that owns the closure. * - * CLOSURE_TIMER: Analagous to CLOSURE_WAITING, indicates that a closure - * has an outstanding timer. Must be set by the thread that owns the - * closure, and cleared by the timer function when the timer goes off. - * * The rest are for debugging and don't affect behaviour: * * CLOSURE_RUNNING: Set when a closure is running (i.e. by @@ -218,19 +194,17 @@ enum closure_state { * closure with this flag set */ - CLOSURE_BITS_START = (1 << 19), - CLOSURE_DESTRUCTOR = (1 << 19), - CLOSURE_BLOCKING = (1 << 21), - CLOSURE_WAITING = (1 << 23), - CLOSURE_SLEEPING = (1 << 25), - CLOSURE_TIMER = (1 << 27), + CLOSURE_BITS_START = (1 << 23), + CLOSURE_DESTRUCTOR = (1 << 23), + CLOSURE_WAITING = (1 << 25), + CLOSURE_SLEEPING = (1 << 27), CLOSURE_RUNNING = (1 << 29), CLOSURE_STACK = (1 << 31), }; #define CLOSURE_GUARD_MASK \ - ((CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING|CLOSURE_WAITING| \ - CLOSURE_SLEEPING|CLOSURE_TIMER|CLOSURE_RUNNING|CLOSURE_STACK) << 1) + ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_SLEEPING| \ + CLOSURE_RUNNING|CLOSURE_STACK) << 1) #define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1) #define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING) @@ -268,17 +242,6 @@ struct closure_with_waitlist { struct closure_waitlist wait; }; -struct closure_with_timer { - struct closure cl; - struct timer_list timer; -}; - -struct closure_with_waitlist_and_timer { - struct closure cl; - struct closure_waitlist wait; - struct timer_list timer; -}; - extern unsigned invalid_closure_type(void); #define __CLOSURE_TYPE(cl, _t) \ @@ -289,14 +252,11 @@ extern unsigned invalid_closure_type(void); ( \ __CLOSURE_TYPE(cl, closure) \ __CLOSURE_TYPE(cl, closure_with_waitlist) \ - __CLOSURE_TYPE(cl, closure_with_timer) \ - __CLOSURE_TYPE(cl, closure_with_waitlist_and_timer) \ invalid_closure_type() \ ) void closure_sub(struct closure *cl, int v); void closure_put(struct closure *cl); -void closure_queue(struct closure *cl); void __closure_wake_up(struct closure_waitlist *list); bool closure_wait(struct closure_waitlist *list, struct closure *cl); void closure_sync(struct closure *cl); @@ -305,12 +265,6 @@ bool closure_trylock(struct closure *cl, struct closure *parent); void __closure_lock(struct closure *cl, struct closure *parent, struct closure_waitlist *wait_list); -void do_closure_timer_init(struct closure *cl); -bool __closure_delay(struct closure *cl, unsigned long delay, - struct timer_list *timer); -void __closure_flush(struct closure *cl, struct timer_list *timer); -void __closure_flush_sync(struct closure *cl, struct timer_list *timer); - #ifdef CONFIG_BCACHE_CLOSURES_DEBUG void closure_debug_init(void); @@ -354,11 +308,6 @@ static inline void closure_set_stopped(struct closure *cl) atomic_sub(CLOSURE_RUNNING, &cl->remaining); } -static inline bool closure_is_stopped(struct closure *cl) -{ 
- return !(atomic_read(&cl->remaining) & CLOSURE_RUNNING); -} - static inline bool closure_is_unlocked(struct closure *cl) { return atomic_read(&cl->remaining) == -1; @@ -367,14 +316,6 @@ static inline bool closure_is_unlocked(struct closure *cl) static inline void do_closure_init(struct closure *cl, struct closure *parent, bool running) { - switch (cl->type) { - case TYPE_closure_with_timer: - case TYPE_closure_with_waitlist_and_timer: - do_closure_timer_init(cl); - default: - break; - } - cl->parent = parent; if (parent) closure_get(parent); @@ -429,8 +370,7 @@ do { \ static inline void closure_init_stack(struct closure *cl) { memset(cl, 0, sizeof(struct closure)); - atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER| - CLOSURE_BLOCKING|CLOSURE_STACK); + atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK); } /** @@ -461,24 +401,6 @@ do { \ #define closure_lock(cl, parent) \ __closure_lock(__to_internal_closure(cl), parent, &(cl)->wait) -/** - * closure_delay() - delay some number of jiffies - * @cl: the closure that will sleep - * @delay: the delay in jiffies - * - * Takes a refcount on @cl which will be released after @delay jiffies; this may - * be used to have a function run after a delay with continue_at(), or - * closure_sync() may be used for a convoluted version of msleep(). - */ -#define closure_delay(cl, delay) \ - __closure_delay(__to_internal_closure(cl), delay, &(cl)->timer) - -#define closure_flush(cl) \ - __closure_flush(__to_internal_closure(cl), &(cl)->timer) - -#define closure_flush_sync(cl) \ - __closure_flush_sync(__to_internal_closure(cl), &(cl)->timer) - static inline void __closure_end_sleep(struct closure *cl) { __set_current_state(TASK_RUNNING); @@ -498,40 +420,6 @@ static inline void __closure_start_sleep(struct closure *cl) } /** - * closure_blocking() - returns true if the closure is in blocking mode. - * - * If a closure is in blocking mode, closure_wait_event() will sleep until the - * condition is true instead of waiting asynchronously. - */ -static inline bool closure_blocking(struct closure *cl) -{ - return atomic_read(&cl->remaining) & CLOSURE_BLOCKING; -} - -/** - * set_closure_blocking() - put a closure in blocking mode. - * - * If a closure is in blocking mode, closure_wait_event() will sleep until the - * condition is true instead of waiting asynchronously. - * - * Not thread safe - can only be called by the thread running the closure. - */ -static inline void set_closure_blocking(struct closure *cl) -{ - if (!closure_blocking(cl)) - atomic_add(CLOSURE_BLOCKING, &cl->remaining); -} - -/* - * Not thread safe - can only be called by the thread running the closure. - */ -static inline void clear_closure_blocking(struct closure *cl) -{ - if (closure_blocking(cl)) - atomic_sub(CLOSURE_BLOCKING, &cl->remaining); -} - -/** * closure_wake_up() - wake up all closures on a wait list. */ static inline void closure_wake_up(struct closure_waitlist *list) @@ -561,63 +449,36 @@ static inline void closure_wake_up(struct closure_waitlist *list) * refcount on our closure. If this was a stack allocated closure, that would be * bad. 
*/ -#define __closure_wait_event(list, cl, condition, _block) \ +#define closure_wait_event(list, cl, condition) \ ({ \ - bool block = _block; \ typeof(condition) ret; \ \ while (1) { \ ret = (condition); \ if (ret) { \ __closure_wake_up(list); \ - if (block) \ - closure_sync(cl); \ - \ + closure_sync(cl); \ break; \ } \ \ - if (block) \ - __closure_start_sleep(cl); \ - \ - if (!closure_wait(list, cl)) { \ - if (!block) \ - break; \ + __closure_start_sleep(cl); \ \ + if (!closure_wait(list, cl)) \ schedule(); \ - } \ } \ \ ret; \ }) -/** - * closure_wait_event() - wait on a condition, synchronously or asynchronously. - * @list: the wait list to wait on - * @cl: the closure that is doing the waiting - * @condition: a C expression for the event to wait for - * - * If the closure is in blocking mode, sleeps until the @condition evaluates to - * true - exactly like wait_event(). - * - * If the closure is not in blocking mode, waits asynchronously; if the - * condition is currently false the @cl is put onto @list and returns. @list - * owns a refcount on @cl; closure_sync() or continue_at() may be used later to - * wait for another thread to wake up @list, which drops the refcount on @cl. - * - * Returns the value of @condition; @cl will be on @list iff @condition was - * false. - * - * closure_wake_up(@list) must be called after changing any variable that could - * cause @condition to become true. - */ -#define closure_wait_event(list, cl, condition) \ - __closure_wait_event(list, cl, condition, closure_blocking(cl)) - -#define closure_wait_event_async(list, cl, condition) \ - __closure_wait_event(list, cl, condition, false) - -#define closure_wait_event_sync(list, cl, condition) \ - __closure_wait_event(list, cl, condition, true) +static inline void closure_queue(struct closure *cl) +{ + struct workqueue_struct *wq = cl->wq; + if (wq) { + INIT_WORK(&cl->work, cl->work.func); + BUG_ON(!queue_work(wq, &cl->work)); + } else + cl->fn(cl); +} static inline void set_closure_fn(struct closure *cl, closure_fn *fn, struct workqueue_struct *wq) @@ -642,7 +503,7 @@ do { \ #define continue_at_nobarrier(_cl, _fn, _wq) \ do { \ set_closure_fn(_cl, _fn, _wq); \ - closure_queue(cl); \ + closure_queue(_cl); \ return; \ } while (0) diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 88e6411eab4f..264fcfbd6290 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -8,7 +8,6 @@ #include "bcache.h" #include "btree.h" #include "debug.h" -#include "request.h" #include <linux/console.h> #include <linux/debugfs.h> @@ -77,29 +76,17 @@ int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k) return out - buf; } -int bch_btree_to_text(char *buf, size_t size, const struct btree *b) -{ - return scnprintf(buf, size, "%zu level %i/%i", - PTR_BUCKET_NR(b->c, &b->key, 0), - b->level, b->c->root ? b->c->root->level : -1); -} - -#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG) - -static bool skipped_backwards(struct btree *b, struct bkey *k) -{ - return bkey_cmp(k, (!b->level) - ? 
&START_KEY(bkey_next(k)) - : bkey_next(k)) > 0; -} +#ifdef CONFIG_BCACHE_DEBUG static void dump_bset(struct btree *b, struct bset *i) { - struct bkey *k; + struct bkey *k, *next; unsigned j; char buf[80]; - for (k = i->start; k < end(i); k = bkey_next(k)) { + for (k = i->start; k < end(i); k = next) { + next = bkey_next(k); + bch_bkey_to_text(buf, sizeof(buf), k); printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b), (uint64_t *) k - i->d, i->keys, buf); @@ -115,15 +102,21 @@ static void dump_bset(struct btree *b, struct bset *i) printk(" %s\n", bch_ptr_status(b->c, k)); - if (bkey_next(k) < end(i) && - skipped_backwards(b, k)) + if (next < end(i) && + bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0) printk(KERN_ERR "Key skipped backwards\n"); } } -#endif +static void bch_dump_bucket(struct btree *b) +{ + unsigned i; -#ifdef CONFIG_BCACHE_DEBUG + console_lock(); + for (i = 0; i <= b->nsets; i++) + dump_bset(b, b->sets[i].data); + console_unlock(); +} void bch_btree_verify(struct btree *b, struct bset *new) { @@ -176,66 +169,44 @@ void bch_btree_verify(struct btree *b, struct bset *new) mutex_unlock(&b->c->verify_lock); } -static void data_verify_endio(struct bio *bio, int error) -{ - struct closure *cl = bio->bi_private; - closure_put(cl); -} - -void bch_data_verify(struct search *s) +void bch_data_verify(struct cached_dev *dc, struct bio *bio) { char name[BDEVNAME_SIZE]; - struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); - struct closure *cl = &s->cl; struct bio *check; struct bio_vec *bv; int i; - if (!s->unaligned_bvec) - bio_for_each_segment(bv, s->orig_bio, i) - bv->bv_offset = 0, bv->bv_len = PAGE_SIZE; - - check = bio_clone(s->orig_bio, GFP_NOIO); + check = bio_clone(bio, GFP_NOIO); if (!check) return; if (bio_alloc_pages(check, GFP_NOIO)) goto out_put; - check->bi_rw = READ_SYNC; - check->bi_private = cl; - check->bi_end_io = data_verify_endio; - - closure_bio_submit(check, cl, &dc->disk); - closure_sync(cl); + submit_bio_wait(READ_SYNC, check); - bio_for_each_segment(bv, s->orig_bio, i) { - void *p1 = kmap(bv->bv_page); - void *p2 = kmap(check->bi_io_vec[i].bv_page); + bio_for_each_segment(bv, bio, i) { + void *p1 = kmap_atomic(bv->bv_page); + void *p2 = page_address(check->bi_io_vec[i].bv_page); - if (memcmp(p1 + bv->bv_offset, - p2 + bv->bv_offset, - bv->bv_len)) - printk(KERN_ERR - "bcache (%s): verify failed at sector %llu\n", - bdevname(dc->bdev, name), - (uint64_t) s->orig_bio->bi_sector); + cache_set_err_on(memcmp(p1 + bv->bv_offset, + p2 + bv->bv_offset, + bv->bv_len), + dc->disk.c, + "verify failed at dev %s sector %llu", + bdevname(dc->bdev, name), + (uint64_t) bio->bi_sector); - kunmap(bv->bv_page); - kunmap(check->bi_io_vec[i].bv_page); + kunmap_atomic(p1); } - __bio_for_each_segment(bv, check, i, 0) + bio_for_each_segment_all(bv, check, i) __free_page(bv->bv_page); out_put: bio_put(check); } -#endif - -#ifdef CONFIG_BCACHE_EDEBUG - -unsigned bch_count_data(struct btree *b) +int __bch_count_data(struct btree *b) { unsigned ret = 0; struct btree_iter iter; @@ -247,72 +218,60 @@ unsigned bch_count_data(struct btree *b) return ret; } -static void vdump_bucket_and_panic(struct btree *b, const char *fmt, - va_list args) -{ - unsigned i; - char buf[80]; - - console_lock(); - - for (i = 0; i <= b->nsets; i++) - dump_bset(b, b->sets[i].data); - - vprintk(fmt, args); - - console_unlock(); - - bch_btree_to_text(buf, sizeof(buf), b); - panic("at %s\n", buf); -} - -void bch_check_key_order_msg(struct btree *b, struct bset *i, - const char *fmt, ...) 
-{ - struct bkey *k; - - if (!i->keys) - return; - - for (k = i->start; bkey_next(k) < end(i); k = bkey_next(k)) - if (skipped_backwards(b, k)) { - va_list args; - va_start(args, fmt); - - vdump_bucket_and_panic(b, fmt, args); - va_end(args); - } -} - -void bch_check_keys(struct btree *b, const char *fmt, ...) +void __bch_check_keys(struct btree *b, const char *fmt, ...) { va_list args; struct bkey *k, *p = NULL; struct btree_iter iter; - - if (b->level) - return; + const char *err; for_each_key(b, k, &iter) { - if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) { - printk(KERN_ERR "Keys out of order:\n"); - goto bug; - } - - if (bch_ptr_invalid(b, k)) - continue; - - if (p && bkey_cmp(p, &START_KEY(k)) > 0) { - printk(KERN_ERR "Overlapping keys:\n"); - goto bug; + if (!b->level) { + err = "Keys out of order"; + if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) + goto bug; + + if (bch_ptr_invalid(b, k)) + continue; + + err = "Overlapping keys"; + if (p && bkey_cmp(p, &START_KEY(k)) > 0) + goto bug; + } else { + if (bch_ptr_bad(b, k)) + continue; + + err = "Duplicate keys"; + if (p && !bkey_cmp(p, k)) + goto bug; } p = k; } + + err = "Key larger than btree node key"; + if (p && bkey_cmp(p, &b->key) > 0) + goto bug; + return; bug: + bch_dump_bucket(b); + va_start(args, fmt); - vdump_bucket_and_panic(b, fmt, args); + vprintk(fmt, args); va_end(args); + + panic("bcache error: %s:\n", err); +} + +void bch_btree_iter_next_check(struct btree_iter *iter) +{ + struct bkey *k = iter->data->k, *next = bkey_next(k); + + if (next < iter->data->end && + bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0) { + bch_dump_bucket(iter->b); + panic("Key skipped backwards\n"); + } } #endif diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h index 1c39b5a2489b..2ede60e31874 100644 --- a/drivers/md/bcache/debug.h +++ b/drivers/md/bcache/debug.h @@ -4,40 +4,44 @@ /* Btree/bkey debug printing */ int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k); -int bch_btree_to_text(char *buf, size_t size, const struct btree *b); - -#ifdef CONFIG_BCACHE_EDEBUG - -unsigned bch_count_data(struct btree *); -void bch_check_key_order_msg(struct btree *, struct bset *, const char *, ...); -void bch_check_keys(struct btree *, const char *, ...); - -#define bch_check_key_order(b, i) \ - bch_check_key_order_msg(b, i, "keys out of order") -#define EBUG_ON(cond) BUG_ON(cond) - -#else /* EDEBUG */ - -#define bch_count_data(b) 0 -#define bch_check_key_order(b, i) do {} while (0) -#define bch_check_key_order_msg(b, i, ...) do {} while (0) -#define bch_check_keys(b, ...) 
do {} while (0) -#define EBUG_ON(cond) do {} while (0) - -#endif #ifdef CONFIG_BCACHE_DEBUG void bch_btree_verify(struct btree *, struct bset *); -void bch_data_verify(struct search *); +void bch_data_verify(struct cached_dev *, struct bio *); +int __bch_count_data(struct btree *); +void __bch_check_keys(struct btree *, const char *, ...); +void bch_btree_iter_next_check(struct btree_iter *); + +#define EBUG_ON(cond) BUG_ON(cond) +#define expensive_debug_checks(c) ((c)->expensive_debug_checks) +#define key_merging_disabled(c) ((c)->key_merging_disabled) +#define bypass_torture_test(d) ((d)->bypass_torture_test) #else /* DEBUG */ static inline void bch_btree_verify(struct btree *b, struct bset *i) {} -static inline void bch_data_verify(struct search *s) {}; +static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {} +static inline int __bch_count_data(struct btree *b) { return -1; } +static inline void __bch_check_keys(struct btree *b, const char *fmt, ...) {} +static inline void bch_btree_iter_next_check(struct btree_iter *iter) {} + +#define EBUG_ON(cond) do { if (cond); } while (0) +#define expensive_debug_checks(c) 0 +#define key_merging_disabled(c) 0 +#define bypass_torture_test(d) 0 #endif +#define bch_count_data(b) \ + (expensive_debug_checks((b)->c) ? __bch_count_data(b) : -1) + +#define bch_check_keys(b, ...) \ +do { \ + if (expensive_debug_checks((b)->c)) \ + __bch_check_keys(b, __VA_ARGS__); \ +} while (0) + #ifdef CONFIG_DEBUG_FS void bch_debug_init_cache_set(struct cache_set *); #else diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 8435f81e5d85..ecdaa671bd50 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -7,7 +7,6 @@ #include "bcache.h" #include "btree.h" #include "debug.h" -#include "request.h" #include <trace/events/bcache.h> @@ -31,17 +30,20 @@ static void journal_read_endio(struct bio *bio, int error) } static int journal_read_bucket(struct cache *ca, struct list_head *list, - struct btree_op *op, unsigned bucket_index) + unsigned bucket_index) { struct journal_device *ja = &ca->journal; struct bio *bio = &ja->bio; struct journal_replay *i; struct jset *j, *data = ca->set->journal.w[0].data; + struct closure cl; unsigned len, left, offset = 0; int ret = 0; sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]); + closure_init_stack(&cl); + pr_debug("reading %llu", (uint64_t) bucket); while (offset < ca->sb.bucket_size) { @@ -55,11 +57,11 @@ reread: left = ca->sb.bucket_size - offset; bio->bi_size = len << 9; bio->bi_end_io = journal_read_endio; - bio->bi_private = &op->cl; + bio->bi_private = &cl; bch_bio_map(bio, data); - closure_bio_submit(bio, &op->cl, ca); - closure_sync(&op->cl); + closure_bio_submit(bio, &cl, ca); + closure_sync(&cl); /* This function could be simpler now since we no longer write * journal entries that overlap bucket boundaries; this means @@ -72,7 +74,7 @@ reread: left = ca->sb.bucket_size - offset; struct list_head *where; size_t blocks, bytes = set_bytes(j); - if (j->magic != jset_magic(ca->set)) + if (j->magic != jset_magic(&ca->sb)) return ret; if (bytes > left << 9) @@ -129,12 +131,11 @@ next_set: return ret; } -int bch_journal_read(struct cache_set *c, struct list_head *list, - struct btree_op *op) +int bch_journal_read(struct cache_set *c, struct list_head *list) { #define read_bucket(b) \ ({ \ - int ret = journal_read_bucket(ca, list, op, b); \ + int ret = journal_read_bucket(ca, list, b); \ __set_bit(b, bitmap); \ if (ret < 0) \ return ret; \ @@ 
-292,8 +293,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list) } } -int bch_journal_replay(struct cache_set *s, struct list_head *list, - struct btree_op *op) +int bch_journal_replay(struct cache_set *s, struct list_head *list) { int ret = 0, keys = 0, entries = 0; struct bkey *k; @@ -301,31 +301,30 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list, list_entry(list->prev, struct journal_replay, list); uint64_t start = i->j.last_seq, end = i->j.seq, n = start; + struct keylist keylist; + + bch_keylist_init(&keylist); list_for_each_entry(i, list, list) { BUG_ON(i->pin && atomic_read(i->pin) != 1); - if (n != i->j.seq) - pr_err( - "journal entries %llu-%llu missing! (replaying %llu-%llu)\n", - n, i->j.seq - 1, start, end); + cache_set_err_on(n != i->j.seq, s, +"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)", + n, i->j.seq - 1, start, end); for (k = i->j.start; k < end(&i->j); k = bkey_next(k)) { trace_bcache_journal_replay_key(k); - bkey_copy(op->keys.top, k); - bch_keylist_push(&op->keys); - - op->journal = i->pin; - atomic_inc(op->journal); + bkey_copy(keylist.top, k); + bch_keylist_push(&keylist); - ret = bch_btree_insert(op, s); + ret = bch_btree_insert(s, &keylist, i->pin, NULL); if (ret) goto err; - BUG_ON(!bch_keylist_empty(&op->keys)); + BUG_ON(!bch_keylist_empty(&keylist)); keys++; cond_resched(); @@ -339,14 +338,13 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list, pr_info("journal replay done, %i keys in %i entries, seq %llu", keys, entries, end); - +err: while (!list_empty(list)) { i = list_first_entry(list, struct journal_replay, list); list_del(&i->list); kfree(i); } -err: - closure_sync(&op->cl); + return ret; } @@ -358,48 +356,35 @@ static void btree_flush_write(struct cache_set *c) * Try to find the btree node with that references the oldest journal * entry, best is our current candidate and is locked if non NULL: */ - struct btree *b, *best = NULL; - unsigned iter; + struct btree *b, *best; + unsigned i; +retry: + best = NULL; + + for_each_cached_btree(b, c, i) + if (btree_current_write(b)->journal) { + if (!best) + best = b; + else if (journal_pin_cmp(c, + btree_current_write(best)->journal, + btree_current_write(b)->journal)) { + best = b; + } + } - for_each_cached_btree(b, c, iter) { - if (!down_write_trylock(&b->lock)) - continue; + b = best; + if (b) { + rw_lock(true, b, b->level); - if (!btree_node_dirty(b) || - !btree_current_write(b)->journal) { + if (!btree_current_write(b)->journal) { rw_unlock(true, b); - continue; + /* We raced */ + goto retry; } - if (!best) - best = b; - else if (journal_pin_cmp(c, - btree_current_write(best), - btree_current_write(b))) { - rw_unlock(true, best); - best = b; - } else - rw_unlock(true, b); + bch_btree_node_write(b, NULL); + rw_unlock(true, b); } - - if (best) - goto out; - - /* We can't find the best btree node, just pick the first */ - list_for_each_entry(b, &c->btree_cache, list) - if (!b->level && btree_node_dirty(b)) { - best = b; - rw_lock(true, best, best->level); - goto found; - } - -out: - if (!best) - return; -found: - if (btree_node_dirty(best)) - bch_btree_node_write(best, NULL); - rw_unlock(true, best); } #define last_seq(j) ((j)->seq - fifo_used(&(j)->pin) + 1) @@ -495,7 +480,7 @@ static void journal_reclaim(struct cache_set *c) do_journal_discard(ca); if (c->journal.blocks_free) - return; + goto out; /* * Allocate: @@ -521,7 +506,7 @@ static void journal_reclaim(struct cache_set *c) if (n) c->journal.blocks_free = c->sb.bucket_size >> 
c->block_bits; - +out: if (!journal_full(&c->journal)) __closure_wake_up(&c->journal.wait); } @@ -554,32 +539,26 @@ static void journal_write_endio(struct bio *bio, int error) struct journal_write *w = bio->bi_private; cache_set_err_on(error, w->c, "journal io error"); - closure_put(&w->c->journal.io.cl); + closure_put(&w->c->journal.io); } static void journal_write(struct closure *); static void journal_write_done(struct closure *cl) { - struct journal *j = container_of(cl, struct journal, io.cl); - struct cache_set *c = container_of(j, struct cache_set, journal); - + struct journal *j = container_of(cl, struct journal, io); struct journal_write *w = (j->cur == j->w) ? &j->w[1] : &j->w[0]; __closure_wake_up(&w->wait); - - if (c->journal_delay_ms) - closure_delay(&j->io, msecs_to_jiffies(c->journal_delay_ms)); - - continue_at(cl, journal_write, system_wq); + continue_at_nobarrier(cl, journal_write, system_wq); } static void journal_write_unlocked(struct closure *cl) __releases(c->journal.lock) { - struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl); + struct cache_set *c = container_of(cl, struct cache_set, journal.io); struct cache *ca; struct journal_write *w = c->journal.cur; struct bkey *k = &c->journal.key; @@ -617,7 +596,7 @@ static void journal_write_unlocked(struct closure *cl) for_each_cache(ca, c, i) w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0]; - w->data->magic = jset_magic(c); + w->data->magic = jset_magic(&c->sb); w->data->version = BCACHE_JSET_VERSION; w->data->last_seq = last_seq(&c->journal); w->data->csum = csum_set(w->data); @@ -660,121 +639,134 @@ static void journal_write_unlocked(struct closure *cl) static void journal_write(struct closure *cl) { - struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl); + struct cache_set *c = container_of(cl, struct cache_set, journal.io); spin_lock(&c->journal.lock); journal_write_unlocked(cl); } -static void __journal_try_write(struct cache_set *c, bool noflush) +static void journal_try_write(struct cache_set *c) __releases(c->journal.lock) { - struct closure *cl = &c->journal.io.cl; + struct closure *cl = &c->journal.io; + struct journal_write *w = c->journal.cur; - if (!closure_trylock(cl, &c->cl)) - spin_unlock(&c->journal.lock); - else if (noflush && journal_full(&c->journal)) { - spin_unlock(&c->journal.lock); - continue_at(cl, journal_write, system_wq); - } else + w->need_write = true; + + if (closure_trylock(cl, &c->cl)) journal_write_unlocked(cl); + else + spin_unlock(&c->journal.lock); } -#define journal_try_write(c) __journal_try_write(c, false) - -void bch_journal_meta(struct cache_set *c, struct closure *cl) +static struct journal_write *journal_wait_for_write(struct cache_set *c, + unsigned nkeys) { - struct journal_write *w; + size_t sectors; + struct closure cl; - if (CACHE_SYNC(&c->sb)) { - spin_lock(&c->journal.lock); + closure_init_stack(&cl); + + spin_lock(&c->journal.lock); - w = c->journal.cur; - w->need_write = true; + while (1) { + struct journal_write *w = c->journal.cur; - if (cl) - BUG_ON(!closure_wait(&w->wait, cl)); + sectors = __set_blocks(w->data, w->data->keys + nkeys, + c) * c->sb.block_size; - closure_flush(&c->journal.io); - __journal_try_write(c, true); + if (sectors <= min_t(size_t, + c->journal.blocks_free * c->sb.block_size, + PAGE_SECTORS << JSET_BITS)) + return w; + + /* XXX: tracepoint */ + if (!journal_full(&c->journal)) { + trace_bcache_journal_entry_full(c); + + /* + * XXX: If we were inserting so many keys that they + * won't fit in 
an _empty_ journal write, we'll + * deadlock. For now, handle this in + * bch_keylist_realloc() - but something to think about. + */ + BUG_ON(!w->data->keys); + + closure_wait(&w->wait, &cl); + journal_try_write(c); /* unlocks */ + } else { + trace_bcache_journal_full(c); + + closure_wait(&c->journal.wait, &cl); + journal_reclaim(c); + spin_unlock(&c->journal.lock); + + btree_flush_write(c); + } + + closure_sync(&cl); + spin_lock(&c->journal.lock); } } +static void journal_write_work(struct work_struct *work) +{ + struct cache_set *c = container_of(to_delayed_work(work), + struct cache_set, + journal.work); + spin_lock(&c->journal.lock); + journal_try_write(c); +} + /* * Entry point to the journalling code - bio_insert() and btree_invalidate() * pass bch_journal() a list of keys to be journalled, and then * bch_journal() hands those same keys off to btree_insert_async() */ -void bch_journal(struct closure *cl) +atomic_t *bch_journal(struct cache_set *c, + struct keylist *keys, + struct closure *parent) { - struct btree_op *op = container_of(cl, struct btree_op, cl); - struct cache_set *c = op->c; struct journal_write *w; - size_t b, n = ((uint64_t *) op->keys.top) - op->keys.list; - - if (op->type != BTREE_INSERT || - !CACHE_SYNC(&c->sb)) - goto out; + atomic_t *ret; - /* - * If we're looping because we errored, might already be waiting on - * another journal write: - */ - while (atomic_read(&cl->parent->remaining) & CLOSURE_WAITING) - closure_sync(cl->parent); + if (!CACHE_SYNC(&c->sb)) + return NULL; - spin_lock(&c->journal.lock); + w = journal_wait_for_write(c, bch_keylist_nkeys(keys)); - if (journal_full(&c->journal)) { - trace_bcache_journal_full(c); + memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys)); + w->data->keys += bch_keylist_nkeys(keys); - closure_wait(&c->journal.wait, cl); + ret = &fifo_back(&c->journal.pin); + atomic_inc(ret); - journal_reclaim(c); + if (parent) { + closure_wait(&w->wait, parent); + journal_try_write(c); + } else if (!w->need_write) { + schedule_delayed_work(&c->journal.work, + msecs_to_jiffies(c->journal_delay_ms)); + spin_unlock(&c->journal.lock); + } else { spin_unlock(&c->journal.lock); - - btree_flush_write(c); - continue_at(cl, bch_journal, bcache_wq); } - w = c->journal.cur; - w->need_write = true; - b = __set_blocks(w->data, w->data->keys + n, c); - - if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS || - b > c->journal.blocks_free) { - trace_bcache_journal_entry_full(c); - - /* - * XXX: If we were inserting so many keys that they won't fit in - * an _empty_ journal write, we'll deadlock. For now, handle - * this in bch_keylist_realloc() - but something to think about. 
- */ - BUG_ON(!w->data->keys); - - BUG_ON(!closure_wait(&w->wait, cl)); - - closure_flush(&c->journal.io); - journal_try_write(c); - continue_at(cl, bch_journal, bcache_wq); - } - - memcpy(end(w->data), op->keys.list, n * sizeof(uint64_t)); - w->data->keys += n; + return ret; +} - op->journal = &fifo_back(&c->journal.pin); - atomic_inc(op->journal); +void bch_journal_meta(struct cache_set *c, struct closure *cl) +{ + struct keylist keys; + atomic_t *ref; - if (op->flush_journal) { - closure_flush(&c->journal.io); - closure_wait(&w->wait, cl->parent); - } + bch_keylist_init(&keys); - journal_try_write(c); -out: - bch_btree_insert_async(cl); + ref = bch_journal(c, &keys, cl); + if (ref) + atomic_dec_bug(ref); } void bch_journal_free(struct cache_set *c) @@ -790,6 +782,7 @@ int bch_journal_alloc(struct cache_set *c) closure_init_unlocked(&j->io); spin_lock_init(&j->lock); + INIT_DELAYED_WORK(&j->work, journal_write_work); c->journal_delay_ms = 100; diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h index 3d7851274b04..a6472fda94b2 100644 --- a/drivers/md/bcache/journal.h +++ b/drivers/md/bcache/journal.h @@ -75,43 +75,6 @@ * nodes that are pinning the oldest journal entries first. */ -#define BCACHE_JSET_VERSION_UUIDv1 1 -/* Always latest UUID format */ -#define BCACHE_JSET_VERSION_UUID 1 -#define BCACHE_JSET_VERSION 1 - -/* - * On disk format for a journal entry: - * seq is monotonically increasing; every journal entry has its own unique - * sequence number. - * - * last_seq is the oldest journal entry that still has keys the btree hasn't - * flushed to disk yet. - * - * version is for on disk format changes. - */ -struct jset { - uint64_t csum; - uint64_t magic; - uint64_t seq; - uint32_t version; - uint32_t keys; - - uint64_t last_seq; - - BKEY_PADDED(uuid_bucket); - BKEY_PADDED(btree_root); - uint16_t btree_level; - uint16_t pad[3]; - - uint64_t prio_bucket[MAX_CACHES_PER_SET]; - - union { - struct bkey start[0]; - uint64_t d[0]; - }; -}; - /* * Only used for holding the journal entries we read in btree_journal_read() * during cache_registration @@ -140,7 +103,8 @@ struct journal { spinlock_t lock; /* used when waiting because the journal was full */ struct closure_waitlist wait; - struct closure_with_timer io; + struct closure io; + struct delayed_work work; /* Number of blocks free in the bucket(s) we're currently writing to */ unsigned blocks_free; @@ -188,8 +152,7 @@ struct journal_device { }; #define journal_pin_cmp(c, l, r) \ - (fifo_idx(&(c)->journal.pin, (l)->journal) > \ - fifo_idx(&(c)->journal.pin, (r)->journal)) + (fifo_idx(&(c)->journal.pin, (l)) > fifo_idx(&(c)->journal.pin, (r))) #define JOURNAL_PIN 20000 @@ -199,15 +162,14 @@ struct journal_device { struct closure; struct cache_set; struct btree_op; +struct keylist; -void bch_journal(struct closure *); +atomic_t *bch_journal(struct cache_set *, struct keylist *, struct closure *); void bch_journal_next(struct journal *); void bch_journal_mark(struct cache_set *, struct list_head *); void bch_journal_meta(struct cache_set *, struct closure *); -int bch_journal_read(struct cache_set *, struct list_head *, - struct btree_op *); -int bch_journal_replay(struct cache_set *, struct list_head *, - struct btree_op *); +int bch_journal_read(struct cache_set *, struct list_head *); +int bch_journal_replay(struct cache_set *, struct list_head *); void bch_journal_free(struct cache_set *); int bch_journal_alloc(struct cache_set *); diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 
1a3b4f4786c3..7c1275e66025 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -12,8 +12,9 @@ #include <trace/events/bcache.h> struct moving_io { + struct closure cl; struct keybuf_key *w; - struct search s; + struct data_insert_op op; struct bbio bio; }; @@ -38,13 +39,13 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k) static void moving_io_destructor(struct closure *cl) { - struct moving_io *io = container_of(cl, struct moving_io, s.cl); + struct moving_io *io = container_of(cl, struct moving_io, cl); kfree(io); } static void write_moving_finish(struct closure *cl) { - struct moving_io *io = container_of(cl, struct moving_io, s.cl); + struct moving_io *io = container_of(cl, struct moving_io, cl); struct bio *bio = &io->bio.bio; struct bio_vec *bv; int i; @@ -52,13 +53,12 @@ static void write_moving_finish(struct closure *cl) bio_for_each_segment_all(bv, bio, i) __free_page(bv->bv_page); - if (io->s.op.insert_collision) + if (io->op.replace_collision) trace_bcache_gc_copy_collision(&io->w->key); - bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w); + bch_keybuf_del(&io->op.c->moving_gc_keys, io->w); - atomic_dec_bug(&io->s.op.c->in_flight); - closure_wake_up(&io->s.op.c->moving_gc_wait); + up(&io->op.c->moving_in_flight); closure_return_with_destructor(cl, moving_io_destructor); } @@ -66,12 +66,12 @@ static void write_moving_finish(struct closure *cl) static void read_moving_endio(struct bio *bio, int error) { struct moving_io *io = container_of(bio->bi_private, - struct moving_io, s.cl); + struct moving_io, cl); if (error) - io->s.error = error; + io->op.error = error; - bch_bbio_endio(io->s.op.c, bio, error, "reading data to move"); + bch_bbio_endio(io->op.c, bio, error, "reading data to move"); } static void moving_init(struct moving_io *io) @@ -85,54 +85,53 @@ static void moving_init(struct moving_io *io) bio->bi_size = KEY_SIZE(&io->w->key) << 9; bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS); - bio->bi_private = &io->s.cl; + bio->bi_private = &io->cl; bio->bi_io_vec = bio->bi_inline_vecs; bch_bio_map(bio, NULL); } static void write_moving(struct closure *cl) { - struct search *s = container_of(cl, struct search, cl); - struct moving_io *io = container_of(s, struct moving_io, s); + struct moving_io *io = container_of(cl, struct moving_io, cl); + struct data_insert_op *op = &io->op; - if (!s->error) { + if (!op->error) { moving_init(io); - io->bio.bio.bi_sector = KEY_START(&io->w->key); - s->op.lock = -1; - s->op.write_prio = 1; - s->op.cache_bio = &io->bio.bio; + io->bio.bio.bi_sector = KEY_START(&io->w->key); + op->write_prio = 1; + op->bio = &io->bio.bio; - s->writeback = KEY_DIRTY(&io->w->key); - s->op.csum = KEY_CSUM(&io->w->key); + op->writeback = KEY_DIRTY(&io->w->key); + op->csum = KEY_CSUM(&io->w->key); - s->op.type = BTREE_REPLACE; - bkey_copy(&s->op.replace, &io->w->key); + bkey_copy(&op->replace_key, &io->w->key); + op->replace = true; - closure_init(&s->op.cl, cl); - bch_insert_data(&s->op.cl); + closure_call(&op->cl, bch_data_insert, NULL, cl); } - continue_at(cl, write_moving_finish, NULL); + continue_at(cl, write_moving_finish, system_wq); } static void read_moving_submit(struct closure *cl) { - struct search *s = container_of(cl, struct search, cl); - struct moving_io *io = container_of(s, struct moving_io, s); + struct moving_io *io = container_of(cl, struct moving_io, cl); struct bio *bio = &io->bio.bio; - bch_submit_bbio(bio, s->op.c, &io->w->key, 0); + bch_submit_bbio(bio, io->op.c, &io->w->key, 0); - 
continue_at(cl, write_moving, bch_gc_wq); + continue_at(cl, write_moving, system_wq); } -static void read_moving(struct closure *cl) +static void read_moving(struct cache_set *c) { - struct cache_set *c = container_of(cl, struct cache_set, moving_gc); struct keybuf_key *w; struct moving_io *io; struct bio *bio; + struct closure cl; + + closure_init_stack(&cl); /* XXX: if we error, background writeback could stall indefinitely */ @@ -150,8 +149,8 @@ static void read_moving(struct closure *cl) w->private = io; io->w = w; - io->s.op.inode = KEY_INODE(&w->key); - io->s.op.c = c; + io->op.inode = KEY_INODE(&w->key); + io->op.c = c; moving_init(io); bio = &io->bio.bio; @@ -164,13 +163,8 @@ static void read_moving(struct closure *cl) trace_bcache_gc_copy(&w->key); - closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl); - - if (atomic_inc_return(&c->in_flight) >= 64) { - closure_wait_event(&c->moving_gc_wait, cl, - atomic_read(&c->in_flight) < 64); - continue_at(cl, read_moving, bch_gc_wq); - } + down(&c->moving_in_flight); + closure_call(&io->cl, read_moving_submit, NULL, &cl); } if (0) { @@ -180,7 +174,7 @@ err: if (!IS_ERR_OR_NULL(w->private)) bch_keybuf_del(&c->moving_gc_keys, w); } - closure_return(cl); + closure_sync(&cl); } static bool bucket_cmp(struct bucket *l, struct bucket *r) @@ -193,15 +187,14 @@ static unsigned bucket_heap_top(struct cache *ca) return GC_SECTORS_USED(heap_peek(&ca->heap)); } -void bch_moving_gc(struct closure *cl) +void bch_moving_gc(struct cache_set *c) { - struct cache_set *c = container_of(cl, struct cache_set, gc.cl); struct cache *ca; struct bucket *b; unsigned i; if (!c->copy_gc_enabled) - closure_return(cl); + return; mutex_lock(&c->bucket_lock); @@ -242,13 +235,11 @@ void bch_moving_gc(struct closure *cl) c->moving_gc_keys.last_scanned = ZERO_KEY; - closure_init(&c->moving_gc, cl); - read_moving(&c->moving_gc); - - closure_return(cl); + read_moving(c); } void bch_moving_init_cache_set(struct cache_set *c) { bch_keybuf_init(&c->moving_gc_keys); + sema_init(&c->moving_in_flight, 64); } diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 2a7f0dd6abab..fbcc851ed5a5 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -25,7 +25,7 @@ struct kmem_cache *bch_search_cache; -static void check_should_skip(struct cached_dev *, struct search *); +static void bch_data_insert_start(struct closure *); /* Cgroup interface */ @@ -213,221 +213,79 @@ static void bio_csum(struct bio *bio, struct bkey *k) /* Insert data into cache */ -static void bio_invalidate(struct closure *cl) +static void bch_data_insert_keys(struct closure *cl) { - struct btree_op *op = container_of(cl, struct btree_op, cl); - struct bio *bio = op->cache_bio; - - pr_debug("invalidating %i sectors from %llu", - bio_sectors(bio), (uint64_t) bio->bi_sector); - - while (bio_sectors(bio)) { - unsigned len = min(bio_sectors(bio), 1U << 14); - - if (bch_keylist_realloc(&op->keys, 0, op->c)) - goto out; - - bio->bi_sector += len; - bio->bi_size -= len << 9; - - bch_keylist_add(&op->keys, - &KEY(op->inode, bio->bi_sector, len)); - } - - op->insert_data_done = true; - bio_put(bio); -out: - continue_at(cl, bch_journal, bcache_wq); -} - -struct open_bucket { - struct list_head list; - struct task_struct *last; - unsigned sectors_free; - BKEY_PADDED(key); -}; - -void bch_open_buckets_free(struct cache_set *c) -{ - struct open_bucket *b; - - while (!list_empty(&c->data_buckets)) { - b = list_first_entry(&c->data_buckets, - struct open_bucket, list); - 
list_del(&b->list); - kfree(b); - } -} - -int bch_open_buckets_alloc(struct cache_set *c) -{ - int i; - - spin_lock_init(&c->data_bucket_lock); - - for (i = 0; i < 6; i++) { - struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL); - if (!b) - return -ENOMEM; - - list_add(&b->list, &c->data_buckets); - } - - return 0; -} - -/* - * We keep multiple buckets open for writes, and try to segregate different - * write streams for better cache utilization: first we look for a bucket where - * the last write to it was sequential with the current write, and failing that - * we look for a bucket that was last used by the same task. - * - * The ideas is if you've got multiple tasks pulling data into the cache at the - * same time, you'll get better cache utilization if you try to segregate their - * data and preserve locality. - * - * For example, say you've starting Firefox at the same time you're copying a - * bunch of files. Firefox will likely end up being fairly hot and stay in the - * cache awhile, but the data you copied might not be; if you wrote all that - * data to the same buckets it'd get invalidated at the same time. - * - * Both of those tasks will be doing fairly random IO so we can't rely on - * detecting sequential IO to segregate their data, but going off of the task - * should be a sane heuristic. - */ -static struct open_bucket *pick_data_bucket(struct cache_set *c, - const struct bkey *search, - struct task_struct *task, - struct bkey *alloc) -{ - struct open_bucket *ret, *ret_task = NULL; - - list_for_each_entry_reverse(ret, &c->data_buckets, list) - if (!bkey_cmp(&ret->key, search)) - goto found; - else if (ret->last == task) - ret_task = ret; - - ret = ret_task ?: list_first_entry(&c->data_buckets, - struct open_bucket, list); -found: - if (!ret->sectors_free && KEY_PTRS(alloc)) { - ret->sectors_free = c->sb.bucket_size; - bkey_copy(&ret->key, alloc); - bkey_init(alloc); - } - - if (!ret->sectors_free) - ret = NULL; - - return ret; -} - -/* - * Allocates some space in the cache to write to, and k to point to the newly - * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the - * end of the newly allocated space). - * - * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many - * sectors were actually allocated. - * - * If s->writeback is true, will not fail. - */ -static bool bch_alloc_sectors(struct bkey *k, unsigned sectors, - struct search *s) -{ - struct cache_set *c = s->op.c; - struct open_bucket *b; - BKEY_PADDED(key) alloc; - struct closure cl, *w = NULL; - unsigned i; - - if (s->writeback) { - closure_init_stack(&cl); - w = &cl; - } + struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); + atomic_t *journal_ref = NULL; + struct bkey *replace_key = op->replace ? &op->replace_key : NULL; + int ret; /* - * We might have to allocate a new bucket, which we can't do with a - * spinlock held. So if we have to allocate, we drop the lock, allocate - * and then retry. KEY_PTRS() indicates whether alloc points to - * allocated bucket(s). + * If we're looping, might already be waiting on + * another journal write - can't wait on more than one journal write at + * a time + * + * XXX: this looks wrong */ +#if 0 + while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING) + closure_sync(&s->cl); +#endif - bkey_init(&alloc.key); - spin_lock(&c->data_bucket_lock); - - while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) { - unsigned watermark = s->op.write_prio - ? 
WATERMARK_MOVINGGC - : WATERMARK_NONE; - - spin_unlock(&c->data_bucket_lock); - - if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w)) - return false; + if (!op->replace) + journal_ref = bch_journal(op->c, &op->insert_keys, + op->flush_journal ? cl : NULL); - spin_lock(&c->data_bucket_lock); + ret = bch_btree_insert(op->c, &op->insert_keys, + journal_ref, replace_key); + if (ret == -ESRCH) { + op->replace_collision = true; + } else if (ret) { + op->error = -ENOMEM; + op->insert_data_done = true; } - /* - * If we had to allocate, we might race and not need to allocate the - * second time we call find_data_bucket(). If we allocated a bucket but - * didn't use it, drop the refcount bch_bucket_alloc_set() took: - */ - if (KEY_PTRS(&alloc.key)) - __bkey_put(c, &alloc.key); - - for (i = 0; i < KEY_PTRS(&b->key); i++) - EBUG_ON(ptr_stale(c, &b->key, i)); + if (journal_ref) + atomic_dec_bug(journal_ref); - /* Set up the pointer to the space we're allocating: */ + if (!op->insert_data_done) + continue_at(cl, bch_data_insert_start, bcache_wq); - for (i = 0; i < KEY_PTRS(&b->key); i++) - k->ptr[i] = b->key.ptr[i]; + bch_keylist_free(&op->insert_keys); + closure_return(cl); +} - sectors = min(sectors, b->sectors_free); +static void bch_data_invalidate(struct closure *cl) +{ + struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); + struct bio *bio = op->bio; - SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors); - SET_KEY_SIZE(k, sectors); - SET_KEY_PTRS(k, KEY_PTRS(&b->key)); + pr_debug("invalidating %i sectors from %llu", + bio_sectors(bio), (uint64_t) bio->bi_sector); - /* - * Move b to the end of the lru, and keep track of what this bucket was - * last used for: - */ - list_move_tail(&b->list, &c->data_buckets); - bkey_copy_key(&b->key, k); - b->last = s->task; + while (bio_sectors(bio)) { + unsigned sectors = min(bio_sectors(bio), + 1U << (KEY_SIZE_BITS - 1)); - b->sectors_free -= sectors; + if (bch_keylist_realloc(&op->insert_keys, 0, op->c)) + goto out; - for (i = 0; i < KEY_PTRS(&b->key); i++) { - SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors); + bio->bi_sector += sectors; + bio->bi_size -= sectors << 9; - atomic_long_add(sectors, - &PTR_CACHE(c, &b->key, i)->sectors_written); + bch_keylist_add(&op->insert_keys, + &KEY(op->inode, bio->bi_sector, sectors)); } - if (b->sectors_free < c->sb.block_size) - b->sectors_free = 0; - - /* - * k takes refcounts on the buckets it points to until it's inserted - * into the btree, but if we're done with this bucket we just transfer - * get_data_bucket()'s refcount. - */ - if (b->sectors_free) - for (i = 0; i < KEY_PTRS(&b->key); i++) - atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin); - - spin_unlock(&c->data_bucket_lock); - return true; + op->insert_data_done = true; + bio_put(bio); +out: + continue_at(cl, bch_data_insert_keys, bcache_wq); } -static void bch_insert_data_error(struct closure *cl) +static void bch_data_insert_error(struct closure *cl) { - struct btree_op *op = container_of(cl, struct btree_op, cl); + struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); /* * Our data write just errored, which means we've got a bunch of keys to @@ -438,35 +296,34 @@ static void bch_insert_data_error(struct closure *cl) * from the keys we'll accomplish just that. 
*/ - struct bkey *src = op->keys.bottom, *dst = op->keys.bottom; + struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys; - while (src != op->keys.top) { + while (src != op->insert_keys.top) { struct bkey *n = bkey_next(src); SET_KEY_PTRS(src, 0); - bkey_copy(dst, src); + memmove(dst, src, bkey_bytes(src)); dst = bkey_next(dst); src = n; } - op->keys.top = dst; + op->insert_keys.top = dst; - bch_journal(cl); + bch_data_insert_keys(cl); } -static void bch_insert_data_endio(struct bio *bio, int error) +static void bch_data_insert_endio(struct bio *bio, int error) { struct closure *cl = bio->bi_private; - struct btree_op *op = container_of(cl, struct btree_op, cl); - struct search *s = container_of(op, struct search, op); + struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); if (error) { /* TODO: We could try to recover from this. */ - if (s->writeback) - s->error = error; - else if (s->write) - set_closure_fn(cl, bch_insert_data_error, bcache_wq); + if (op->writeback) + op->error = error; + else if (!op->replace) + set_closure_fn(cl, bch_data_insert_error, bcache_wq); else set_closure_fn(cl, NULL, NULL); } @@ -474,18 +331,17 @@ static void bch_insert_data_endio(struct bio *bio, int error) bch_bbio_endio(op->c, bio, error, "writing data to cache"); } -static void bch_insert_data_loop(struct closure *cl) +static void bch_data_insert_start(struct closure *cl) { - struct btree_op *op = container_of(cl, struct btree_op, cl); - struct search *s = container_of(op, struct search, op); - struct bio *bio = op->cache_bio, *n; + struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); + struct bio *bio = op->bio, *n; - if (op->skip) - return bio_invalidate(cl); + if (op->bypass) + return bch_data_invalidate(cl); if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) { set_gc_sectors(op->c); - bch_queue_gc(op->c); + wake_up_gc(op->c); } /* @@ -497,29 +353,30 @@ static void bch_insert_data_loop(struct closure *cl) do { unsigned i; struct bkey *k; - struct bio_set *split = s->d - ? s->d->bio_split : op->c->bio_split; + struct bio_set *split = op->c->bio_split; /* 1 for the device pointer and 1 for the chksum */ - if (bch_keylist_realloc(&op->keys, + if (bch_keylist_realloc(&op->insert_keys, 1 + (op->csum ? 
1 : 0), op->c)) - continue_at(cl, bch_journal, bcache_wq); + continue_at(cl, bch_data_insert_keys, bcache_wq); - k = op->keys.top; + k = op->insert_keys.top; bkey_init(k); SET_KEY_INODE(k, op->inode); SET_KEY_OFFSET(k, bio->bi_sector); - if (!bch_alloc_sectors(k, bio_sectors(bio), s)) + if (!bch_alloc_sectors(op->c, k, bio_sectors(bio), + op->write_point, op->write_prio, + op->writeback)) goto err; n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split); - n->bi_end_io = bch_insert_data_endio; + n->bi_end_io = bch_data_insert_endio; n->bi_private = cl; - if (s->writeback) { + if (op->writeback) { SET_KEY_DIRTY(k, true); for (i = 0; i < KEY_PTRS(k); i++) @@ -532,17 +389,17 @@ static void bch_insert_data_loop(struct closure *cl) bio_csum(n, k); trace_bcache_cache_insert(k); - bch_keylist_push(&op->keys); + bch_keylist_push(&op->insert_keys); n->bi_rw |= REQ_WRITE; bch_submit_bbio(n, op->c, k, 0); } while (n != bio); op->insert_data_done = true; - continue_at(cl, bch_journal, bcache_wq); + continue_at(cl, bch_data_insert_keys, bcache_wq); err: /* bch_alloc_sectors() blocks if s->writeback = true */ - BUG_ON(s->writeback); + BUG_ON(op->writeback); /* * But if it's not a writeback write we'd rather just bail out if @@ -550,15 +407,15 @@ err: * we might be starving btree writes for gc or something. */ - if (s->write) { + if (!op->replace) { /* * Writethrough write: We can't complete the write until we've * updated the index. But we don't want to delay the write while * we wait for buckets to be freed up, so just invalidate the * rest of the write. */ - op->skip = true; - return bio_invalidate(cl); + op->bypass = true; + return bch_data_invalidate(cl); } else { /* * From a cache miss, we can just insert the keys for the data @@ -567,15 +424,15 @@ err: op->insert_data_done = true; bio_put(bio); - if (!bch_keylist_empty(&op->keys)) - continue_at(cl, bch_journal, bcache_wq); + if (!bch_keylist_empty(&op->insert_keys)) + continue_at(cl, bch_data_insert_keys, bcache_wq); else closure_return(cl); } } /** - * bch_insert_data - stick some data in the cache + * bch_data_insert - stick some data in the cache * * This is the starting point for any data to end up in a cache device; it could * be from a normal write, or a writeback write, or a write to a flash only @@ -587,56 +444,179 @@ err: * data is written it calls bch_journal, and after the keys have been added to * the next journal write they're inserted into the btree. * - * It inserts the data in op->cache_bio; bi_sector is used for the key offset, + * It inserts the data in s->cache_bio; bi_sector is used for the key offset, * and op->inode is used for the key inode. * - * If op->skip is true, instead of inserting the data it invalidates the region - * of the cache represented by op->cache_bio and op->inode. + * If s->bypass is true, instead of inserting the data it invalidates the + * region of the cache represented by s->cache_bio and op->inode. */ -void bch_insert_data(struct closure *cl) +void bch_data_insert(struct closure *cl) { - struct btree_op *op = container_of(cl, struct btree_op, cl); + struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); + + trace_bcache_write(op->bio, op->writeback, op->bypass); - bch_keylist_init(&op->keys); - bio_get(op->cache_bio); - bch_insert_data_loop(cl); + bch_keylist_init(&op->insert_keys); + bio_get(op->bio); + bch_data_insert_start(cl); } -void bch_btree_insert_async(struct closure *cl) +/* Congested? 
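
The hunks above restructure the insert path into a chain of closure continuations: bch_data_insert() takes a reference on the bio and kicks off bch_data_insert_start(), which splits the bio, allocates buckets and submits the cache writes, and bch_data_insert_keys() then journals and inserts the keys, each step handing off to the next via continue_at() on bcache_wq. Below is a toy userspace sketch of that continuation-passing shape; struct op, step_fn and the driver loop are invented stand-ins, not the real closure machinery from closure.h.

        /* Toy continuation-passing sketch; not the bcache closure API. */
        #include <stdbool.h>
        #include <stdio.h>

        struct op;
        typedef void (*step_fn)(struct op *);

        struct op {
                step_fn next;           /* what continue_at() would schedule */
                bool data_done;
        };

        static void insert_keys(struct op *op)
        {
                printf("journal the keys, then insert them into the btree\n");
                op->next = NULL;                /* closure_return() */
        }

        static void insert_start(struct op *op)
        {
                printf("split the bio, allocate buckets, submit cache writes\n");
                op->data_done = true;
                op->next = insert_keys;         /* continue_at(cl, ..._keys, wq) */
        }

        int main(void)
        {
                struct op op = { .next = insert_start };

                while (op.next) {               /* stand-in for bcache_wq */
                        step_fn step = op.next;

                        op.next = NULL;
                        step(&op);
                }
                return 0;
        }
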
*/ + +unsigned bch_get_congested(struct cache_set *c) { - struct btree_op *op = container_of(cl, struct btree_op, cl); - struct search *s = container_of(op, struct search, op); + int i; + long rand; - if (bch_btree_insert(op, op->c)) { - s->error = -ENOMEM; - op->insert_data_done = true; - } + if (!c->congested_read_threshold_us && + !c->congested_write_threshold_us) + return 0; + + i = (local_clock_us() - c->congested_last_us) / 1024; + if (i < 0) + return 0; + + i += atomic_read(&c->congested); + if (i >= 0) + return 0; - if (op->insert_data_done) { - bch_keylist_free(&op->keys); - closure_return(cl); - } else - continue_at(cl, bch_insert_data_loop, bcache_wq); + i += CONGESTED_MAX; + + if (i > 0) + i = fract_exp_two(i, 6); + + rand = get_random_int(); + i -= bitmap_weight(&rand, BITS_PER_LONG); + + return i > 0 ? i : 1; } -/* Common code for the make_request functions */ +static void add_sequential(struct task_struct *t) +{ + ewma_add(t->sequential_io_avg, + t->sequential_io, 8, 0); -static void request_endio(struct bio *bio, int error) + t->sequential_io = 0; +} + +static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k) { - struct closure *cl = bio->bi_private; + return &dc->io_hash[hash_64(k, RECENT_IO_BITS)]; +} - if (error) { - struct search *s = container_of(cl, struct search, cl); - s->error = error; - /* Only cache read errors are recoverable */ - s->recoverable = false; +static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) +{ + struct cache_set *c = dc->disk.c; + unsigned mode = cache_mode(dc, bio); + unsigned sectors, congested = bch_get_congested(c); + struct task_struct *task = current; + struct io *i; + + if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || + c->gc_stats.in_use > CUTOFF_CACHE_ADD || + (bio->bi_rw & REQ_DISCARD)) + goto skip; + + if (mode == CACHE_MODE_NONE || + (mode == CACHE_MODE_WRITEAROUND && + (bio->bi_rw & REQ_WRITE))) + goto skip; + + if (bio->bi_sector & (c->sb.block_size - 1) || + bio_sectors(bio) & (c->sb.block_size - 1)) { + pr_debug("skipping unaligned io"); + goto skip; } - bio_put(bio); - closure_put(cl); + if (bypass_torture_test(dc)) { + if ((get_random_int() & 3) == 3) + goto skip; + else + goto rescale; + } + + if (!congested && !dc->sequential_cutoff) + goto rescale; + + if (!congested && + mode == CACHE_MODE_WRITEBACK && + (bio->bi_rw & REQ_WRITE) && + (bio->bi_rw & REQ_SYNC)) + goto rescale; + + spin_lock(&dc->io_lock); + + hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash) + if (i->last == bio->bi_sector && + time_before(jiffies, i->jiffies)) + goto found; + + i = list_first_entry(&dc->io_lru, struct io, lru); + + add_sequential(task); + i->sequential = 0; +found: + if (i->sequential + bio->bi_size > i->sequential) + i->sequential += bio->bi_size; + + i->last = bio_end_sector(bio); + i->jiffies = jiffies + msecs_to_jiffies(5000); + task->sequential_io = i->sequential; + + hlist_del(&i->hash); + hlist_add_head(&i->hash, iohash(dc, i->last)); + list_move_tail(&i->lru, &dc->io_lru); + + spin_unlock(&dc->io_lock); + + sectors = max(task->sequential_io, + task->sequential_io_avg) >> 9; + + if (dc->sequential_cutoff && + sectors >= dc->sequential_cutoff >> 9) { + trace_bcache_bypass_sequential(bio); + goto skip; + } + + if (congested && sectors >= congested) { + trace_bcache_bypass_congested(bio); + goto skip; + } + +rescale: + bch_rescale_priorities(c, bio_sectors(bio)); + return false; +skip: + bch_mark_sectors_bypassed(c, dc, bio_sectors(bio)); + return true; } -void bch_cache_read_endio(struct bio 
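
check_should_bypass() above decides per-bio whether to skip the cache entirely. Two pieces of it benefit from a concrete illustration: the per-task sequential-IO tracking is an exponentially weighted moving average, so one large burst decays over the following IOs instead of permanently marking the task sequential, and bch_get_congested() subtracts the popcount of a random word from the congestion value, turning the cutoff into a soft, probabilistic threshold. A rough userspace sketch of both ideas follows; the constants, the plain-form EWMA and the helper names are illustrative and do not reproduce the ewma_add() macro from util.h.

        #include <stdio.h>
        #include <stdlib.h>

        /* plain-form EWMA: move avg toward val by 1/2^weight of the gap */
        static long ewma(long avg, long val, int weight)
        {
                return avg + (val - avg) / (1L << weight);
        }

        /* subtracting popcount(random word) (mean ~32 on 64-bit) makes the
         * congestion cutoff probabilistic instead of a hard edge */
        static long dither(long congested)
        {
                long v = congested - __builtin_popcountl(random());

                return v > 0 ? v : 1;
        }

        int main(void)
        {
                long avg = 0, io[] = { 4096, 1 << 20, 4096, 4096, 4096 };

                for (unsigned i = 0; i < sizeof(io) / sizeof(io[0]); i++) {
                        avg = ewma(avg, io[i], 3);
                        printf("io=%ld avg=%ld\n", io[i], avg);
                }

                printf("dithered congested=%ld\n", dither(40));
                return 0;
        }
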
*bio, int error) +/* Cache lookup */ + +struct search { + /* Stack frame for bio_complete */ + struct closure cl; + + struct bcache_device *d; + + struct bbio bio; + struct bio *orig_bio; + struct bio *cache_miss; + + unsigned insert_bio_sectors; + + unsigned recoverable:1; + unsigned unaligned_bvec:1; + unsigned write:1; + unsigned read_dirty_data:1; + + unsigned long start_time; + + struct btree_op op; + struct data_insert_op iop; +}; + +static void bch_cache_read_endio(struct bio *bio, int error) { struct bbio *b = container_of(bio, struct bbio, bio); struct closure *cl = bio->bi_private; @@ -650,13 +630,113 @@ void bch_cache_read_endio(struct bio *bio, int error) */ if (error) - s->error = error; - else if (ptr_stale(s->op.c, &b->key, 0)) { - atomic_long_inc(&s->op.c->cache_read_races); - s->error = -EINTR; + s->iop.error = error; + else if (ptr_stale(s->iop.c, &b->key, 0)) { + atomic_long_inc(&s->iop.c->cache_read_races); + s->iop.error = -EINTR; } - bch_bbio_endio(s->op.c, bio, error, "reading from cache"); + bch_bbio_endio(s->iop.c, bio, error, "reading from cache"); +} + +/* + * Read from a single key, handling the initial cache miss if the key starts in + * the middle of the bio + */ +static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) +{ + struct search *s = container_of(op, struct search, op); + struct bio *n, *bio = &s->bio.bio; + struct bkey *bio_key; + unsigned ptr; + + if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0) + return MAP_CONTINUE; + + if (KEY_INODE(k) != s->iop.inode || + KEY_START(k) > bio->bi_sector) { + unsigned bio_sectors = bio_sectors(bio); + unsigned sectors = KEY_INODE(k) == s->iop.inode + ? min_t(uint64_t, INT_MAX, + KEY_START(k) - bio->bi_sector) + : INT_MAX; + + int ret = s->d->cache_miss(b, s, bio, sectors); + if (ret != MAP_CONTINUE) + return ret; + + /* if this was a complete miss we shouldn't get here */ + BUG_ON(bio_sectors <= sectors); + } + + if (!KEY_SIZE(k)) + return MAP_CONTINUE; + + /* XXX: figure out best pointer - for multiple cache devices */ + ptr = 0; + + PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO; + + if (KEY_DIRTY(k)) + s->read_dirty_data = true; + + n = bch_bio_split(bio, min_t(uint64_t, INT_MAX, + KEY_OFFSET(k) - bio->bi_sector), + GFP_NOIO, s->d->bio_split); + + bio_key = &container_of(n, struct bbio, bio)->key; + bch_bkey_copy_single_ptr(bio_key, k, ptr); + + bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key); + bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key); + + n->bi_end_io = bch_cache_read_endio; + n->bi_private = &s->cl; + + /* + * The bucket we're reading from might be reused while our bio + * is in flight, and we could then end up reading the wrong + * data. + * + * We guard against this by checking (in cache_read_endio()) if + * the pointer is stale again; if so, we treat it as an error + * and reread from the backing device (but we don't pass that + * error up anywhere). + */ + + __bch_submit_bbio(n, b->c); + return n == bio ? 
MAP_DONE : MAP_CONTINUE; +} + +static void cache_lookup(struct closure *cl) +{ + struct search *s = container_of(cl, struct search, iop.cl); + struct bio *bio = &s->bio.bio; + + int ret = bch_btree_map_keys(&s->op, s->iop.c, + &KEY(s->iop.inode, bio->bi_sector, 0), + cache_lookup_fn, MAP_END_KEY); + if (ret == -EAGAIN) + continue_at(cl, cache_lookup, bcache_wq); + + closure_return(cl); +} + +/* Common code for the make_request functions */ + +static void request_endio(struct bio *bio, int error) +{ + struct closure *cl = bio->bi_private; + + if (error) { + struct search *s = container_of(cl, struct search, cl); + s->iop.error = error; + /* Only cache read errors are recoverable */ + s->recoverable = false; + } + + bio_put(bio); + closure_put(cl); } static void bio_complete(struct search *s) @@ -670,8 +750,8 @@ static void bio_complete(struct search *s) part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration); part_stat_unlock(); - trace_bcache_request_end(s, s->orig_bio); - bio_endio(s->orig_bio, s->error); + trace_bcache_request_end(s->d, s->orig_bio); + bio_endio(s->orig_bio, s->iop.error); s->orig_bio = NULL; } } @@ -691,8 +771,8 @@ static void search_free(struct closure *cl) struct search *s = container_of(cl, struct search, cl); bio_complete(s); - if (s->op.cache_bio) - bio_put(s->op.cache_bio); + if (s->iop.bio) + bio_put(s->iop.bio); if (s->unaligned_bvec) mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec); @@ -703,21 +783,22 @@ static void search_free(struct closure *cl) static struct search *search_alloc(struct bio *bio, struct bcache_device *d) { + struct search *s; struct bio_vec *bv; - struct search *s = mempool_alloc(d->c->search, GFP_NOIO); - memset(s, 0, offsetof(struct search, op.keys)); + + s = mempool_alloc(d->c->search, GFP_NOIO); + memset(s, 0, offsetof(struct search, iop.insert_keys)); __closure_init(&s->cl, NULL); - s->op.inode = d->id; - s->op.c = d->c; + s->iop.inode = d->id; + s->iop.c = d->c; s->d = d; s->op.lock = -1; - s->task = current; + s->iop.write_point = hash_long((unsigned long) current, 16); s->orig_bio = bio; s->write = (bio->bi_rw & REQ_WRITE) != 0; - s->op.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0; - s->op.skip = (bio->bi_rw & REQ_DISCARD) != 0; + s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0; s->recoverable = 1; s->start_time = jiffies; do_bio_hook(s); @@ -734,18 +815,6 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d) return s; } -static void btree_read_async(struct closure *cl) -{ - struct btree_op *op = container_of(cl, struct btree_op, cl); - - int ret = btree_root(search_recurse, op->c, op); - - if (ret == -EAGAIN) - continue_at(cl, btree_read_async, bcache_wq); - - closure_return(cl); -} - /* Cached devices */ static void cached_dev_bio_complete(struct closure *cl) @@ -759,27 +828,28 @@ static void cached_dev_bio_complete(struct closure *cl) /* Process reads */ -static void cached_dev_read_complete(struct closure *cl) +static void cached_dev_cache_miss_done(struct closure *cl) { struct search *s = container_of(cl, struct search, cl); - if (s->op.insert_collision) - bch_mark_cache_miss_collision(s); + if (s->iop.replace_collision) + bch_mark_cache_miss_collision(s->iop.c, s->d); - if (s->op.cache_bio) { + if (s->iop.bio) { int i; struct bio_vec *bv; - __bio_for_each_segment(bv, s->op.cache_bio, i, 0) + bio_for_each_segment_all(bv, s->iop.bio, i) __free_page(bv->bv_page); } cached_dev_bio_complete(cl); } -static void request_read_error(struct closure *cl) +static void 
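
cache_lookup() above drives reads by walking the btree with bch_btree_map_keys(): the per-key callback returns MAP_CONTINUE to keep iterating or MAP_DONE once the whole bio has been handled, and if the walk itself comes back with -EAGAIN the lookup simply reschedules itself with continue_at(). A toy model of that contract, with invented names and an artificial "budget" standing in for whatever makes the real walk bail out early:

        #include <stdio.h>

        enum { MAP_DONE_TOY, MAP_CONTINUE_TOY };        /* toy stand-ins, values arbitrary */
        #define RETRY   (-1)

        /* pretend each "key" covers 8 sectors of a 24-sector read */
        static int lookup_one(int *remaining)
        {
                *remaining -= 8;
                printf("handled 8 sectors, %d left\n", *remaining);
                return *remaining > 0 ? MAP_CONTINUE_TOY : MAP_DONE_TOY;
        }

        /* the walk may bail out early; the caller reschedules and retries */
        static int map_keys(int *remaining, int budget)
        {
                while (budget--)
                        if (lookup_one(remaining) == MAP_DONE_TOY)
                                return MAP_DONE_TOY;
                return RETRY;
        }

        int main(void)
        {
                int remaining = 24;

                while (map_keys(&remaining, 2) == RETRY)
                        printf("got RETRY, rescheduling the lookup\n");
                return 0;
        }
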
cached_dev_read_error(struct closure *cl) { struct search *s = container_of(cl, struct search, cl); + struct bio *bio = &s->bio.bio; struct bio_vec *bv; int i; @@ -787,7 +857,7 @@ static void request_read_error(struct closure *cl) /* Retry from the backing device: */ trace_bcache_read_retry(s->orig_bio); - s->error = 0; + s->iop.error = 0; bv = s->bio.bio.bi_io_vec; do_bio_hook(s); s->bio.bio.bi_io_vec = bv; @@ -803,146 +873,148 @@ static void request_read_error(struct closure *cl) /* XXX: invalidate cache */ - closure_bio_submit(&s->bio.bio, &s->cl, s->d); + closure_bio_submit(bio, cl, s->d); } - continue_at(cl, cached_dev_read_complete, NULL); + continue_at(cl, cached_dev_cache_miss_done, NULL); } -static void request_read_done(struct closure *cl) +static void cached_dev_read_done(struct closure *cl) { struct search *s = container_of(cl, struct search, cl); struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); /* - * s->cache_bio != NULL implies that we had a cache miss; cache_bio now - * contains data ready to be inserted into the cache. + * We had a cache miss; cache_bio now contains data ready to be inserted + * into the cache. * * First, we copy the data we just read from cache_bio's bounce buffers * to the buffers the original bio pointed to: */ - if (s->op.cache_bio) { - bio_reset(s->op.cache_bio); - s->op.cache_bio->bi_sector = s->cache_miss->bi_sector; - s->op.cache_bio->bi_bdev = s->cache_miss->bi_bdev; - s->op.cache_bio->bi_size = s->cache_bio_sectors << 9; - bch_bio_map(s->op.cache_bio, NULL); + if (s->iop.bio) { + bio_reset(s->iop.bio); + s->iop.bio->bi_sector = s->cache_miss->bi_sector; + s->iop.bio->bi_bdev = s->cache_miss->bi_bdev; + s->iop.bio->bi_size = s->insert_bio_sectors << 9; + bch_bio_map(s->iop.bio, NULL); - bio_copy_data(s->cache_miss, s->op.cache_bio); + bio_copy_data(s->cache_miss, s->iop.bio); bio_put(s->cache_miss); s->cache_miss = NULL; } - if (verify(dc, &s->bio.bio) && s->recoverable) - bch_data_verify(s); + if (verify(dc, &s->bio.bio) && s->recoverable && + !s->unaligned_bvec && !s->read_dirty_data) + bch_data_verify(dc, s->orig_bio); bio_complete(s); - if (s->op.cache_bio && - !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) { - s->op.type = BTREE_REPLACE; - closure_call(&s->op.cl, bch_insert_data, NULL, cl); + if (s->iop.bio && + !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) { + BUG_ON(!s->iop.replace); + closure_call(&s->iop.cl, bch_data_insert, NULL, cl); } - continue_at(cl, cached_dev_read_complete, NULL); + continue_at(cl, cached_dev_cache_miss_done, NULL); } -static void request_read_done_bh(struct closure *cl) +static void cached_dev_read_done_bh(struct closure *cl) { struct search *s = container_of(cl, struct search, cl); struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); - bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip); - trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip); + bch_mark_cache_accounting(s->iop.c, s->d, + !s->cache_miss, s->iop.bypass); + trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass); - if (s->error) - continue_at_nobarrier(cl, request_read_error, bcache_wq); - else if (s->op.cache_bio || verify(dc, &s->bio.bio)) - continue_at_nobarrier(cl, request_read_done, bcache_wq); + if (s->iop.error) + continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq); + else if (s->iop.bio || verify(dc, &s->bio.bio)) + continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq); else - continue_at_nobarrier(cl, cached_dev_read_complete, NULL); + continue_at_nobarrier(cl, 
cached_dev_bio_complete, NULL); } static int cached_dev_cache_miss(struct btree *b, struct search *s, struct bio *bio, unsigned sectors) { - int ret = 0; - unsigned reada; + int ret = MAP_CONTINUE; + unsigned reada = 0; struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); - struct bio *miss; - - miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); - if (miss == bio) - s->op.lookup_done = true; + struct bio *miss, *cache_bio; - miss->bi_end_io = request_endio; - miss->bi_private = &s->cl; - - if (s->cache_miss || s->op.skip) + if (s->cache_miss || s->iop.bypass) { + miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); + ret = miss == bio ? MAP_DONE : MAP_CONTINUE; goto out_submit; - - if (miss != bio || - (bio->bi_rw & REQ_RAHEAD) || - (bio->bi_rw & REQ_META) || - s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA) - reada = 0; - else { - reada = min(dc->readahead >> 9, - sectors - bio_sectors(miss)); - - if (bio_end_sector(miss) + reada > bdev_sectors(miss->bi_bdev)) - reada = bdev_sectors(miss->bi_bdev) - - bio_end_sector(miss); } - s->cache_bio_sectors = bio_sectors(miss) + reada; - s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT, - DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS), - dc->disk.bio_split); + if (!(bio->bi_rw & REQ_RAHEAD) && + !(bio->bi_rw & REQ_META) && + s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA) + reada = min_t(sector_t, dc->readahead >> 9, + bdev_sectors(bio->bi_bdev) - bio_end_sector(bio)); - if (!s->op.cache_bio) - goto out_submit; + s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); - s->op.cache_bio->bi_sector = miss->bi_sector; - s->op.cache_bio->bi_bdev = miss->bi_bdev; - s->op.cache_bio->bi_size = s->cache_bio_sectors << 9; + s->iop.replace_key = KEY(s->iop.inode, + bio->bi_sector + s->insert_bio_sectors, + s->insert_bio_sectors); - s->op.cache_bio->bi_end_io = request_endio; - s->op.cache_bio->bi_private = &s->cl; + ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key); + if (ret) + return ret; + + s->iop.replace = true; + + miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); /* btree_search_recurse()'s btree iterator is no good anymore */ - ret = -EINTR; - if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio)) - goto out_put; + ret = miss == bio ? 
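
The replace_key built in cached_dev_cache_miss() above leans on bcache's extent convention: KEY(inode, offset, size) names the size sectors that end at offset, and KEY_START() is derived as KEY_OFFSET() minus KEY_SIZE(). So KEY(s->iop.inode, bio->bi_sector + s->insert_bio_sectors, s->insert_bio_sectors) covers exactly the region being read and promoted. A toy illustration of the convention; the fields below are simplified and are not the packed struct bkey layout:

        /* toy_key is illustrative only; real bkeys pack these fields into
         * bit-fields inside struct bkey and carry device pointers as well */
        struct toy_key {
                unsigned long long      inode;
                unsigned long long      offset; /* END of the extent, in sectors */
                unsigned                size;   /* length, in sectors */
        };

        #define TOY_KEY_START(k)        ((k).offset - (k).size)

        /* a 16-sector cache miss starting at sector 1000: */
        static const struct toy_key k = { .inode = 5, .offset = 1000 + 16, .size = 16 };
        /* TOY_KEY_START(k) == 1000, so the key spans sectors [1000, 1016) */
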
MAP_DONE : -EINTR; + + cache_bio = bio_alloc_bioset(GFP_NOWAIT, + DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS), + dc->disk.bio_split); + if (!cache_bio) + goto out_submit; + + cache_bio->bi_sector = miss->bi_sector; + cache_bio->bi_bdev = miss->bi_bdev; + cache_bio->bi_size = s->insert_bio_sectors << 9; + + cache_bio->bi_end_io = request_endio; + cache_bio->bi_private = &s->cl; - bch_bio_map(s->op.cache_bio, NULL); - if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO)) + bch_bio_map(cache_bio, NULL); + if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO)) goto out_put; - s->cache_miss = miss; - bio_get(s->op.cache_bio); + if (reada) + bch_mark_cache_readahead(s->iop.c, s->d); - closure_bio_submit(s->op.cache_bio, &s->cl, s->d); + s->cache_miss = miss; + s->iop.bio = cache_bio; + bio_get(cache_bio); + closure_bio_submit(cache_bio, &s->cl, s->d); return ret; out_put: - bio_put(s->op.cache_bio); - s->op.cache_bio = NULL; + bio_put(cache_bio); out_submit: + miss->bi_end_io = request_endio; + miss->bi_private = &s->cl; closure_bio_submit(miss, &s->cl, s->d); return ret; } -static void request_read(struct cached_dev *dc, struct search *s) +static void cached_dev_read(struct cached_dev *dc, struct search *s) { struct closure *cl = &s->cl; - check_should_skip(dc, s); - closure_call(&s->op.cl, btree_read_async, NULL, cl); - - continue_at(cl, request_read_done_bh, NULL); + closure_call(&s->iop.cl, cache_lookup, NULL, cl); + continue_at(cl, cached_dev_read_done_bh, NULL); } /* Process writes */ @@ -956,47 +1028,52 @@ static void cached_dev_write_complete(struct closure *cl) cached_dev_bio_complete(cl); } -static void request_write(struct cached_dev *dc, struct search *s) +static void cached_dev_write(struct cached_dev *dc, struct search *s) { struct closure *cl = &s->cl; struct bio *bio = &s->bio.bio; - struct bkey start, end; - start = KEY(dc->disk.id, bio->bi_sector, 0); - end = KEY(dc->disk.id, bio_end_sector(bio), 0); + struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0); + struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0); - bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end); + bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end); - check_should_skip(dc, s); down_read_non_owner(&dc->writeback_lock); - if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) { - s->op.skip = false; - s->writeback = true; + /* + * We overlap with some dirty data undergoing background + * writeback, force this write to writeback + */ + s->iop.bypass = false; + s->iop.writeback = true; } + /* + * Discards aren't _required_ to do anything, so skipping if + * check_overlapping returned true is ok + * + * But check_overlapping drops dirty keys for which io hasn't started, + * so we still want to call it. 
+ */ if (bio->bi_rw & REQ_DISCARD) - goto skip; + s->iop.bypass = true; if (should_writeback(dc, s->orig_bio, cache_mode(dc, bio), - s->op.skip)) { - s->op.skip = false; - s->writeback = true; + s->iop.bypass)) { + s->iop.bypass = false; + s->iop.writeback = true; } - if (s->op.skip) - goto skip; - - trace_bcache_write(s->orig_bio, s->writeback, s->op.skip); + if (s->iop.bypass) { + s->iop.bio = s->orig_bio; + bio_get(s->iop.bio); - if (!s->writeback) { - s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO, - dc->disk.bio_split); - - closure_bio_submit(bio, cl, s->d); - } else { + if (!(bio->bi_rw & REQ_DISCARD) || + blk_queue_discard(bdev_get_queue(dc->bdev))) + closure_bio_submit(bio, cl, s->d); + } else if (s->iop.writeback) { bch_writeback_add(dc); - s->op.cache_bio = bio; + s->iop.bio = bio; if (bio->bi_rw & REQ_FLUSH) { /* Also need to send a flush to the backing device */ @@ -1010,36 +1087,26 @@ static void request_write(struct cached_dev *dc, struct search *s) closure_bio_submit(flush, cl, s->d); } - } -out: - closure_call(&s->op.cl, bch_insert_data, NULL, cl); - continue_at(cl, cached_dev_write_complete, NULL); -skip: - s->op.skip = true; - s->op.cache_bio = s->orig_bio; - bio_get(s->op.cache_bio); + } else { + s->iop.bio = bio_clone_bioset(bio, GFP_NOIO, + dc->disk.bio_split); - if ((bio->bi_rw & REQ_DISCARD) && - !blk_queue_discard(bdev_get_queue(dc->bdev))) - goto out; + closure_bio_submit(bio, cl, s->d); + } - closure_bio_submit(bio, cl, s->d); - goto out; + closure_call(&s->iop.cl, bch_data_insert, NULL, cl); + continue_at(cl, cached_dev_write_complete, NULL); } -static void request_nodata(struct cached_dev *dc, struct search *s) +static void cached_dev_nodata(struct closure *cl) { - struct closure *cl = &s->cl; + struct search *s = container_of(cl, struct search, cl); struct bio *bio = &s->bio.bio; - if (bio->bi_rw & REQ_DISCARD) { - request_write(dc, s); - return; - } - - if (s->op.flush_journal) - bch_journal_meta(s->op.c, cl); + if (s->iop.flush_journal) + bch_journal_meta(s->iop.c, cl); + /* If it's a flush, we send the flush to the backing device too */ closure_bio_submit(bio, cl, s->d); continue_at(cl, cached_dev_bio_complete, NULL); @@ -1047,134 +1114,6 @@ static void request_nodata(struct cached_dev *dc, struct search *s) /* Cached devices - read & write stuff */ -unsigned bch_get_congested(struct cache_set *c) -{ - int i; - long rand; - - if (!c->congested_read_threshold_us && - !c->congested_write_threshold_us) - return 0; - - i = (local_clock_us() - c->congested_last_us) / 1024; - if (i < 0) - return 0; - - i += atomic_read(&c->congested); - if (i >= 0) - return 0; - - i += CONGESTED_MAX; - - if (i > 0) - i = fract_exp_two(i, 6); - - rand = get_random_int(); - i -= bitmap_weight(&rand, BITS_PER_LONG); - - return i > 0 ? 
i : 1; -} - -static void add_sequential(struct task_struct *t) -{ - ewma_add(t->sequential_io_avg, - t->sequential_io, 8, 0); - - t->sequential_io = 0; -} - -static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k) -{ - return &dc->io_hash[hash_64(k, RECENT_IO_BITS)]; -} - -static void check_should_skip(struct cached_dev *dc, struct search *s) -{ - struct cache_set *c = s->op.c; - struct bio *bio = &s->bio.bio; - unsigned mode = cache_mode(dc, bio); - unsigned sectors, congested = bch_get_congested(c); - - if (atomic_read(&dc->disk.detaching) || - c->gc_stats.in_use > CUTOFF_CACHE_ADD || - (bio->bi_rw & REQ_DISCARD)) - goto skip; - - if (mode == CACHE_MODE_NONE || - (mode == CACHE_MODE_WRITEAROUND && - (bio->bi_rw & REQ_WRITE))) - goto skip; - - if (bio->bi_sector & (c->sb.block_size - 1) || - bio_sectors(bio) & (c->sb.block_size - 1)) { - pr_debug("skipping unaligned io"); - goto skip; - } - - if (!congested && !dc->sequential_cutoff) - goto rescale; - - if (!congested && - mode == CACHE_MODE_WRITEBACK && - (bio->bi_rw & REQ_WRITE) && - (bio->bi_rw & REQ_SYNC)) - goto rescale; - - if (dc->sequential_merge) { - struct io *i; - - spin_lock(&dc->io_lock); - - hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash) - if (i->last == bio->bi_sector && - time_before(jiffies, i->jiffies)) - goto found; - - i = list_first_entry(&dc->io_lru, struct io, lru); - - add_sequential(s->task); - i->sequential = 0; -found: - if (i->sequential + bio->bi_size > i->sequential) - i->sequential += bio->bi_size; - - i->last = bio_end_sector(bio); - i->jiffies = jiffies + msecs_to_jiffies(5000); - s->task->sequential_io = i->sequential; - - hlist_del(&i->hash); - hlist_add_head(&i->hash, iohash(dc, i->last)); - list_move_tail(&i->lru, &dc->io_lru); - - spin_unlock(&dc->io_lock); - } else { - s->task->sequential_io = bio->bi_size; - - add_sequential(s->task); - } - - sectors = max(s->task->sequential_io, - s->task->sequential_io_avg) >> 9; - - if (dc->sequential_cutoff && - sectors >= dc->sequential_cutoff >> 9) { - trace_bcache_bypass_sequential(s->orig_bio); - goto skip; - } - - if (congested && sectors >= congested) { - trace_bcache_bypass_congested(s->orig_bio); - goto skip; - } - -rescale: - bch_rescale_priorities(c, bio_sectors(bio)); - return; -skip: - bch_mark_sectors_bypassed(s, bio_sectors(bio)); - s->op.skip = true; -} - static void cached_dev_make_request(struct request_queue *q, struct bio *bio) { struct search *s; @@ -1192,14 +1131,24 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio) if (cached_dev_get(dc)) { s = search_alloc(bio, d); - trace_bcache_request_start(s, bio); - - if (!bio_has_data(bio)) - request_nodata(dc, s); - else if (rw) - request_write(dc, s); - else - request_read(dc, s); + trace_bcache_request_start(s->d, bio); + + if (!bio->bi_size) { + /* + * can't call bch_journal_meta from under + * generic_make_request + */ + continue_at_nobarrier(&s->cl, + cached_dev_nodata, + bcache_wq); + } else { + s->iop.bypass = check_should_bypass(dc, bio); + + if (rw) + cached_dev_write(dc, s); + else + cached_dev_read(dc, s); + } } else { if ((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(bdev_get_queue(dc->bdev))) @@ -1274,9 +1223,19 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s, bio_advance(bio, min(sectors << 9, bio->bi_size)); if (!bio->bi_size) - s->op.lookup_done = true; + return MAP_DONE; - return 0; + return MAP_CONTINUE; +} + +static void flash_dev_nodata(struct closure *cl) +{ + struct search *s = container_of(cl, struct 
search, cl); + + if (s->iop.flush_journal) + bch_journal_meta(s->iop.c, cl); + + continue_at(cl, search_free, NULL); } static void flash_dev_make_request(struct request_queue *q, struct bio *bio) @@ -1295,23 +1254,28 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio) cl = &s->cl; bio = &s->bio.bio; - trace_bcache_request_start(s, bio); + trace_bcache_request_start(s->d, bio); - if (bio_has_data(bio) && !rw) { - closure_call(&s->op.cl, btree_read_async, NULL, cl); - } else if (bio_has_data(bio) || s->op.skip) { - bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, + if (!bio->bi_size) { + /* + * can't call bch_journal_meta from under + * generic_make_request + */ + continue_at_nobarrier(&s->cl, + flash_dev_nodata, + bcache_wq); + } else if (rw) { + bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &KEY(d->id, bio->bi_sector, 0), &KEY(d->id, bio_end_sector(bio), 0)); - s->writeback = true; - s->op.cache_bio = bio; + s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0; + s->iop.writeback = true; + s->iop.bio = bio; - closure_call(&s->op.cl, bch_insert_data, NULL, cl); + closure_call(&s->iop.cl, bch_data_insert, NULL, cl); } else { - /* No data - probably a cache flush */ - if (s->op.flush_journal) - bch_journal_meta(s->op.c, cl); + closure_call(&s->iop.cl, cache_lookup, NULL, cl); } continue_at(cl, search_free, NULL); diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h index 57dc4784f4f4..2cd65bf073c2 100644 --- a/drivers/md/bcache/request.h +++ b/drivers/md/bcache/request.h @@ -3,40 +3,33 @@ #include <linux/cgroup.h> -struct search { - /* Stack frame for bio_complete */ +struct data_insert_op { struct closure cl; + struct cache_set *c; + struct bio *bio; - struct bcache_device *d; - struct task_struct *task; - - struct bbio bio; - struct bio *orig_bio; - struct bio *cache_miss; - unsigned cache_bio_sectors; - - unsigned recoverable:1; - unsigned unaligned_bvec:1; + unsigned inode; + uint16_t write_point; + uint16_t write_prio; + short error; - unsigned write:1; + unsigned bypass:1; unsigned writeback:1; + unsigned flush_journal:1; + unsigned csum:1; - /* IO error returned to s->bio */ - short error; - unsigned long start_time; + unsigned replace:1; + unsigned replace_collision:1; + + unsigned insert_data_done:1; - /* Anything past op->keys won't get zeroed in do_bio_hook */ - struct btree_op op; + /* Anything past this point won't get zeroed in search_alloc() */ + struct keylist insert_keys; + BKEY_PADDED(replace_key); }; -void bch_cache_read_endio(struct bio *, int); unsigned bch_get_congested(struct cache_set *); -void bch_insert_data(struct closure *cl); -void bch_btree_insert_async(struct closure *); -void bch_cache_read_endio(struct bio *, int); - -void bch_open_buckets_free(struct cache_set *); -int bch_open_buckets_alloc(struct cache_set *); +void bch_data_insert(struct closure *cl); void bch_cached_dev_request_init(struct cached_dev *dc); void bch_flash_dev_request_init(struct bcache_device *d); diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c index b8730e714d69..84d0782f702e 100644 --- a/drivers/md/bcache/stats.c +++ b/drivers/md/bcache/stats.c @@ -7,7 +7,6 @@ #include "bcache.h" #include "stats.h" #include "btree.h" -#include "request.h" #include "sysfs.h" /* @@ -196,35 +195,36 @@ static void mark_cache_stats(struct cache_stat_collector *stats, atomic_inc(&stats->cache_bypass_misses); } -void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass) +void bch_mark_cache_accounting(struct cache_set *c, 
struct bcache_device *d, + bool hit, bool bypass) { - struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); + struct cached_dev *dc = container_of(d, struct cached_dev, disk); mark_cache_stats(&dc->accounting.collector, hit, bypass); - mark_cache_stats(&s->op.c->accounting.collector, hit, bypass); + mark_cache_stats(&c->accounting.collector, hit, bypass); #ifdef CONFIG_CGROUP_BCACHE mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass); #endif } -void bch_mark_cache_readahead(struct search *s) +void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d) { - struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); + struct cached_dev *dc = container_of(d, struct cached_dev, disk); atomic_inc(&dc->accounting.collector.cache_readaheads); - atomic_inc(&s->op.c->accounting.collector.cache_readaheads); + atomic_inc(&c->accounting.collector.cache_readaheads); } -void bch_mark_cache_miss_collision(struct search *s) +void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d) { - struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); + struct cached_dev *dc = container_of(d, struct cached_dev, disk); atomic_inc(&dc->accounting.collector.cache_miss_collisions); - atomic_inc(&s->op.c->accounting.collector.cache_miss_collisions); + atomic_inc(&c->accounting.collector.cache_miss_collisions); } -void bch_mark_sectors_bypassed(struct search *s, int sectors) +void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc, + int sectors) { - struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); atomic_add(sectors, &dc->accounting.collector.sectors_bypassed); - atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed); + atomic_add(sectors, &c->accounting.collector.sectors_bypassed); } void bch_cache_accounting_init(struct cache_accounting *acc, diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h index c7c7a8fd29fe..adbff141c887 100644 --- a/drivers/md/bcache/stats.h +++ b/drivers/md/bcache/stats.h @@ -38,7 +38,9 @@ struct cache_accounting { struct cache_stats day; }; -struct search; +struct cache_set; +struct cached_dev; +struct bcache_device; void bch_cache_accounting_init(struct cache_accounting *acc, struct closure *parent); @@ -50,9 +52,10 @@ void bch_cache_accounting_clear(struct cache_accounting *acc); void bch_cache_accounting_destroy(struct cache_accounting *acc); -void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass); -void bch_mark_cache_readahead(struct search *s); -void bch_mark_cache_miss_collision(struct search *s); -void bch_mark_sectors_bypassed(struct search *s, int sectors); +void bch_mark_cache_accounting(struct cache_set *, struct bcache_device *, + bool, bool); +void bch_mark_cache_readahead(struct cache_set *, struct bcache_device *); +void bch_mark_cache_miss_collision(struct cache_set *, struct bcache_device *); +void bch_mark_sectors_bypassed(struct cache_set *, struct cached_dev *, int); #endif /* _BCACHE_STATS_H_ */ diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 547c4c57b052..dec15cd2d797 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -16,6 +16,7 @@ #include <linux/buffer_head.h> #include <linux/debugfs.h> #include <linux/genhd.h> +#include <linux/idr.h> #include <linux/kthread.h> #include <linux/module.h> #include <linux/random.h> @@ -45,21 +46,13 @@ const char * const bch_cache_modes[] = { NULL }; -struct uuid_entry_v0 { - uint8_t uuid[16]; - uint8_t label[32]; - 
uint32_t first_reg; - uint32_t last_reg; - uint32_t invalidated; - uint32_t pad; -}; - static struct kobject *bcache_kobj; struct mutex bch_register_lock; LIST_HEAD(bch_cache_sets); static LIST_HEAD(uncached_devices); -static int bcache_major, bcache_minor; +static int bcache_major; +static DEFINE_IDA(bcache_minor); static wait_queue_head_t unregister_wait; struct workqueue_struct *bcache_wq; @@ -382,7 +375,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) { struct bkey *k = &j->uuid_bucket; - if (__bch_ptr_invalid(c, 1, k)) + if (bch_btree_ptr_invalid(c, k)) return "bad uuid pointer"; bkey_copy(&c->uuid_bucket, k); @@ -427,7 +420,7 @@ static int __uuid_write(struct cache_set *c) lockdep_assert_held(&bch_register_lock); - if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, &cl)) + if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true)) return 1; SET_KEY_SIZE(&k.key, c->sb.bucket_size); @@ -435,7 +428,7 @@ static int __uuid_write(struct cache_set *c) closure_sync(&cl); bkey_copy(&c->uuid_bucket, &k.key); - __bkey_put(c, &k.key); + bkey_put(c, &k.key); return 0; } @@ -562,10 +555,10 @@ void bch_prio_write(struct cache *ca) } p->next_bucket = ca->prio_buckets[i + 1]; - p->magic = pset_magic(ca); + p->magic = pset_magic(&ca->sb); p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8); - bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, &cl); + bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, true); BUG_ON(bucket == -1); mutex_unlock(&ca->set->bucket_lock); @@ -613,7 +606,7 @@ static void prio_read(struct cache *ca, uint64_t bucket) if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8)) pr_warn("bad csum reading priorities"); - if (p->magic != pset_magic(ca)) + if (p->magic != pset_magic(&ca->sb)) pr_warn("bad magic reading priorities"); bucket = p->next_bucket; @@ -630,7 +623,7 @@ static void prio_read(struct cache *ca, uint64_t bucket) static int open_dev(struct block_device *b, fmode_t mode) { struct bcache_device *d = b->bd_disk->private_data; - if (atomic_read(&d->closing)) + if (test_bit(BCACHE_DEV_CLOSING, &d->flags)) return -ENXIO; closure_get(&d->cl); @@ -659,20 +652,24 @@ static const struct block_device_operations bcache_ops = { void bcache_device_stop(struct bcache_device *d) { - if (!atomic_xchg(&d->closing, 1)) + if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags)) closure_queue(&d->cl); } static void bcache_device_unlink(struct bcache_device *d) { - unsigned i; - struct cache *ca; + lockdep_assert_held(&bch_register_lock); - sysfs_remove_link(&d->c->kobj, d->name); - sysfs_remove_link(&d->kobj, "cache"); + if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) { + unsigned i; + struct cache *ca; - for_each_cache(ca, d->c, i) - bd_unlink_disk_holder(ca->bdev, d->disk); + sysfs_remove_link(&d->c->kobj, d->name); + sysfs_remove_link(&d->kobj, "cache"); + + for_each_cache(ca, d->c, i) + bd_unlink_disk_holder(ca->bdev, d->disk); + } } static void bcache_device_link(struct bcache_device *d, struct cache_set *c, @@ -696,19 +693,16 @@ static void bcache_device_detach(struct bcache_device *d) { lockdep_assert_held(&bch_register_lock); - if (atomic_read(&d->detaching)) { + if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) { struct uuid_entry *u = d->c->uuids + d->id; SET_UUID_FLASH_ONLY(u, 0); memcpy(u->uuid, invalid_uuid, 16); u->invalidated = cpu_to_le32(get_seconds()); bch_uuid_write(d->c); - - atomic_set(&d->detaching, 0); } - if (!d->flush_done) - bcache_device_unlink(d); + bcache_device_unlink(d); d->c->devices[d->id] = NULL; 
closure_put(&d->c->caching); @@ -739,14 +733,20 @@ static void bcache_device_free(struct bcache_device *d) del_gendisk(d->disk); if (d->disk && d->disk->queue) blk_cleanup_queue(d->disk->queue); - if (d->disk) + if (d->disk) { + ida_simple_remove(&bcache_minor, d->disk->first_minor); put_disk(d->disk); + } bio_split_pool_free(&d->bio_split_hook); if (d->unaligned_bvec) mempool_destroy(d->unaligned_bvec); if (d->bio_split) bioset_free(d->bio_split); + if (is_vmalloc_addr(d->full_dirty_stripes)) + vfree(d->full_dirty_stripes); + else + kfree(d->full_dirty_stripes); if (is_vmalloc_addr(d->stripe_sectors_dirty)) vfree(d->stripe_sectors_dirty); else @@ -760,15 +760,19 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, { struct request_queue *q; size_t n; + int minor; - if (!d->stripe_size_bits) - d->stripe_size_bits = 31; + if (!d->stripe_size) + d->stripe_size = 1 << 31; - d->nr_stripes = round_up(sectors, 1 << d->stripe_size_bits) >> - d->stripe_size_bits; + d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size); - if (!d->nr_stripes || d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) + if (!d->nr_stripes || + d->nr_stripes > INT_MAX || + d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) { + pr_err("nr_stripes too large"); return -ENOMEM; + } n = d->nr_stripes * sizeof(atomic_t); d->stripe_sectors_dirty = n < PAGE_SIZE << 6 @@ -777,22 +781,38 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, if (!d->stripe_sectors_dirty) return -ENOMEM; + n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long); + d->full_dirty_stripes = n < PAGE_SIZE << 6 + ? kzalloc(n, GFP_KERNEL) + : vzalloc(n); + if (!d->full_dirty_stripes) + return -ENOMEM; + + minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL); + if (minor < 0) + return minor; + if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || !(d->unaligned_bvec = mempool_create_kmalloc_pool(1, sizeof(struct bio_vec) * BIO_MAX_PAGES)) || bio_split_pool_init(&d->bio_split_hook) || - !(d->disk = alloc_disk(1)) || - !(q = blk_alloc_queue(GFP_KERNEL))) + !(d->disk = alloc_disk(1))) { + ida_simple_remove(&bcache_minor, minor); return -ENOMEM; + } set_capacity(d->disk, sectors); - snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor); + snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor); d->disk->major = bcache_major; - d->disk->first_minor = bcache_minor++; + d->disk->first_minor = minor; d->disk->fops = &bcache_ops; d->disk->private_data = d; + q = blk_alloc_queue(GFP_KERNEL); + if (!q) + return -ENOMEM; + blk_queue_make_request(q, NULL); d->disk->queue = q; q->queuedata = d; @@ -874,7 +894,7 @@ static void cached_dev_detach_finish(struct work_struct *w) struct closure cl; closure_init_stack(&cl); - BUG_ON(!atomic_read(&dc->disk.detaching)); + BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)); BUG_ON(atomic_read(&dc->count)); mutex_lock(&bch_register_lock); @@ -888,6 +908,8 @@ static void cached_dev_detach_finish(struct work_struct *w) bcache_device_detach(&dc->disk); list_move(&dc->list, &uncached_devices); + clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags); + mutex_unlock(&bch_register_lock); pr_info("Caching disabled for %s", bdevname(dc->bdev, buf)); @@ -900,10 +922,10 @@ void bch_cached_dev_detach(struct cached_dev *dc) { lockdep_assert_held(&bch_register_lock); - if (atomic_read(&dc->disk.closing)) + if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) return; - if (atomic_xchg(&dc->disk.detaching, 1)) + if (test_and_set_bit(BCACHE_DEV_DETACHING, 
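
Replacing the monotonically increasing bcache_minor counter with an IDA (above: DEFINE_IDA(bcache_minor), ida_simple_get() in bcache_device_init() and ida_simple_remove() in bcache_device_free()) means minor numbers go back into the pool when a device is torn down, so repeated register/unregister cycles no longer exhaust the minor space. A minimal sketch of the allocate/release pairing; the ida name and wrappers here are illustrative, not the bcache code:

        #include <linux/gfp.h>
        #include <linux/idr.h>
        #include <linux/kdev_t.h>

        static DEFINE_IDA(example_minor);      /* illustrative, not the real bcache_minor */

        static int example_get_minor(void)
        {
                /* returns a free id in [0, MINORMASK] or a negative errno */
                return ida_simple_get(&example_minor, 0, MINORMASK + 1, GFP_KERNEL);
        }

        static void example_put_minor(int minor)
        {
                /* the id becomes available for reuse immediately */
                ida_simple_remove(&example_minor, minor);
        }
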
&dc->disk.flags)) return; /* @@ -1030,6 +1052,7 @@ static void cached_dev_free(struct closure *cl) struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); cancel_delayed_work_sync(&dc->writeback_rate_update); + kthread_stop(dc->writeback_thread); mutex_lock(&bch_register_lock); @@ -1058,11 +1081,7 @@ static void cached_dev_flush(struct closure *cl) struct bcache_device *d = &dc->disk; mutex_lock(&bch_register_lock); - d->flush_done = 1; - - if (d->c) - bcache_device_unlink(d); - + bcache_device_unlink(d); mutex_unlock(&bch_register_lock); bch_cache_accounting_destroy(&dc->accounting); @@ -1088,7 +1107,6 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size) spin_lock_init(&dc->io_lock); bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); - dc->sequential_merge = true; dc->sequential_cutoff = 4 << 20; for (io = dc->io; io < dc->io + RECENT_IO; io++) { @@ -1260,7 +1278,8 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) { va_list args; - if (test_bit(CACHE_SET_STOPPING, &c->flags)) + if (c->on_error != ON_ERROR_PANIC && + test_bit(CACHE_SET_STOPPING, &c->flags)) return false; /* XXX: we can be called from atomic context @@ -1275,6 +1294,9 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) printk(", disabling caching\n"); + if (c->on_error == ON_ERROR_PANIC) + panic("panic forced after error\n"); + bch_cache_set_unregister(c); return true; } @@ -1339,6 +1361,9 @@ static void cache_set_flush(struct closure *cl) kobject_put(&c->internal); kobject_del(&c->kobj); + if (c->gc_thread) + kthread_stop(c->gc_thread); + if (!IS_ERR_OR_NULL(c->root)) list_add(&c->root->list, &c->btree_cache); @@ -1433,12 +1458,19 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) c->sort_crit_factor = int_sqrt(c->btree_pages); - mutex_init(&c->bucket_lock); - mutex_init(&c->sort_lock); - spin_lock_init(&c->sort_time_lock); closure_init_unlocked(&c->sb_write); + mutex_init(&c->bucket_lock); + init_waitqueue_head(&c->try_wait); + init_waitqueue_head(&c->bucket_wait); closure_init_unlocked(&c->uuid_write); - spin_lock_init(&c->btree_read_time_lock); + mutex_init(&c->sort_lock); + + spin_lock_init(&c->sort_time.lock); + spin_lock_init(&c->btree_gc_time.lock); + spin_lock_init(&c->btree_split_time.lock); + spin_lock_init(&c->btree_read_time.lock); + spin_lock_init(&c->try_harder_time.lock); + bch_moving_init_cache_set(c); INIT_LIST_HEAD(&c->list); @@ -1483,11 +1515,10 @@ static void run_cache_set(struct cache_set *c) const char *err = "cannot allocate memory"; struct cached_dev *dc, *t; struct cache *ca; + struct closure cl; unsigned i; - struct btree_op op; - bch_btree_op_init_stack(&op); - op.lock = SHRT_MAX; + closure_init_stack(&cl); for_each_cache(ca, c, i) c->nbuckets += ca->sb.nbuckets; @@ -1498,7 +1529,7 @@ static void run_cache_set(struct cache_set *c) struct jset *j; err = "cannot allocate memory for journal"; - if (bch_journal_read(c, &journal, &op)) + if (bch_journal_read(c, &journal)) goto err; pr_debug("btree_journal_read() done"); @@ -1522,23 +1553,23 @@ static void run_cache_set(struct cache_set *c) k = &j->btree_root; err = "bad btree root"; - if (__bch_ptr_invalid(c, j->btree_level + 1, k)) + if (bch_btree_ptr_invalid(c, k)) goto err; err = "error reading btree root"; - c->root = bch_btree_node_get(c, k, j->btree_level, &op); + c->root = bch_btree_node_get(c, k, j->btree_level, true); if (IS_ERR_OR_NULL(c->root)) goto err; list_del_init(&c->root->list); rw_unlock(true, c->root); - err = uuid_read(c, j, &op.cl); + err 
= uuid_read(c, j, &cl); if (err) goto err; err = "error in recovery"; - if (bch_btree_check(c, &op)) + if (bch_btree_check(c)) goto err; bch_journal_mark(c, &journal); @@ -1570,11 +1601,9 @@ static void run_cache_set(struct cache_set *c) if (j->version < BCACHE_JSET_VERSION_UUID) __uuid_write(c); - bch_journal_replay(c, &journal, &op); + bch_journal_replay(c, &journal); } else { pr_notice("invalidating existing data"); - /* Don't want invalidate_buckets() to queue a gc yet */ - closure_lock(&c->gc, NULL); for_each_cache(ca, c, i) { unsigned j; @@ -1600,15 +1629,15 @@ static void run_cache_set(struct cache_set *c) err = "cannot allocate new UUID bucket"; if (__uuid_write(c)) - goto err_unlock_gc; + goto err; err = "cannot allocate new btree root"; - c->root = bch_btree_node_alloc(c, 0, &op.cl); + c->root = bch_btree_node_alloc(c, 0, true); if (IS_ERR_OR_NULL(c->root)) - goto err_unlock_gc; + goto err; bkey_copy_key(&c->root->key, &MAX_KEY); - bch_btree_node_write(c->root, &op.cl); + bch_btree_node_write(c->root, &cl); bch_btree_set_root(c->root); rw_unlock(true, c->root); @@ -1621,14 +1650,14 @@ static void run_cache_set(struct cache_set *c) SET_CACHE_SYNC(&c->sb, true); bch_journal_next(&c->journal); - bch_journal_meta(c, &op.cl); - - /* Unlock */ - closure_set_stopped(&c->gc.cl); - closure_put(&c->gc.cl); + bch_journal_meta(c, &cl); } - closure_sync(&op.cl); + err = "error starting gc thread"; + if (bch_gc_thread_start(c)) + goto err; + + closure_sync(&cl); c->sb.last_mount = get_seconds(); bcache_write_super(c); @@ -1638,13 +1667,10 @@ static void run_cache_set(struct cache_set *c) flash_devs_run(c); return; -err_unlock_gc: - closure_set_stopped(&c->gc.cl); - closure_put(&c->gc.cl); err: - closure_sync(&op.cl); + closure_sync(&cl); /* XXX: test this, it's broken */ - bch_cache_set_error(c, err); + bch_cache_set_error(c, "%s", err); } static bool can_attach_cache(struct cache *ca, struct cache_set *c) @@ -1725,8 +1751,6 @@ void bch_cache_release(struct kobject *kobj) if (ca->set) ca->set->cache[ca->sb.nr_this_dev] = NULL; - bch_cache_allocator_exit(ca); - bio_split_pool_free(&ca->bio_split_hook); free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca))); @@ -1758,8 +1782,6 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca) __module_get(THIS_MODULE); kobject_init(&ca->kobj, &bch_cache_ktype); - INIT_LIST_HEAD(&ca->discards); - bio_init(&ca->journal.bio); ca->journal.bio.bi_max_vecs = 8; ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs; @@ -2006,7 +2028,6 @@ static struct notifier_block reboot = { static void bcache_exit(void) { bch_debug_exit(); - bch_writeback_exit(); bch_request_exit(); bch_btree_exit(); if (bcache_kobj) @@ -2039,7 +2060,6 @@ static int __init bcache_init(void) sysfs_create_files(bcache_kobj, files) || bch_btree_init() || bch_request_init() || - bch_writeback_init() || bch_debug_init(bcache_kobj)) goto err; diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 924dcfdae111..80d4c2bee18a 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -21,6 +21,12 @@ static const char * const cache_replacement_policies[] = { NULL }; +static const char * const error_actions[] = { + "unregister", + "panic", + NULL +}; + write_attribute(attach); write_attribute(detach); write_attribute(unregister); @@ -66,7 +72,6 @@ rw_attribute(congested_read_threshold_us); rw_attribute(congested_write_threshold_us); rw_attribute(sequential_cutoff); -rw_attribute(sequential_merge); rw_attribute(data_csum); 
rw_attribute(cache_mode); rw_attribute(writeback_metadata); @@ -90,11 +95,14 @@ rw_attribute(discard); rw_attribute(running); rw_attribute(label); rw_attribute(readahead); +rw_attribute(errors); rw_attribute(io_error_limit); rw_attribute(io_error_halflife); rw_attribute(verify); +rw_attribute(bypass_torture_test); rw_attribute(key_merging_disabled); rw_attribute(gc_always_rewrite); +rw_attribute(expensive_debug_checks); rw_attribute(freelist_percent); rw_attribute(cache_replacement_policy); rw_attribute(btree_shrinker_disabled); @@ -116,6 +124,7 @@ SHOW(__bch_cached_dev) sysfs_printf(data_csum, "%i", dc->disk.data_csum); var_printf(verify, "%i"); + var_printf(bypass_torture_test, "%i"); var_printf(writeback_metadata, "%i"); var_printf(writeback_running, "%i"); var_print(writeback_delay); @@ -150,10 +159,9 @@ SHOW(__bch_cached_dev) sysfs_hprint(dirty_data, bcache_dev_sectors_dirty(&dc->disk) << 9); - sysfs_hprint(stripe_size, (1 << dc->disk.stripe_size_bits) << 9); + sysfs_hprint(stripe_size, dc->disk.stripe_size << 9); var_printf(partial_stripes_expensive, "%u"); - var_printf(sequential_merge, "%i"); var_hprint(sequential_cutoff); var_hprint(readahead); @@ -185,6 +193,7 @@ STORE(__cached_dev) sysfs_strtoul(data_csum, dc->disk.data_csum); d_strtoul(verify); + d_strtoul(bypass_torture_test); d_strtoul(writeback_metadata); d_strtoul(writeback_running); d_strtoul(writeback_delay); @@ -199,7 +208,6 @@ STORE(__cached_dev) dc->writeback_rate_p_term_inverse, 1, INT_MAX); d_strtoul(writeback_rate_d_smooth); - d_strtoul(sequential_merge); d_strtoi_h(sequential_cutoff); d_strtoi_h(readahead); @@ -311,7 +319,6 @@ static struct attribute *bch_cached_dev_files[] = { &sysfs_stripe_size, &sysfs_partial_stripes_expensive, &sysfs_sequential_cutoff, - &sysfs_sequential_merge, &sysfs_clear_stats, &sysfs_running, &sysfs_state, @@ -319,6 +326,7 @@ static struct attribute *bch_cached_dev_files[] = { &sysfs_readahead, #ifdef CONFIG_BCACHE_DEBUG &sysfs_verify, + &sysfs_bypass_torture_test, #endif NULL }; @@ -366,7 +374,7 @@ STORE(__bch_flash_dev) } if (attr == &sysfs_unregister) { - atomic_set(&d->detaching, 1); + set_bit(BCACHE_DEV_DETACHING, &d->flags); bcache_device_stop(d); } @@ -481,7 +489,6 @@ lock_root: sysfs_print(btree_used_percent, btree_used(c)); sysfs_print(btree_nodes, c->gc_stats.nodes); - sysfs_hprint(dirty_data, c->gc_stats.dirty); sysfs_hprint(average_key_size, average_key_size(c)); sysfs_print(cache_read_races, @@ -492,6 +499,10 @@ lock_root: sysfs_print(writeback_keys_failed, atomic_long_read(&c->writeback_keys_failed)); + if (attr == &sysfs_errors) + return bch_snprint_string_list(buf, PAGE_SIZE, error_actions, + c->on_error); + /* See count_io_errors for why 88 */ sysfs_print(io_error_halflife, c->error_decay * 88); sysfs_print(io_error_limit, c->error_limit >> IO_ERROR_SHIFT); @@ -506,6 +517,8 @@ lock_root: sysfs_print(active_journal_entries, fifo_used(&c->journal.pin)); sysfs_printf(verify, "%i", c->verify); sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled); + sysfs_printf(expensive_debug_checks, + "%i", c->expensive_debug_checks); sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite); sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled); sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled); @@ -555,7 +568,7 @@ STORE(__bch_cache_set) } if (attr == &sysfs_trigger_gc) - bch_queue_gc(c); + wake_up_gc(c); if (attr == &sysfs_prune_cache) { struct shrink_control sc; @@ -569,6 +582,15 @@ STORE(__bch_cache_set) sysfs_strtoul(congested_write_threshold_us, 
c->congested_write_threshold_us); + if (attr == &sysfs_errors) { + ssize_t v = bch_read_string_list(buf, error_actions); + + if (v < 0) + return v; + + c->on_error = v; + } + if (attr == &sysfs_io_error_limit) c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT; @@ -579,6 +601,7 @@ STORE(__bch_cache_set) sysfs_strtoul(journal_delay_ms, c->journal_delay_ms); sysfs_strtoul(verify, c->verify); sysfs_strtoul(key_merging_disabled, c->key_merging_disabled); + sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks); sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite); sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled); sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled); @@ -618,8 +641,8 @@ static struct attribute *bch_cache_set_files[] = { &sysfs_cache_available_percent, &sysfs_average_key_size, - &sysfs_dirty_data, + &sysfs_errors, &sysfs_io_error_limit, &sysfs_io_error_halflife, &sysfs_congested, @@ -653,6 +676,7 @@ static struct attribute *bch_cache_set_internal_files[] = { #ifdef CONFIG_BCACHE_DEBUG &sysfs_verify, &sysfs_key_merging_disabled, + &sysfs_expensive_debug_checks, #endif &sysfs_gc_always_rewrite, &sysfs_btree_shrinker_disabled, diff --git a/drivers/md/bcache/trace.c b/drivers/md/bcache/trace.c index f7b6c197f90f..adbc3df17a80 100644 --- a/drivers/md/bcache/trace.c +++ b/drivers/md/bcache/trace.c @@ -1,6 +1,5 @@ #include "bcache.h" #include "btree.h" -#include "request.h" #include <linux/blktrace_api.h> #include <linux/module.h> diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index 420dad545c7d..462214eeacbe 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -168,10 +168,14 @@ int bch_parse_uuid(const char *s, char *uuid) void bch_time_stats_update(struct time_stats *stats, uint64_t start_time) { - uint64_t now = local_clock(); - uint64_t duration = time_after64(now, start_time) + uint64_t now, duration, last; + + spin_lock(&stats->lock); + + now = local_clock(); + duration = time_after64(now, start_time) ? now - start_time : 0; - uint64_t last = time_after64(now, stats->last) + last = time_after64(now, stats->last) ? 
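
The new "errors" attribute above is stored by mapping the written string onto the NULL-terminated error_actions[] table and remembering the resulting index in c->on_error (and printed the other way round with bch_snprint_string_list()). A rough sketch of that kind of lookup; the helper below is a simplified guess at the idea, not the actual bch_read_string_list() from util.c:

        #include <string.h>

        /* returns the index of buf in the NULL-terminated list, or -1 */
        static int read_string_list(const char *buf, const char * const list[])
        {
                size_t len = strcspn(buf, "\n");        /* sysfs writes often end in \n */

                for (int i = 0; list[i]; i++)
                        if (strlen(list[i]) == len && !strncmp(buf, list[i], len))
                                return i;

                return -1;
        }

        /* read_string_list("panic\n", error_actions) returns 1, the index stored
         * in c->on_error */
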
now - stats->last : 0; stats->max_duration = max(stats->max_duration, duration); @@ -188,6 +192,8 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time) } stats->last = now ?: 1; + + spin_unlock(&stats->lock); } /** diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index ea345c6896f4..362c4b3f8b4a 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -15,28 +15,18 @@ struct closure; -#ifdef CONFIG_BCACHE_EDEBUG +#ifdef CONFIG_BCACHE_DEBUG #define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0) #define atomic_inc_bug(v, i) BUG_ON(atomic_inc_return(v) <= i) -#else /* EDEBUG */ +#else /* DEBUG */ #define atomic_dec_bug(v) atomic_dec(v) #define atomic_inc_bug(v, i) atomic_inc(v) #endif -#define BITMASK(name, type, field, offset, size) \ -static inline uint64_t name(const type *k) \ -{ return (k->field >> offset) & ~(((uint64_t) ~0) << size); } \ - \ -static inline void SET_##name(type *k, uint64_t v) \ -{ \ - k->field &= ~(~((uint64_t) ~0 << size) << offset); \ - k->field |= v << offset; \ -} - #define DECLARE_HEAP(type, name) \ struct { \ size_t size, used; \ @@ -388,6 +378,7 @@ ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[ ssize_t bch_read_string_list(const char *buf, const char * const list[]); struct time_stats { + spinlock_t lock; /* * all fields are in nanoseconds, averages are ewmas stored left shifted * by 8 diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index ba3ee48320f2..99053b1251be 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -11,18 +11,11 @@ #include "debug.h" #include "writeback.h" +#include <linux/delay.h> +#include <linux/freezer.h> +#include <linux/kthread.h> #include <trace/events/bcache.h> -static struct workqueue_struct *dirty_wq; - -static void read_dirty(struct closure *); - -struct dirty_io { - struct closure cl; - struct cached_dev *dc; - struct bio bio; -}; - /* Rate limiting */ static void __update_writeback_rate(struct cached_dev *dc) @@ -72,9 +65,6 @@ out: dc->writeback_rate_derivative = derivative; dc->writeback_rate_change = change; dc->writeback_rate_target = target; - - schedule_delayed_work(&dc->writeback_rate_update, - dc->writeback_rate_update_seconds * HZ); } static void update_writeback_rate(struct work_struct *work) @@ -90,13 +80,16 @@ static void update_writeback_rate(struct work_struct *work) __update_writeback_rate(dc); up_read(&dc->writeback_lock); + + schedule_delayed_work(&dc->writeback_rate_update, + dc->writeback_rate_update_seconds * HZ); } static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) { uint64_t ret; - if (atomic_read(&dc->disk.detaching) || + if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || !dc->writeback_percent) return 0; @@ -105,37 +98,11 @@ static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) return min_t(uint64_t, ret, HZ); } -/* Background writeback */ - -static bool dirty_pred(struct keybuf *buf, struct bkey *k) -{ - return KEY_DIRTY(k); -} - -static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k) -{ - uint64_t stripe; - unsigned nr_sectors = KEY_SIZE(k); - struct cached_dev *dc = container_of(buf, struct cached_dev, - writeback_keys); - unsigned stripe_size = 1 << dc->disk.stripe_size_bits; - - if (!KEY_DIRTY(k)) - return false; - - stripe = KEY_START(k) >> dc->disk.stripe_size_bits; - while (1) { - if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) != - stripe_size) - return false; - - if (nr_sectors <= 
stripe_size) - return true; - - nr_sectors -= stripe_size; - stripe++; - } -} +struct dirty_io { + struct closure cl; + struct cached_dev *dc; + struct bio bio; +}; static void dirty_init(struct keybuf_key *w) { @@ -153,131 +120,6 @@ static void dirty_init(struct keybuf_key *w) bch_bio_map(bio, NULL); } -static void refill_dirty(struct closure *cl) -{ - struct cached_dev *dc = container_of(cl, struct cached_dev, - writeback.cl); - struct keybuf *buf = &dc->writeback_keys; - bool searched_from_start = false; - struct bkey end = MAX_KEY; - SET_KEY_INODE(&end, dc->disk.id); - - if (!atomic_read(&dc->disk.detaching) && - !dc->writeback_running) - closure_return(cl); - - down_write(&dc->writeback_lock); - - if (!atomic_read(&dc->has_dirty)) { - SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); - bch_write_bdev_super(dc, NULL); - - up_write(&dc->writeback_lock); - closure_return(cl); - } - - if (bkey_cmp(&buf->last_scanned, &end) >= 0) { - buf->last_scanned = KEY(dc->disk.id, 0, 0); - searched_from_start = true; - } - - if (dc->partial_stripes_expensive) { - uint64_t i; - - for (i = 0; i < dc->disk.nr_stripes; i++) - if (atomic_read(dc->disk.stripe_sectors_dirty + i) == - 1 << dc->disk.stripe_size_bits) - goto full_stripes; - - goto normal_refill; -full_stripes: - bch_refill_keybuf(dc->disk.c, buf, &end, - dirty_full_stripe_pred); - } else { -normal_refill: - bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred); - } - - if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) { - /* Searched the entire btree - delay awhile */ - - if (RB_EMPTY_ROOT(&buf->keys)) { - atomic_set(&dc->has_dirty, 0); - cached_dev_put(dc); - } - - if (!atomic_read(&dc->disk.detaching)) - closure_delay(&dc->writeback, dc->writeback_delay * HZ); - } - - up_write(&dc->writeback_lock); - - bch_ratelimit_reset(&dc->writeback_rate); - - /* Punt to workqueue only so we don't recurse and blow the stack */ - continue_at(cl, read_dirty, dirty_wq); -} - -void bch_writeback_queue(struct cached_dev *dc) -{ - if (closure_trylock(&dc->writeback.cl, &dc->disk.cl)) { - if (!atomic_read(&dc->disk.detaching)) - closure_delay(&dc->writeback, dc->writeback_delay * HZ); - - continue_at(&dc->writeback.cl, refill_dirty, dirty_wq); - } -} - -void bch_writeback_add(struct cached_dev *dc) -{ - if (!atomic_read(&dc->has_dirty) && - !atomic_xchg(&dc->has_dirty, 1)) { - atomic_inc(&dc->count); - - if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) { - SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY); - /* XXX: should do this synchronously */ - bch_write_bdev_super(dc, NULL); - } - - bch_writeback_queue(dc); - - if (dc->writeback_percent) - schedule_delayed_work(&dc->writeback_rate_update, - dc->writeback_rate_update_seconds * HZ); - } -} - -void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode, - uint64_t offset, int nr_sectors) -{ - struct bcache_device *d = c->devices[inode]; - unsigned stripe_size, stripe_offset; - uint64_t stripe; - - if (!d) - return; - - stripe_size = 1 << d->stripe_size_bits; - stripe = offset >> d->stripe_size_bits; - stripe_offset = offset & (stripe_size - 1); - - while (nr_sectors) { - int s = min_t(unsigned, abs(nr_sectors), - stripe_size - stripe_offset); - - if (nr_sectors < 0) - s = -s; - - atomic_add(s, d->stripe_sectors_dirty + stripe); - nr_sectors -= s; - stripe_offset = 0; - stripe++; - } -} - -/* Background writeback - IO loop */ - static void dirty_io_destructor(struct closure *cl) { struct dirty_io *io = container_of(cl, struct dirty_io, cl); @@ -297,26 +139,25 @@ static void write_dirty_finish(struct 
closure *cl) /* This is kind of a dumb way of signalling errors. */ if (KEY_DIRTY(&w->key)) { + int ret; unsigned i; - struct btree_op op; - bch_btree_op_init_stack(&op); + struct keylist keys; - op.type = BTREE_REPLACE; - bkey_copy(&op.replace, &w->key); + bch_keylist_init(&keys); - SET_KEY_DIRTY(&w->key, false); - bch_keylist_add(&op.keys, &w->key); + bkey_copy(keys.top, &w->key); + SET_KEY_DIRTY(keys.top, false); + bch_keylist_push(&keys); for (i = 0; i < KEY_PTRS(&w->key); i++) atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin); - bch_btree_insert(&op, dc->disk.c); - closure_sync(&op.cl); + ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key); - if (op.insert_collision) + if (ret) trace_bcache_writeback_collision(&w->key); - atomic_long_inc(op.insert_collision + atomic_long_inc(ret ? &dc->disk.c->writeback_keys_failed : &dc->disk.c->writeback_keys_done); } @@ -374,30 +215,33 @@ static void read_dirty_submit(struct closure *cl) continue_at(cl, write_dirty, system_wq); } -static void read_dirty(struct closure *cl) +static void read_dirty(struct cached_dev *dc) { - struct cached_dev *dc = container_of(cl, struct cached_dev, - writeback.cl); - unsigned delay = writeback_delay(dc, 0); + unsigned delay = 0; struct keybuf_key *w; struct dirty_io *io; + struct closure cl; + + closure_init_stack(&cl); /* * XXX: if we error, background writeback just spins. Should use some * mempools. */ - while (1) { + while (!kthread_should_stop()) { + try_to_freeze(); + w = bch_keybuf_next(&dc->writeback_keys); if (!w) break; BUG_ON(ptr_stale(dc->disk.c, &w->key, 0)); - if (delay > 0 && - (KEY_START(&w->key) != dc->last_read || - jiffies_to_msecs(delay) > 50)) - delay = schedule_timeout_uninterruptible(delay); + if (KEY_START(&w->key) != dc->last_read || + jiffies_to_msecs(delay) > 50) + while (!kthread_should_stop() && delay) + delay = schedule_timeout_interruptible(delay); dc->last_read = KEY_OFFSET(&w->key); @@ -423,7 +267,7 @@ static void read_dirty(struct closure *cl) trace_bcache_writeback(&w->key); down(&dc->in_flight); - closure_call(&io->cl, read_dirty_submit, NULL, cl); + closure_call(&io->cl, read_dirty_submit, NULL, &cl); delay = writeback_delay(dc, KEY_SIZE(&w->key)); } @@ -439,52 +283,205 @@ err: * Wait for outstanding writeback IOs to finish (and keybuf slots to be * freed) before refilling again */ - continue_at(cl, refill_dirty, dirty_wq); + closure_sync(&cl); } -/* Init */ +/* Scan for dirty data */ + +void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode, + uint64_t offset, int nr_sectors) +{ + struct bcache_device *d = c->devices[inode]; + unsigned stripe_offset, stripe, sectors_dirty; + + if (!d) + return; + + stripe = offset_to_stripe(d, offset); + stripe_offset = offset & (d->stripe_size - 1); + + while (nr_sectors) { + int s = min_t(unsigned, abs(nr_sectors), + d->stripe_size - stripe_offset); + + if (nr_sectors < 0) + s = -s; + + if (stripe >= d->nr_stripes) + return; + + sectors_dirty = atomic_add_return(s, + d->stripe_sectors_dirty + stripe); + if (sectors_dirty == d->stripe_size) + set_bit(stripe, d->full_dirty_stripes); + else + clear_bit(stripe, d->full_dirty_stripes); + + nr_sectors -= s; + stripe_offset = 0; + stripe++; + } +} -static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op, - struct cached_dev *dc) +static bool dirty_pred(struct keybuf *buf, struct bkey *k) { - struct bkey *k; - struct btree_iter iter; - - bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0)); - while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) 
- if (!b->level) { - if (KEY_INODE(k) > dc->disk.id) - break; - - if (KEY_DIRTY(k)) - bcache_dev_sectors_dirty_add(b->c, dc->disk.id, - KEY_START(k), - KEY_SIZE(k)); - } else { - btree(sectors_dirty_init, k, b, op, dc); - if (KEY_INODE(k) > dc->disk.id) - break; - - cond_resched(); + return KEY_DIRTY(k); +} + +static void refill_full_stripes(struct cached_dev *dc) +{ + struct keybuf *buf = &dc->writeback_keys; + unsigned start_stripe, stripe, next_stripe; + bool wrapped = false; + + stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned)); + + if (stripe >= dc->disk.nr_stripes) + stripe = 0; + + start_stripe = stripe; + + while (1) { + stripe = find_next_bit(dc->disk.full_dirty_stripes, + dc->disk.nr_stripes, stripe); + + if (stripe == dc->disk.nr_stripes) + goto next; + + next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes, + dc->disk.nr_stripes, stripe); + + buf->last_scanned = KEY(dc->disk.id, + stripe * dc->disk.stripe_size, 0); + + bch_refill_keybuf(dc->disk.c, buf, + &KEY(dc->disk.id, + next_stripe * dc->disk.stripe_size, 0), + dirty_pred); + + if (array_freelist_empty(&buf->freelist)) + return; + + stripe = next_stripe; +next: + if (wrapped && stripe > start_stripe) + return; + + if (stripe == dc->disk.nr_stripes) { + stripe = 0; + wrapped = true; } + } +} + +static bool refill_dirty(struct cached_dev *dc) +{ + struct keybuf *buf = &dc->writeback_keys; + struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0); + bool searched_from_start = false; + + if (dc->partial_stripes_expensive) { + refill_full_stripes(dc); + if (array_freelist_empty(&buf->freelist)) + return false; + } + + if (bkey_cmp(&buf->last_scanned, &end) >= 0) { + buf->last_scanned = KEY(dc->disk.id, 0, 0); + searched_from_start = true; + } + + bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred); + + return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start; +} + +static int bch_writeback_thread(void *arg) +{ + struct cached_dev *dc = arg; + bool searched_full_index; + + while (!kthread_should_stop()) { + down_write(&dc->writeback_lock); + if (!atomic_read(&dc->has_dirty) || + (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) && + !dc->writeback_running)) { + up_write(&dc->writeback_lock); + set_current_state(TASK_INTERRUPTIBLE); + + if (kthread_should_stop()) + return 0; + + try_to_freeze(); + schedule(); + continue; + } + + searched_full_index = refill_dirty(dc); + + if (searched_full_index && + RB_EMPTY_ROOT(&dc->writeback_keys.keys)) { + atomic_set(&dc->has_dirty, 0); + cached_dev_put(dc); + SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); + bch_write_bdev_super(dc, NULL); + } + + up_write(&dc->writeback_lock); + + bch_ratelimit_reset(&dc->writeback_rate); + read_dirty(dc); + + if (searched_full_index) { + unsigned delay = dc->writeback_delay * HZ; + + while (delay && + !kthread_should_stop() && + !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) + delay = schedule_timeout_interruptible(delay); + } + } return 0; } +/* Init */ + +struct sectors_dirty_init { + struct btree_op op; + unsigned inode; +}; + +static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b, + struct bkey *k) +{ + struct sectors_dirty_init *op = container_of(_op, + struct sectors_dirty_init, op); + if (KEY_INODE(k) > op->inode) + return MAP_DONE; + + if (KEY_DIRTY(k)) + bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k), + KEY_START(k), KEY_SIZE(k)); + + return MAP_CONTINUE; +} + void bch_sectors_dirty_init(struct cached_dev *dc) { - struct btree_op op; + struct sectors_dirty_init op; + + 
bch_btree_op_init(&op.op, -1); + op.inode = dc->disk.id; - bch_btree_op_init_stack(&op); - btree_root(sectors_dirty_init, dc->disk.c, &op, dc); + bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0), + sectors_dirty_init_fn, 0); } -void bch_cached_dev_writeback_init(struct cached_dev *dc) +int bch_cached_dev_writeback_init(struct cached_dev *dc) { sema_init(&dc->in_flight, 64); - closure_init_unlocked(&dc->writeback); init_rwsem(&dc->writeback_lock); - bch_keybuf_init(&dc->writeback_keys); dc->writeback_metadata = true; @@ -498,22 +495,16 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc) dc->writeback_rate_p_term_inverse = 64; dc->writeback_rate_d_smooth = 8; + dc->writeback_thread = kthread_create(bch_writeback_thread, dc, + "bcache_writeback"); + if (IS_ERR(dc->writeback_thread)) + return PTR_ERR(dc->writeback_thread); + + set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE); + INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); schedule_delayed_work(&dc->writeback_rate_update, dc->writeback_rate_update_seconds * HZ); -} - -void bch_writeback_exit(void) -{ - if (dirty_wq) - destroy_workqueue(dirty_wq); -} - -int __init bch_writeback_init(void) -{ - dirty_wq = create_workqueue("bcache_writeback"); - if (!dirty_wq) - return -ENOMEM; return 0; } diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index c91f61bb95b6..c9ddcf4614b9 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -14,20 +14,27 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d) return ret; } -static inline bool bcache_dev_stripe_dirty(struct bcache_device *d, +static inline unsigned offset_to_stripe(struct bcache_device *d, + uint64_t offset) +{ + do_div(offset, d->stripe_size); + return offset; +} + +static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc, uint64_t offset, unsigned nr_sectors) { - uint64_t stripe = offset >> d->stripe_size_bits; + unsigned stripe = offset_to_stripe(&dc->disk, offset); while (1) { - if (atomic_read(d->stripe_sectors_dirty + stripe)) + if (atomic_read(dc->disk.stripe_sectors_dirty + stripe)) return true; - if (nr_sectors <= 1 << d->stripe_size_bits) + if (nr_sectors <= dc->disk.stripe_size) return false; - nr_sectors -= 1 << d->stripe_size_bits; + nr_sectors -= dc->disk.stripe_size; stripe++; } } @@ -38,12 +45,12 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio, unsigned in_use = dc->disk.c->gc_stats.in_use; if (cache_mode != CACHE_MODE_WRITEBACK || - atomic_read(&dc->disk.detaching) || + test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || in_use > CUTOFF_WRITEBACK_SYNC) return false; if (dc->partial_stripes_expensive && - bcache_dev_stripe_dirty(&dc->disk, bio->bi_sector, + bcache_dev_stripe_dirty(dc, bio->bi_sector, bio_sectors(bio))) return true; @@ -54,11 +61,30 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio, in_use <= CUTOFF_WRITEBACK; } +static inline void bch_writeback_queue(struct cached_dev *dc) +{ + wake_up_process(dc->writeback_thread); +} + +static inline void bch_writeback_add(struct cached_dev *dc) +{ + if (!atomic_read(&dc->has_dirty) && + !atomic_xchg(&dc->has_dirty, 1)) { + atomic_inc(&dc->count); + + if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) { + SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY); + /* XXX: should do this synchronously */ + bch_write_bdev_super(dc, NULL); + } + + bch_writeback_queue(dc); + } +} + void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int); -void 
bch_writeback_queue(struct cached_dev *); -void bch_writeback_add(struct cached_dev *); void bch_sectors_dirty_init(struct cached_dev *dc); -void bch_cached_dev_writeback_init(struct cached_dev *); +int bch_cached_dev_writeback_init(struct cached_dev *); #endif diff --git a/drivers/md/md.c b/drivers/md/md.c index 8766eabb0014..b6b7a2866c9e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -112,7 +112,7 @@ static inline int speed_max(struct mddev *mddev) static struct ctl_table_header *raid_table_header; -static ctl_table raid_table[] = { +static struct ctl_table raid_table[] = { { .procname = "speed_limit_min", .data = &sysctl_speed_limit_min, @@ -130,7 +130,7 @@ static ctl_table raid_table[] = { { } }; -static ctl_table raid_dir_table[] = { +static struct ctl_table raid_dir_table[] = { { .procname = "raid", .maxlen = 0, @@ -140,7 +140,7 @@ static ctl_table raid_dir_table[] = { { } }; -static ctl_table raid_root_table[] = { +static struct ctl_table raid_root_table[] = { { .procname = "dev", .maxlen = 0, @@ -562,11 +562,19 @@ static struct mddev * mddev_find(dev_t unit) goto retry; } -static inline int mddev_lock(struct mddev * mddev) +static inline int __must_check mddev_lock(struct mddev * mddev) { return mutex_lock_interruptible(&mddev->reconfig_mutex); } +/* Sometimes we need to take the lock in a situation where + * failure due to interrupts is not acceptable. + */ +static inline void mddev_lock_nointr(struct mddev * mddev) +{ + mutex_lock(&mddev->reconfig_mutex); +} + static inline int mddev_is_locked(struct mddev *mddev) { return mutex_is_locked(&mddev->reconfig_mutex); @@ -2978,7 +2986,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) for_each_mddev(mddev, tmp) { struct md_rdev *rdev2; - mddev_lock(mddev); + mddev_lock_nointr(mddev); rdev_for_each(rdev2, mddev) if (rdev->bdev == rdev2->bdev && rdev != rdev2 && @@ -2994,7 +3002,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) break; } } - mddev_lock(my_mddev); + mddev_lock_nointr(my_mddev); if (overlap) { /* Someone else could have slipped in a size * change here, but doing so is just silly. 
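The bcache writeback.h hunk above drops the old power-of-two stripe arithmetic (shifts and masks on stripe_size_bits) in favour of a plain stripe_size counted in sectors, with offset_to_stripe() doing a 64-bit division via do_div(), presumably so stripe sizes no longer have to be powers of two. A minimal userspace sketch of that index math, for illustration only (the sample stripe sizes below are made up, not from the patch):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustration of offset_to_stripe() from the writeback.h hunk above:
	 * the stripe index is a 64-bit division by stripe_size (spelled
	 * do_div() in the kernel) instead of a shift by stripe_size_bits. */
	static unsigned offset_to_stripe(uint64_t offset, unsigned stripe_size)
	{
		return (unsigned)(offset / stripe_size);   /* do_div(offset, stripe_size) */
	}

	int main(void)
	{
		uint64_t offset = 123456789ULL;            /* arbitrary sector offset */

		/* 8192-sector stripes: the old shift (offset >> 13) gives the same answer */
		printf("pow2 stripe: %u\n", offset_to_stripe(offset, 8192));
		/* 7168-sector stripes: there is no stripe_size_bits for this at all */
		printf("odd  stripe: %u\n", offset_to_stripe(offset, 7168));
		return 0;
	}

In the kernel, do_div() is used rather than a plain '/' so that 64-by-32 division does not pull in libgcc helpers on 32-bit builds; the sketch runs in userspace and can just divide.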
@@ -3580,6 +3588,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len) mddev->in_sync = 1; del_timer_sync(&mddev->safemode_timer); } + blk_set_stacking_limits(&mddev->queue->limits); pers->run(mddev); set_bit(MD_CHANGE_DEVS, &mddev->flags); mddev_resume(mddev); @@ -5258,7 +5267,7 @@ static void __md_stop_writes(struct mddev *mddev) void md_stop_writes(struct mddev *mddev) { - mddev_lock(mddev); + mddev_lock_nointr(mddev); __md_stop_writes(mddev); mddev_unlock(mddev); } @@ -5291,20 +5300,35 @@ EXPORT_SYMBOL_GPL(md_stop); static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) { int err = 0; + int did_freeze = 0; + + if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { + did_freeze = 1; + set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + md_wakeup_thread(mddev->thread); + } + if (mddev->sync_thread) { + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + /* Thread might be blocked waiting for metadata update + * which will now never happen */ + wake_up_process(mddev->sync_thread->tsk); + } + mddev_unlock(mddev); + wait_event(resync_wait, mddev->sync_thread == NULL); + mddev_lock_nointr(mddev); + mutex_lock(&mddev->open_mutex); - if (atomic_read(&mddev->openers) > !!bdev) { + if (atomic_read(&mddev->openers) > !!bdev || + mddev->sync_thread || + (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { printk("md: %s still in use.\n",mdname(mddev)); + if (did_freeze) { + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + md_wakeup_thread(mddev->thread); + } err = -EBUSY; goto out; } - if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) { - /* Someone opened the device since we flushed it - * so page cache could be dirty and it is too late - * to flush. So abort - */ - mutex_unlock(&mddev->open_mutex); - return -EBUSY; - } if (mddev->pers) { __md_stop_writes(mddev); @@ -5315,7 +5339,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) set_disk_ro(mddev->gendisk, 1); clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); sysfs_notify_dirent_safe(mddev->sysfs_state); - err = 0; + err = 0; } out: mutex_unlock(&mddev->open_mutex); @@ -5331,20 +5355,34 @@ static int do_md_stop(struct mddev * mddev, int mode, { struct gendisk *disk = mddev->gendisk; struct md_rdev *rdev; + int did_freeze = 0; + + if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { + did_freeze = 1; + set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + md_wakeup_thread(mddev->thread); + } + if (mddev->sync_thread) { + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + /* Thread might be blocked waiting for metadata update + * which will now never happen */ + wake_up_process(mddev->sync_thread->tsk); + } + mddev_unlock(mddev); + wait_event(resync_wait, mddev->sync_thread == NULL); + mddev_lock_nointr(mddev); mutex_lock(&mddev->open_mutex); if (atomic_read(&mddev->openers) > !!bdev || - mddev->sysfs_active) { + mddev->sysfs_active || + mddev->sync_thread || + (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { printk("md: %s still in use.\n",mdname(mddev)); mutex_unlock(&mddev->open_mutex); - return -EBUSY; - } - if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) { - /* Someone opened the device since we flushed it - * so page cache could be dirty and it is too late - * to flush. 
So abort - */ - mutex_unlock(&mddev->open_mutex); + if (did_freeze) { + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + md_wakeup_thread(mddev->thread); + } return -EBUSY; } if (mddev->pers) { @@ -6551,7 +6589,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, wait_event(mddev->sb_wait, !test_bit(MD_CHANGE_DEVS, &mddev->flags) && !test_bit(MD_CHANGE_PENDING, &mddev->flags)); - mddev_lock(mddev); + mddev_lock_nointr(mddev); } } else { err = -EROFS; @@ -7361,9 +7399,6 @@ void md_do_sync(struct md_thread *thread) mddev->curr_resync = 2; try_again: - if (kthread_should_stop()) - set_bit(MD_RECOVERY_INTR, &mddev->recovery); - if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) goto skip; for_each_mddev(mddev2, tmp) { @@ -7388,7 +7423,7 @@ void md_do_sync(struct md_thread *thread) * be caught by 'softlockup' */ prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); - if (!kthread_should_stop() && + if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && mddev2->curr_resync >= mddev->curr_resync) { printk(KERN_INFO "md: delaying %s of %s" " until %s has finished (they" @@ -7464,7 +7499,7 @@ void md_do_sync(struct md_thread *thread) last_check = 0; if (j>2) { - printk(KERN_INFO + printk(KERN_INFO "md: resuming %s of %s from checkpoint.\n", desc, mdname(mddev)); mddev->curr_resync = j; @@ -7501,7 +7536,8 @@ void md_do_sync(struct md_thread *thread) sysfs_notify(&mddev->kobj, NULL, "sync_completed"); } - while (j >= mddev->resync_max && !kthread_should_stop()) { + while (j >= mddev->resync_max && + !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { /* As this condition is controlled by user-space, * we can block indefinitely, so use '_interruptible' * to avoid triggering warnings. @@ -7509,17 +7545,18 @@ void md_do_sync(struct md_thread *thread) flush_signals(current); /* just in case */ wait_event_interruptible(mddev->recovery_wait, mddev->resync_max > j - || kthread_should_stop()); + || test_bit(MD_RECOVERY_INTR, + &mddev->recovery)); } - if (kthread_should_stop()) - goto interrupted; + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) + break; sectors = mddev->pers->sync_request(mddev, j, &skipped, currspeed < speed_min(mddev)); if (sectors == 0) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); - goto out; + break; } if (!skipped) { /* actual IO requested */ @@ -7556,10 +7593,8 @@ void md_do_sync(struct md_thread *thread) last_mark = next; } - - if (kthread_should_stop()) - goto interrupted; - + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) + break; /* * this loop exits only if either when we are slower than @@ -7582,11 +7617,12 @@ void md_do_sync(struct md_thread *thread) } } } - printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc); + printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc, + test_bit(MD_RECOVERY_INTR, &mddev->recovery) + ? "interrupted" : "done"); /* * this also signals 'finished resyncing' to md_stop */ - out: blk_finish_plug(&plug); wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); @@ -7640,16 +7676,6 @@ void md_do_sync(struct md_thread *thread) set_bit(MD_RECOVERY_DONE, &mddev->recovery); md_wakeup_thread(mddev->thread); return; - - interrupted: - /* - * got a signal, exit. - */ - printk(KERN_INFO - "md: md_do_sync() got signal ... 
exiting\n"); - set_bit(MD_RECOVERY_INTR, &mddev->recovery); - goto out; - } EXPORT_SYMBOL_GPL(md_do_sync); @@ -7894,6 +7920,7 @@ void md_reap_sync_thread(struct mddev *mddev) /* resync has finished, collect result */ md_unregister_thread(&mddev->sync_thread); + wake_up(&resync_wait); if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { /* success...*/ diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index af6681b19776..1e5a540995e9 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -66,7 +66,8 @@ */ static int max_queued_requests = 1024; -static void allow_barrier(struct r1conf *conf); +static void allow_barrier(struct r1conf *conf, sector_t start_next_window, + sector_t bi_sector); static void lower_barrier(struct r1conf *conf); static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) @@ -84,10 +85,12 @@ static void r1bio_pool_free(void *r1_bio, void *data) } #define RESYNC_BLOCK_SIZE (64*1024) -//#define RESYNC_BLOCK_SIZE PAGE_SIZE +#define RESYNC_DEPTH 32 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9) #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) -#define RESYNC_WINDOW (2048*1024) +#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH) +#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9) +#define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS) static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) { @@ -225,6 +228,8 @@ static void call_bio_endio(struct r1bio *r1_bio) struct bio *bio = r1_bio->master_bio; int done; struct r1conf *conf = r1_bio->mddev->private; + sector_t start_next_window = r1_bio->start_next_window; + sector_t bi_sector = bio->bi_sector; if (bio->bi_phys_segments) { unsigned long flags; @@ -232,6 +237,11 @@ static void call_bio_endio(struct r1bio *r1_bio) bio->bi_phys_segments--; done = (bio->bi_phys_segments == 0); spin_unlock_irqrestore(&conf->device_lock, flags); + /* + * make_request() might be waiting for + * bi_phys_segments to decrease + */ + wake_up(&conf->wait_barrier); } else done = 1; @@ -243,7 +253,7 @@ static void call_bio_endio(struct r1bio *r1_bio) * Wake up any possible resync thread that waits for the device * to go idle. */ - allow_barrier(conf); + allow_barrier(conf, start_next_window, bi_sector); } } @@ -814,8 +824,6 @@ static void flush_pending_writes(struct r1conf *conf) * there is no normal IO happeing. It must arrange to call * lower_barrier when the particular background IO completes. */ -#define RESYNC_DEPTH 32 - static void raise_barrier(struct r1conf *conf) { spin_lock_irq(&conf->resync_lock); @@ -827,9 +835,19 @@ static void raise_barrier(struct r1conf *conf) /* block any new IO from starting */ conf->barrier++; - /* Now wait for all pending IO to complete */ + /* For these conditions we must wait: + * A: while the array is in frozen state + * B: while barrier >= RESYNC_DEPTH, meaning resync reach + * the max count which allowed. + * C: next_resync + RESYNC_SECTORS > start_next_window, meaning + * next resync will reach to the window which normal bios are + * handling. 
+ */ wait_event_lock_irq(conf->wait_barrier, - !conf->nr_pending && conf->barrier < RESYNC_DEPTH, + !conf->array_frozen && + conf->barrier < RESYNC_DEPTH && + (conf->start_next_window >= + conf->next_resync + RESYNC_SECTORS), conf->resync_lock); spin_unlock_irq(&conf->resync_lock); @@ -845,10 +863,33 @@ static void lower_barrier(struct r1conf *conf) wake_up(&conf->wait_barrier); } -static void wait_barrier(struct r1conf *conf) +static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio) { + bool wait = false; + + if (conf->array_frozen || !bio) + wait = true; + else if (conf->barrier && bio_data_dir(bio) == WRITE) { + if (conf->next_resync < RESYNC_WINDOW_SECTORS) + wait = true; + else if ((conf->next_resync - RESYNC_WINDOW_SECTORS + >= bio_end_sector(bio)) || + (conf->next_resync + NEXT_NORMALIO_DISTANCE + <= bio->bi_sector)) + wait = false; + else + wait = true; + } + + return wait; +} + +static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) +{ + sector_t sector = 0; + spin_lock_irq(&conf->resync_lock); - if (conf->barrier) { + if (need_to_wait_for_sync(conf, bio)) { conf->nr_waiting++; /* Wait for the barrier to drop. * However if there are already pending @@ -860,22 +901,67 @@ static void wait_barrier(struct r1conf *conf) * count down. */ wait_event_lock_irq(conf->wait_barrier, - !conf->barrier || - (conf->nr_pending && + !conf->array_frozen && + (!conf->barrier || + ((conf->start_next_window < + conf->next_resync + RESYNC_SECTORS) && current->bio_list && - !bio_list_empty(current->bio_list)), + !bio_list_empty(current->bio_list))), conf->resync_lock); conf->nr_waiting--; } + + if (bio && bio_data_dir(bio) == WRITE) { + if (conf->next_resync + NEXT_NORMALIO_DISTANCE + <= bio->bi_sector) { + if (conf->start_next_window == MaxSector) + conf->start_next_window = + conf->next_resync + + NEXT_NORMALIO_DISTANCE; + + if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE) + <= bio->bi_sector) + conf->next_window_requests++; + else + conf->current_window_requests++; + } + if (bio->bi_sector >= conf->start_next_window) + sector = conf->start_next_window; + } + conf->nr_pending++; spin_unlock_irq(&conf->resync_lock); + return sector; } -static void allow_barrier(struct r1conf *conf) +static void allow_barrier(struct r1conf *conf, sector_t start_next_window, + sector_t bi_sector) { unsigned long flags; + spin_lock_irqsave(&conf->resync_lock, flags); conf->nr_pending--; + if (start_next_window) { + if (start_next_window == conf->start_next_window) { + if (conf->start_next_window + NEXT_NORMALIO_DISTANCE + <= bi_sector) + conf->next_window_requests--; + else + conf->current_window_requests--; + } else + conf->current_window_requests--; + + if (!conf->current_window_requests) { + if (conf->next_window_requests) { + conf->current_window_requests = + conf->next_window_requests; + conf->next_window_requests = 0; + conf->start_next_window += + NEXT_NORMALIO_DISTANCE; + } else + conf->start_next_window = MaxSector; + } + } spin_unlock_irqrestore(&conf->resync_lock, flags); wake_up(&conf->wait_barrier); } @@ -884,8 +970,7 @@ static void freeze_array(struct r1conf *conf, int extra) { /* stop syncio and normal IO and wait for everything to * go quite. - * We increment barrier and nr_waiting, and then - * wait until nr_pending match nr_queued+extra + * We wait until nr_pending match nr_queued+extra * This is called in the context of one normal IO request * that has failed. 
Thus any sync request that might be pending * will be blocked by nr_pending, and we need to wait for @@ -895,8 +980,7 @@ static void freeze_array(struct r1conf *conf, int extra) * we continue. */ spin_lock_irq(&conf->resync_lock); - conf->barrier++; - conf->nr_waiting++; + conf->array_frozen = 1; wait_event_lock_irq_cmd(conf->wait_barrier, conf->nr_pending == conf->nr_queued+extra, conf->resync_lock, @@ -907,8 +991,7 @@ static void unfreeze_array(struct r1conf *conf) { /* reverse the effect of the freeze */ spin_lock_irq(&conf->resync_lock); - conf->barrier--; - conf->nr_waiting--; + conf->array_frozen = 0; wake_up(&conf->wait_barrier); spin_unlock_irq(&conf->resync_lock); } @@ -1013,6 +1096,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) int first_clone; int sectors_handled; int max_sectors; + sector_t start_next_window; /* * Register the new request and wait if the reconstruction @@ -1042,7 +1126,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) finish_wait(&conf->wait_barrier, &w); } - wait_barrier(conf); + start_next_window = wait_barrier(conf, bio); bitmap = mddev->bitmap; @@ -1163,6 +1247,7 @@ read_again: disks = conf->raid_disks * 2; retry_write: + r1_bio->start_next_window = start_next_window; blocked_rdev = NULL; rcu_read_lock(); max_sectors = r1_bio->sectors; @@ -1231,14 +1316,24 @@ read_again: if (unlikely(blocked_rdev)) { /* Wait for this device to become unblocked */ int j; + sector_t old = start_next_window; for (j = 0; j < i; j++) if (r1_bio->bios[j]) rdev_dec_pending(conf->mirrors[j].rdev, mddev); r1_bio->state = 0; - allow_barrier(conf); + allow_barrier(conf, start_next_window, bio->bi_sector); md_wait_for_blocked_rdev(blocked_rdev, mddev); - wait_barrier(conf); + start_next_window = wait_barrier(conf, bio); + /* + * We must make sure the multi r1bios of bio have + * the same value of bi_phys_segments + */ + if (bio->bi_phys_segments && old && + old != start_next_window) + /* Wait for the former r1bio(s) to complete */ + wait_event(conf->wait_barrier, + bio->bi_phys_segments == 1); goto retry_write; } @@ -1438,11 +1533,14 @@ static void print_conf(struct r1conf *conf) static void close_sync(struct r1conf *conf) { - wait_barrier(conf); - allow_barrier(conf); + wait_barrier(conf, NULL); + allow_barrier(conf, 0, 0); mempool_destroy(conf->r1buf_pool); conf->r1buf_pool = NULL; + + conf->next_resync = 0; + conf->start_next_window = MaxSector; } static int raid1_spare_active(struct mddev *mddev) @@ -2714,6 +2812,9 @@ static struct r1conf *setup_conf(struct mddev *mddev) conf->pending_count = 0; conf->recovery_disabled = mddev->recovery_disabled - 1; + conf->start_next_window = MaxSector; + conf->current_window_requests = conf->next_window_requests = 0; + err = -EIO; for (i = 0; i < conf->raid_disks * 2; i++) { @@ -2871,8 +2972,8 @@ static int stop(struct mddev *mddev) atomic_read(&bitmap->behind_writes) == 0); } - raise_barrier(conf); - lower_barrier(conf); + freeze_array(conf, 0); + unfreeze_array(conf); md_unregister_thread(&mddev->thread); if (conf->r1bio_pool) @@ -3031,10 +3132,10 @@ static void raid1_quiesce(struct mddev *mddev, int state) wake_up(&conf->wait_barrier); break; case 1: - raise_barrier(conf); + freeze_array(conf, 0); break; case 0: - lower_barrier(conf); + unfreeze_array(conf); break; } } @@ -3051,7 +3152,8 @@ static void *raid1_takeover(struct mddev *mddev) mddev->new_chunk_sectors = 0; conf = setup_conf(mddev); if (!IS_ERR(conf)) - conf->barrier = 1; + /* Array must appear to be quiesced */ + conf->array_frozen = 1; 
return conf; } return ERR_PTR(-EINVAL); diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index 0ff3715fb7eb..9bebca7bff2f 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -41,6 +41,19 @@ struct r1conf { */ sector_t next_resync; + /* When raid1 starts resync, we divide array into four partitions + * |---------|--------------|---------------------|-------------| + * next_resync start_next_window end_window + * start_next_window = next_resync + NEXT_NORMALIO_DISTANCE + * end_window = start_next_window + NEXT_NORMALIO_DISTANCE + * current_window_requests means the count of normalIO between + * start_next_window and end_window. + * next_window_requests means the count of normalIO after end_window. + * */ + sector_t start_next_window; + int current_window_requests; + int next_window_requests; + spinlock_t device_lock; /* list of 'struct r1bio' that need to be processed by raid1d, @@ -65,6 +78,7 @@ struct r1conf { int nr_waiting; int nr_queued; int barrier; + int array_frozen; /* Set to 1 if a full sync is needed, (fresh device added). * Cleared when a sync completes. @@ -111,6 +125,7 @@ struct r1bio { * in this BehindIO request */ sector_t sector; + sector_t start_next_window; int sectors; unsigned long state; struct mddev *mddev; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 7c3508abb5e1..c504e8389e69 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -4384,7 +4384,11 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, set_bit(MD_CHANGE_DEVS, &mddev->flags); md_wakeup_thread(mddev->thread); wait_event(mddev->sb_wait, mddev->flags == 0 || - kthread_should_stop()); + test_bit(MD_RECOVERY_INTR, &mddev->recovery)); + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { + allow_barrier(conf); + return sectors_done; + } conf->reshape_safe = mddev->reshape_position; allow_barrier(conf); } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 7f0e17a27aeb..47da0af6322b 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -85,6 +85,42 @@ static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect) return &conf->stripe_hashtbl[hash]; } +static inline int stripe_hash_locks_hash(sector_t sect) +{ + return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK; +} + +static inline void lock_device_hash_lock(struct r5conf *conf, int hash) +{ + spin_lock_irq(conf->hash_locks + hash); + spin_lock(&conf->device_lock); +} + +static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) +{ + spin_unlock(&conf->device_lock); + spin_unlock_irq(conf->hash_locks + hash); +} + +static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) +{ + int i; + local_irq_disable(); + spin_lock(conf->hash_locks); + for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) + spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); + spin_lock(&conf->device_lock); +} + +static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) +{ + int i; + spin_unlock(&conf->device_lock); + for (i = NR_STRIPE_HASH_LOCKS; i; i--) + spin_unlock(conf->hash_locks + i - 1); + local_irq_enable(); +} + /* bio's attached to a stripe+device for I/O are linked together in bi_sector * order without overlap. There may be several bio's per stripe+device, and * a bio could span several devices. 
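The raid5.c hunks above introduce per-stripe hash locks: stripe_hash_locks_hash() folds a stripe's sector into one of NR_STRIPE_HASH_LOCKS shards, each with its own spinlock and its own inactive list, get_active_stripe() now takes only that shard's lock, and lock_all_device_hash_locks_irq() nests every shard plus device_lock for whole-array operations. A small standalone sketch of the mapping; the STRIPE_SHIFT and NR_STRIPE_HASH_LOCKS values below are assumptions for illustration (they come from raid5.h, not from the hunks shown here):

	#include <stdio.h>

	#define STRIPE_SHIFT		3	/* assumed: PAGE_SHIFT - 9 with 4K pages */
	#define NR_STRIPE_HASH_LOCKS	8	/* assumed shard count in this series */
	#define STRIPE_HASH_LOCKS_MASK	(NR_STRIPE_HASH_LOCKS - 1)

	/* Every access to a given stripe lands on the same shard, so the lock
	 * and the inactive list it protects stay consistent per stripe, while
	 * different stripes spread across all shards instead of contending on
	 * the single device_lock. */
	static int stripe_hash_locks_hash(unsigned long long sect)
	{
		return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
	}

	int main(void)
	{
		unsigned long long sect;

		for (sect = 0; sect < 64; sect += 8)	/* one stripe every 8 sectors */
			printf("sector %3llu -> hash %d\n", sect,
			       stripe_hash_locks_hash(sect));
		return 0;
	}

Keeping the shard count a power of two keeps the hash a shift-and-mask, and acquiring the shards in index order with hash_locks[0] as the spin_lock_nest_lock() reference is what keeps lockdep satisfied, as the setup_conf() comment at the end of this section notes.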
@@ -249,7 +285,8 @@ static void raid5_wakeup_stripe_thread(struct stripe_head *sh) } } -static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh) +static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, + struct list_head *temp_inactive_list) { BUG_ON(!list_empty(&sh->lru)); BUG_ON(atomic_read(&conf->active_stripes)==0); @@ -278,23 +315,68 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh) < IO_THRESHOLD) md_wakeup_thread(conf->mddev->thread); atomic_dec(&conf->active_stripes); - if (!test_bit(STRIPE_EXPANDING, &sh->state)) { - list_add_tail(&sh->lru, &conf->inactive_list); - wake_up(&conf->wait_for_stripe); - if (conf->retry_read_aligned) - md_wakeup_thread(conf->mddev->thread); - } + if (!test_bit(STRIPE_EXPANDING, &sh->state)) + list_add_tail(&sh->lru, temp_inactive_list); } } -static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) +static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, + struct list_head *temp_inactive_list) { if (atomic_dec_and_test(&sh->count)) - do_release_stripe(conf, sh); + do_release_stripe(conf, sh, temp_inactive_list); +} + +/* + * @hash could be NR_STRIPE_HASH_LOCKS, then we have a list of inactive_list + * + * Be careful: Only one task can add/delete stripes from temp_inactive_list at + * given time. Adding stripes only takes device lock, while deleting stripes + * only takes hash lock. + */ +static void release_inactive_stripe_list(struct r5conf *conf, + struct list_head *temp_inactive_list, + int hash) +{ + int size; + bool do_wakeup = false; + unsigned long flags; + + if (hash == NR_STRIPE_HASH_LOCKS) { + size = NR_STRIPE_HASH_LOCKS; + hash = NR_STRIPE_HASH_LOCKS - 1; + } else + size = 1; + while (size) { + struct list_head *list = &temp_inactive_list[size - 1]; + + /* + * We don't hold any lock here yet, get_active_stripe() might + * remove stripes from the list + */ + if (!list_empty_careful(list)) { + spin_lock_irqsave(conf->hash_locks + hash, flags); + if (list_empty(conf->inactive_list + hash) && + !list_empty(list)) + atomic_dec(&conf->empty_inactive_list_nr); + list_splice_tail_init(list, conf->inactive_list + hash); + do_wakeup = true; + spin_unlock_irqrestore(conf->hash_locks + hash, flags); + } + size--; + hash--; + } + + if (do_wakeup) { + wake_up(&conf->wait_for_stripe); + if (conf->retry_read_aligned) + md_wakeup_thread(conf->mddev->thread); + } } /* should hold conf->device_lock already */ -static int release_stripe_list(struct r5conf *conf) +static int release_stripe_list(struct r5conf *conf, + struct list_head *temp_inactive_list) { struct stripe_head *sh; int count = 0; @@ -303,6 +385,8 @@ static int release_stripe_list(struct r5conf *conf) head = llist_del_all(&conf->released_stripes); head = llist_reverse_order(head); while (head) { + int hash; + sh = llist_entry(head, struct stripe_head, release_list); head = llist_next(head); /* sh could be readded after STRIPE_ON_RELEASE_LIST is cleard */ @@ -313,7 +397,8 @@ static int release_stripe_list(struct r5conf *conf) * again, the count is always > 1. This is true for * STRIPE_ON_UNPLUG_LIST bit too. 
*/ - __release_stripe(conf, sh); + hash = sh->hash_lock_index; + __release_stripe(conf, sh, &temp_inactive_list[hash]); count++; } @@ -324,9 +409,12 @@ static void release_stripe(struct stripe_head *sh) { struct r5conf *conf = sh->raid_conf; unsigned long flags; + struct list_head list; + int hash; bool wakeup; - if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) + if (unlikely(!conf->mddev->thread) || + test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) goto slow_path; wakeup = llist_add(&sh->release_list, &conf->released_stripes); if (wakeup) @@ -336,8 +424,11 @@ slow_path: local_irq_save(flags); /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */ if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { - do_release_stripe(conf, sh); + INIT_LIST_HEAD(&list); + hash = sh->hash_lock_index; + do_release_stripe(conf, sh, &list); spin_unlock(&conf->device_lock); + release_inactive_stripe_list(conf, &list, hash); } local_irq_restore(flags); } @@ -362,18 +453,21 @@ static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) /* find an idle stripe, make sure it is unhashed, and return it. */ -static struct stripe_head *get_free_stripe(struct r5conf *conf) +static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) { struct stripe_head *sh = NULL; struct list_head *first; - if (list_empty(&conf->inactive_list)) + if (list_empty(conf->inactive_list + hash)) goto out; - first = conf->inactive_list.next; + first = (conf->inactive_list + hash)->next; sh = list_entry(first, struct stripe_head, lru); list_del_init(first); remove_hash(sh); atomic_inc(&conf->active_stripes); + BUG_ON(hash != sh->hash_lock_index); + if (list_empty(conf->inactive_list + hash)) + atomic_inc(&conf->empty_inactive_list_nr); out: return sh; } @@ -416,7 +510,7 @@ static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) { struct r5conf *conf = sh->raid_conf; - int i; + int i, seq; BUG_ON(atomic_read(&sh->count) != 0); BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); @@ -426,7 +520,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) (unsigned long long)sh->sector); remove_hash(sh); - +retry: + seq = read_seqcount_begin(&conf->gen_lock); sh->generation = conf->generation - previous; sh->disks = previous ? 
conf->previous_raid_disks : conf->raid_disks; sh->sector = sector; @@ -448,6 +543,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) dev->flags = 0; raid5_build_block(sh, i, previous); } + if (read_seqcount_retry(&conf->gen_lock, seq)) + goto retry; insert_hash(conf, sh); sh->cpu = smp_processor_id(); } @@ -552,29 +649,31 @@ get_active_stripe(struct r5conf *conf, sector_t sector, int previous, int noblock, int noquiesce) { struct stripe_head *sh; + int hash = stripe_hash_locks_hash(sector); pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); - spin_lock_irq(&conf->device_lock); + spin_lock_irq(conf->hash_locks + hash); do { wait_event_lock_irq(conf->wait_for_stripe, conf->quiesce == 0 || noquiesce, - conf->device_lock); + *(conf->hash_locks + hash)); sh = __find_stripe(conf, sector, conf->generation - previous); if (!sh) { if (!conf->inactive_blocked) - sh = get_free_stripe(conf); + sh = get_free_stripe(conf, hash); if (noblock && sh == NULL) break; if (!sh) { conf->inactive_blocked = 1; - wait_event_lock_irq(conf->wait_for_stripe, - !list_empty(&conf->inactive_list) && - (atomic_read(&conf->active_stripes) - < (conf->max_nr_stripes *3/4) - || !conf->inactive_blocked), - conf->device_lock); + wait_event_lock_irq( + conf->wait_for_stripe, + !list_empty(conf->inactive_list + hash) && + (atomic_read(&conf->active_stripes) + < (conf->max_nr_stripes * 3 / 4) + || !conf->inactive_blocked), + *(conf->hash_locks + hash)); conf->inactive_blocked = 0; } else init_stripe(sh, sector, previous); @@ -585,9 +684,11 @@ get_active_stripe(struct r5conf *conf, sector_t sector, && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state) && !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state)); } else { + spin_lock(&conf->device_lock); if (!test_bit(STRIPE_HANDLE, &sh->state)) atomic_inc(&conf->active_stripes); if (list_empty(&sh->lru) && + !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state) && !test_bit(STRIPE_EXPANDING, &sh->state)) BUG(); list_del_init(&sh->lru); @@ -595,6 +696,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector, sh->group->stripes_cnt--; sh->group = NULL; } + spin_unlock(&conf->device_lock); } } } while (sh == NULL); @@ -602,7 +704,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector, if (sh) atomic_inc(&sh->count); - spin_unlock_irq(&conf->device_lock); + spin_unlock_irq(conf->hash_locks + hash); return sh; } @@ -758,7 +860,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) bi->bi_sector = (sh->sector + rdev->data_offset); if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) - bi->bi_rw |= REQ_FLUSH; + bi->bi_rw |= REQ_NOMERGE; bi->bi_vcnt = 1; bi->bi_io_vec[0].bv_len = STRIPE_SIZE; @@ -1582,7 +1684,7 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) put_cpu(); } -static int grow_one_stripe(struct r5conf *conf) +static int grow_one_stripe(struct r5conf *conf, int hash) { struct stripe_head *sh; sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL); @@ -1598,6 +1700,7 @@ static int grow_one_stripe(struct r5conf *conf) kmem_cache_free(conf->slab_cache, sh); return 0; } + sh->hash_lock_index = hash; /* we just created an active stripe so... 
*/ atomic_set(&sh->count, 1); atomic_inc(&conf->active_stripes); @@ -1610,6 +1713,7 @@ static int grow_stripes(struct r5conf *conf, int num) { struct kmem_cache *sc; int devs = max(conf->raid_disks, conf->previous_raid_disks); + int hash; if (conf->mddev->gendisk) sprintf(conf->cache_name[0], @@ -1627,9 +1731,13 @@ static int grow_stripes(struct r5conf *conf, int num) return 1; conf->slab_cache = sc; conf->pool_size = devs; - while (num--) - if (!grow_one_stripe(conf)) + hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; + while (num--) { + if (!grow_one_stripe(conf, hash)) return 1; + conf->max_nr_stripes++; + hash = (hash + 1) % NR_STRIPE_HASH_LOCKS; + } return 0; } @@ -1687,6 +1795,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) int err; struct kmem_cache *sc; int i; + int hash, cnt; if (newsize <= conf->pool_size) return 0; /* never bother to shrink */ @@ -1726,19 +1835,29 @@ static int resize_stripes(struct r5conf *conf, int newsize) * OK, we have enough stripes, start collecting inactive * stripes and copying them over */ + hash = 0; + cnt = 0; list_for_each_entry(nsh, &newstripes, lru) { - spin_lock_irq(&conf->device_lock); - wait_event_lock_irq(conf->wait_for_stripe, - !list_empty(&conf->inactive_list), - conf->device_lock); - osh = get_free_stripe(conf); - spin_unlock_irq(&conf->device_lock); + lock_device_hash_lock(conf, hash); + wait_event_cmd(conf->wait_for_stripe, + !list_empty(conf->inactive_list + hash), + unlock_device_hash_lock(conf, hash), + lock_device_hash_lock(conf, hash)); + osh = get_free_stripe(conf, hash); + unlock_device_hash_lock(conf, hash); atomic_set(&nsh->count, 1); for(i=0; i<conf->pool_size; i++) nsh->dev[i].page = osh->dev[i].page; for( ; i<newsize; i++) nsh->dev[i].page = NULL; + nsh->hash_lock_index = hash; kmem_cache_free(conf->slab_cache, osh); + cnt++; + if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + + !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) { + hash++; + cnt = 0; + } } kmem_cache_destroy(conf->slab_cache); @@ -1797,13 +1916,13 @@ static int resize_stripes(struct r5conf *conf, int newsize) return err; } -static int drop_one_stripe(struct r5conf *conf) +static int drop_one_stripe(struct r5conf *conf, int hash) { struct stripe_head *sh; - spin_lock_irq(&conf->device_lock); - sh = get_free_stripe(conf); - spin_unlock_irq(&conf->device_lock); + spin_lock_irq(conf->hash_locks + hash); + sh = get_free_stripe(conf, hash); + spin_unlock_irq(conf->hash_locks + hash); if (!sh) return 0; BUG_ON(atomic_read(&sh->count)); @@ -1815,8 +1934,10 @@ static int drop_one_stripe(struct r5conf *conf) static void shrink_stripes(struct r5conf *conf) { - while (drop_one_stripe(conf)) - ; + int hash; + for (hash = 0; hash < NR_STRIPE_HASH_LOCKS; hash++) + while (drop_one_stripe(conf, hash)) + ; if (conf->slab_cache) kmem_cache_destroy(conf->slab_cache); @@ -1921,6 +2042,9 @@ static void raid5_end_read_request(struct bio * bi, int error) mdname(conf->mddev), bdn); else retry = 1; + if (set_bad && test_bit(In_sync, &rdev->flags) + && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) + retry = 1; if (retry) if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { set_bit(R5_ReadError, &sh->dev[i].flags); @@ -3900,7 +4024,8 @@ static void raid5_activate_delayed(struct r5conf *conf) } } -static void activate_bit_delay(struct r5conf *conf) +static void activate_bit_delay(struct r5conf *conf, + struct list_head *temp_inactive_list) { /* device_lock is held */ struct list_head head; @@ -3908,9 +4033,11 @@ static void activate_bit_delay(struct r5conf 
*conf) list_del_init(&conf->bitmap_list); while (!list_empty(&head)) { struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); + int hash; list_del_init(&sh->lru); atomic_inc(&sh->count); - __release_stripe(conf, sh); + hash = sh->hash_lock_index; + __release_stripe(conf, sh, &temp_inactive_list[hash]); } } @@ -3926,7 +4053,7 @@ int md_raid5_congested(struct mddev *mddev, int bits) return 1; if (conf->quiesce) return 1; - if (list_empty_careful(&conf->inactive_list)) + if (atomic_read(&conf->empty_inactive_list_nr)) return 1; return 0; @@ -4256,6 +4383,7 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) struct raid5_plug_cb { struct blk_plug_cb cb; struct list_head list; + struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; }; static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) @@ -4266,6 +4394,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) struct mddev *mddev = cb->cb.data; struct r5conf *conf = mddev->private; int cnt = 0; + int hash; if (cb->list.next && !list_empty(&cb->list)) { spin_lock_irq(&conf->device_lock); @@ -4283,11 +4412,14 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) * STRIPE_ON_RELEASE_LIST could be set here. In that * case, the count is always > 1 here */ - __release_stripe(conf, sh); + hash = sh->hash_lock_index; + __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); cnt++; } spin_unlock_irq(&conf->device_lock); } + release_inactive_stripe_list(conf, cb->temp_inactive_list, + NR_STRIPE_HASH_LOCKS); if (mddev->queue) trace_block_unplug(mddev->queue, cnt, !from_schedule); kfree(cb); @@ -4308,8 +4440,12 @@ static void release_stripe_plug(struct mddev *mddev, cb = container_of(blk_cb, struct raid5_plug_cb, cb); - if (cb->list.next == NULL) + if (cb->list.next == NULL) { + int i; INIT_LIST_HEAD(&cb->list); + for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) + INIT_LIST_HEAD(cb->temp_inactive_list + i); + } if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) list_add_tail(&sh->lru, &cb->list); @@ -4692,14 +4828,19 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { /* Cannot proceed until we've updated the superblock... */ wait_event(conf->wait_for_overlap, - atomic_read(&conf->reshape_stripes)==0); + atomic_read(&conf->reshape_stripes)==0 + || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); + if (atomic_read(&conf->reshape_stripes) != 0) + return 0; mddev->reshape_position = conf->reshape_progress; mddev->curr_resync_completed = sector_nr; conf->reshape_checkpoint = jiffies; set_bit(MD_CHANGE_DEVS, &mddev->flags); md_wakeup_thread(mddev->thread); wait_event(mddev->sb_wait, mddev->flags == 0 || - kthread_should_stop()); + test_bit(MD_RECOVERY_INTR, &mddev->recovery)); + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) + return 0; spin_lock_irq(&conf->device_lock); conf->reshape_safe = mddev->reshape_position; spin_unlock_irq(&conf->device_lock); @@ -4782,7 +4923,10 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk >= mddev->resync_max - mddev->curr_resync_completed) { /* Cannot proceed until we've updated the superblock... 
*/ wait_event(conf->wait_for_overlap, - atomic_read(&conf->reshape_stripes) == 0); + atomic_read(&conf->reshape_stripes) == 0 + || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); + if (atomic_read(&conf->reshape_stripes) != 0) + goto ret; mddev->reshape_position = conf->reshape_progress; mddev->curr_resync_completed = sector_nr; conf->reshape_checkpoint = jiffies; @@ -4790,13 +4934,16 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk md_wakeup_thread(mddev->thread); wait_event(mddev->sb_wait, !test_bit(MD_CHANGE_DEVS, &mddev->flags) - || kthread_should_stop()); + || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) + goto ret; spin_lock_irq(&conf->device_lock); conf->reshape_safe = mddev->reshape_position; spin_unlock_irq(&conf->device_lock); wake_up(&conf->wait_for_overlap); sysfs_notify(&mddev->kobj, NULL, "sync_completed"); } +ret: return reshape_sectors; } @@ -4954,27 +5101,45 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) } static int handle_active_stripes(struct r5conf *conf, int group, - struct r5worker *worker) + struct r5worker *worker, + struct list_head *temp_inactive_list) { struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; - int i, batch_size = 0; + int i, batch_size = 0, hash; + bool release_inactive = false; while (batch_size < MAX_STRIPE_BATCH && (sh = __get_priority_stripe(conf, group)) != NULL) batch[batch_size++] = sh; - if (batch_size == 0) - return batch_size; + if (batch_size == 0) { + for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) + if (!list_empty(temp_inactive_list + i)) + break; + if (i == NR_STRIPE_HASH_LOCKS) + return batch_size; + release_inactive = true; + } spin_unlock_irq(&conf->device_lock); + release_inactive_stripe_list(conf, temp_inactive_list, + NR_STRIPE_HASH_LOCKS); + + if (release_inactive) { + spin_lock_irq(&conf->device_lock); + return 0; + } + for (i = 0; i < batch_size; i++) handle_stripe(batch[i]); cond_resched(); spin_lock_irq(&conf->device_lock); - for (i = 0; i < batch_size; i++) - __release_stripe(conf, batch[i]); + for (i = 0; i < batch_size; i++) { + hash = batch[i]->hash_lock_index; + __release_stripe(conf, batch[i], &temp_inactive_list[hash]); + } return batch_size; } @@ -4995,9 +5160,10 @@ static void raid5_do_work(struct work_struct *work) while (1) { int batch_size, released; - released = release_stripe_list(conf); + released = release_stripe_list(conf, worker->temp_inactive_list); - batch_size = handle_active_stripes(conf, group_id, worker); + batch_size = handle_active_stripes(conf, group_id, worker, + worker->temp_inactive_list); worker->working = false; if (!batch_size && !released) break; @@ -5036,7 +5202,7 @@ static void raid5d(struct md_thread *thread) struct bio *bio; int batch_size, released; - released = release_stripe_list(conf); + released = release_stripe_list(conf, conf->temp_inactive_list); if ( !list_empty(&conf->bitmap_list)) { @@ -5046,7 +5212,7 @@ static void raid5d(struct md_thread *thread) bitmap_unplug(mddev->bitmap); spin_lock_irq(&conf->device_lock); conf->seq_write = conf->seq_flush; - activate_bit_delay(conf); + activate_bit_delay(conf, conf->temp_inactive_list); } raid5_activate_delayed(conf); @@ -5060,7 +5226,8 @@ static void raid5d(struct md_thread *thread) handled++; } - batch_size = handle_active_stripes(conf, ANY_GROUP, NULL); + batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, + conf->temp_inactive_list); if (!batch_size && !released) break; handled += batch_size; @@ -5096,22 +5263,29 @@ 
raid5_set_cache_size(struct mddev *mddev, int size) { struct r5conf *conf = mddev->private; int err; + int hash; if (size <= 16 || size > 32768) return -EINVAL; + hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS; while (size < conf->max_nr_stripes) { - if (drop_one_stripe(conf)) + if (drop_one_stripe(conf, hash)) conf->max_nr_stripes--; else break; + hash--; + if (hash < 0) + hash = NR_STRIPE_HASH_LOCKS - 1; } err = md_allow_write(mddev); if (err) return err; + hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; while (size > conf->max_nr_stripes) { - if (grow_one_stripe(conf)) + if (grow_one_stripe(conf, hash)) conf->max_nr_stripes++; else break; + hash = (hash + 1) % NR_STRIPE_HASH_LOCKS; } return 0; } @@ -5199,15 +5373,18 @@ raid5_show_group_thread_cnt(struct mddev *mddev, char *page) return 0; } -static int alloc_thread_groups(struct r5conf *conf, int cnt); +static int alloc_thread_groups(struct r5conf *conf, int cnt, + int *group_cnt, + int *worker_cnt_per_group, + struct r5worker_group **worker_groups); static ssize_t raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) { struct r5conf *conf = mddev->private; unsigned long new; int err; - struct r5worker_group *old_groups; - int old_group_cnt; + struct r5worker_group *new_groups, *old_groups; + int group_cnt, worker_cnt_per_group; if (len >= PAGE_SIZE) return -EINVAL; @@ -5223,14 +5400,19 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) mddev_suspend(mddev); old_groups = conf->worker_groups; - old_group_cnt = conf->worker_cnt_per_group; + if (old_groups) + flush_workqueue(raid5_wq); + + err = alloc_thread_groups(conf, new, + &group_cnt, &worker_cnt_per_group, + &new_groups); + if (!err) { + spin_lock_irq(&conf->device_lock); + conf->group_cnt = group_cnt; + conf->worker_cnt_per_group = worker_cnt_per_group; + conf->worker_groups = new_groups; + spin_unlock_irq(&conf->device_lock); - conf->worker_groups = NULL; - err = alloc_thread_groups(conf, new); - if (err) { - conf->worker_groups = old_groups; - conf->worker_cnt_per_group = old_group_cnt; - } else { if (old_groups) kfree(old_groups[0].workers); kfree(old_groups); @@ -5260,40 +5442,47 @@ static struct attribute_group raid5_attrs_group = { .attrs = raid5_attrs, }; -static int alloc_thread_groups(struct r5conf *conf, int cnt) +static int alloc_thread_groups(struct r5conf *conf, int cnt, + int *group_cnt, + int *worker_cnt_per_group, + struct r5worker_group **worker_groups) { - int i, j; + int i, j, k; ssize_t size; struct r5worker *workers; - conf->worker_cnt_per_group = cnt; + *worker_cnt_per_group = cnt; if (cnt == 0) { - conf->worker_groups = NULL; + *group_cnt = 0; + *worker_groups = NULL; return 0; } - conf->group_cnt = num_possible_nodes(); + *group_cnt = num_possible_nodes(); size = sizeof(struct r5worker) * cnt; - workers = kzalloc(size * conf->group_cnt, GFP_NOIO); - conf->worker_groups = kzalloc(sizeof(struct r5worker_group) * - conf->group_cnt, GFP_NOIO); - if (!conf->worker_groups || !workers) { + workers = kzalloc(size * *group_cnt, GFP_NOIO); + *worker_groups = kzalloc(sizeof(struct r5worker_group) * + *group_cnt, GFP_NOIO); + if (!*worker_groups || !workers) { kfree(workers); - kfree(conf->worker_groups); - conf->worker_groups = NULL; + kfree(*worker_groups); return -ENOMEM; } - for (i = 0; i < conf->group_cnt; i++) { + for (i = 0; i < *group_cnt; i++) { struct r5worker_group *group; - group = &conf->worker_groups[i]; + group = worker_groups[i]; INIT_LIST_HEAD(&group->handle_list); group->conf = 
conf; group->workers = workers + i * cnt; for (j = 0; j < cnt; j++) { - group->workers[j].group = group; - INIT_WORK(&group->workers[j].work, raid5_do_work); + struct r5worker *worker = group->workers + j; + worker->group = group; + INIT_WORK(&worker->work, raid5_do_work); + + for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++) + INIT_LIST_HEAD(worker->temp_inactive_list + k); } } @@ -5444,6 +5633,9 @@ static struct r5conf *setup_conf(struct mddev *mddev) struct md_rdev *rdev; struct disk_info *disk; char pers_name[6]; + int i; + int group_cnt, worker_cnt_per_group; + struct r5worker_group *new_group; if (mddev->new_level != 5 && mddev->new_level != 4 @@ -5478,7 +5670,12 @@ static struct r5conf *setup_conf(struct mddev *mddev) if (conf == NULL) goto abort; /* Don't enable multi-threading by default*/ - if (alloc_thread_groups(conf, 0)) + if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group, + &new_group)) { + conf->group_cnt = group_cnt; + conf->worker_cnt_per_group = worker_cnt_per_group; + conf->worker_groups = new_group; + } else goto abort; spin_lock_init(&conf->device_lock); seqcount_init(&conf->gen_lock); @@ -5488,7 +5685,6 @@ static struct r5conf *setup_conf(struct mddev *mddev) INIT_LIST_HEAD(&conf->hold_list); INIT_LIST_HEAD(&conf->delayed_list); INIT_LIST_HEAD(&conf->bitmap_list); - INIT_LIST_HEAD(&conf->inactive_list); init_llist_head(&conf->released_stripes); atomic_set(&conf->active_stripes, 0); atomic_set(&conf->preread_active_stripes, 0); @@ -5514,6 +5710,21 @@ static struct r5conf *setup_conf(struct mddev *mddev) if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) goto abort; + /* We init hash_locks[0] separately to that it can be used + * as the reference lock in the spin_lock_nest_lock() call + * in lock_all_device_hash_locks_irq in order to convince + * lockdep that we know what we are doing. 
+ */ + spin_lock_init(conf->hash_locks); + for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) + spin_lock_init(conf->hash_locks + i); + + for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) + INIT_LIST_HEAD(conf->inactive_list + i); + + for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) + INIT_LIST_HEAD(conf->temp_inactive_list + i); + conf->level = mddev->new_level; if (raid5_alloc_percpu(conf) != 0) goto abort; @@ -5554,7 +5765,6 @@ static struct r5conf *setup_conf(struct mddev *mddev) else conf->max_degraded = 1; conf->algorithm = mddev->new_layout; - conf->max_nr_stripes = NR_STRIPES; conf->reshape_progress = mddev->reshape_position; if (conf->reshape_progress != MaxSector) { conf->prev_chunk_sectors = mddev->chunk_sectors; @@ -5563,7 +5773,8 @@ static struct r5conf *setup_conf(struct mddev *mddev) memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; - if (grow_stripes(conf, conf->max_nr_stripes)) { + atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); + if (grow_stripes(conf, NR_STRIPES)) { printk(KERN_ERR "md/raid:%s: couldn't allocate %dkB for buffers\n", mdname(mddev), memory); @@ -6369,12 +6580,18 @@ static int raid5_start_reshape(struct mddev *mddev) if (!mddev->sync_thread) { mddev->recovery = 0; spin_lock_irq(&conf->device_lock); + write_seqcount_begin(&conf->gen_lock); mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; + mddev->new_chunk_sectors = + conf->chunk_sectors = conf->prev_chunk_sectors; + mddev->new_layout = conf->algorithm = conf->prev_algo; rdev_for_each(rdev, mddev) rdev->new_data_offset = rdev->data_offset; smp_wmb(); + conf->generation --; conf->reshape_progress = MaxSector; mddev->reshape_position = MaxSector; + write_seqcount_end(&conf->gen_lock); spin_unlock_irq(&conf->device_lock); return -EAGAIN; } @@ -6462,27 +6679,28 @@ static void raid5_quiesce(struct mddev *mddev, int state) break; case 1: /* stop all writes */ - spin_lock_irq(&conf->device_lock); + lock_all_device_hash_locks_irq(conf); /* '2' tells resync/reshape to pause so that all * active stripes can drain */ conf->quiesce = 2; - wait_event_lock_irq(conf->wait_for_stripe, + wait_event_cmd(conf->wait_for_stripe, atomic_read(&conf->active_stripes) == 0 && atomic_read(&conf->active_aligned_reads) == 0, - conf->device_lock); + unlock_all_device_hash_locks_irq(conf), + lock_all_device_hash_locks_irq(conf)); conf->quiesce = 1; - spin_unlock_irq(&conf->device_lock); + unlock_all_device_hash_locks_irq(conf); /* allow reshape to continue */ wake_up(&conf->wait_for_overlap); break; case 0: /* re-enable writes */ - spin_lock_irq(&conf->device_lock); + lock_all_device_hash_locks_irq(conf); conf->quiesce = 0; wake_up(&conf->wait_for_stripe); wake_up(&conf->wait_for_overlap); - spin_unlock_irq(&conf->device_lock); + unlock_all_device_hash_locks_irq(conf); break; } } diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 2113ffa82c7a..01ad8ae8f578 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -49,7 +49,7 @@ * can't distinguish between a clean block that has been generated * from parity calculations, and a clean block that has been * successfully written to the spare ( or to parity when resyncing). - * To distingush these states we have a stripe bit STRIPE_INSYNC that + * To distinguish these states we have a stripe bit STRIPE_INSYNC that * is set whenever a write is scheduled to the spare, or to the parity * disc if there is no spare. 
A sync request clears this bit, and * when we find it set with no buffers locked, we know the sync is @@ -205,6 +205,7 @@ struct stripe_head { short pd_idx; /* parity disk index */ short qd_idx; /* 'Q' disk index for raid6 */ short ddf_layout;/* use DDF ordering to calculate Q */ + short hash_lock_index; unsigned long state; /* state flags */ atomic_t count; /* nr of active thread/requests */ int bm_seq; /* sequence number for bitmap flushes */ @@ -367,9 +368,18 @@ struct disk_info { struct md_rdev *rdev, *replacement; }; +/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64. + * This is because we sometimes take all the spinlocks + * and creating that much locking depth can cause + * problems. + */ +#define NR_STRIPE_HASH_LOCKS 8 +#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1) + struct r5worker { struct work_struct work; struct r5worker_group *group; + struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; bool working; }; @@ -382,6 +392,8 @@ struct r5worker_group { struct r5conf { struct hlist_head *stripe_hashtbl; + /* only protect corresponding hash list and inactive_list */ + spinlock_t hash_locks[NR_STRIPE_HASH_LOCKS]; struct mddev *mddev; int chunk_sectors; int level, algorithm; @@ -462,7 +474,8 @@ struct r5conf { * Free stripes pool */ atomic_t active_stripes; - struct list_head inactive_list; + struct list_head inactive_list[NR_STRIPE_HASH_LOCKS]; + atomic_t empty_inactive_list_nr; struct llist_head released_stripes; wait_queue_head_t wait_for_stripe; wait_queue_head_t wait_for_overlap; @@ -477,6 +490,7 @@ struct r5conf { * the new thread here until we fully activate the array. */ struct md_thread *thread; + struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; struct r5worker_group *worker_groups; int group_cnt; int worker_cnt_per_group; diff --git a/drivers/media/common/b2c2/flexcop-sram.c b/drivers/media/common/b2c2/flexcop-sram.c index f2199e43e803..185c285f70fc 100644 --- a/drivers/media/common/b2c2/flexcop-sram.c +++ b/drivers/media/common/b2c2/flexcop-sram.c @@ -85,7 +85,7 @@ static void flexcop_sram_write(struct adapter *adapter, u32 bank, u32 addr, u8 * while (((read_reg_dw(adapter, 0x700) & 0x80000000) != 0) && (retries > 0)) { mdelay(1); retries--; - }; + } if (retries == 0) printk("%s: SRAM timeout\n", __func__); @@ -110,7 +110,7 @@ static void flex_sram_read(struct adapter *adapter, u32 bank, u32 addr, u8 *buf, while (((read_reg_dw(adapter, 0x700) & 0x80000000) != 0) && (retries > 0)) { mdelay(1); retries--; - }; + } if (retries == 0) printk("%s: SRAM timeout\n", __func__); @@ -122,7 +122,7 @@ static void flex_sram_read(struct adapter *adapter, u32 bank, u32 addr, u8 *buf, while (((read_reg_dw(adapter, 0x700) & 0x80000000) != 0) && (retries > 0)) { mdelay(1); retries--; - }; + } if (retries == 0) printk("%s: SRAM timeout\n", __func__); diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c index bb6ee5191eb1..34b0d0ddeef3 100644 --- a/drivers/media/common/saa7146/saa7146_core.c +++ b/drivers/media/common/saa7146/saa7146_core.c @@ -411,7 +411,7 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent saa7146_write(dev, MC2, 0xf8000000); /* request an interrupt for the saa7146 */ - err = request_irq(pci->irq, interrupt_hw, IRQF_SHARED | IRQF_DISABLED, + err = request_irq(pci->irq, interrupt_hw, IRQF_SHARED, dev->name, dev); if (err < 0) { ERR("request_irq() failed\n"); @@ -524,8 +524,6 @@ static void saa7146_remove_one(struct pci_dev *pdev) DEB_EE("dev:%p\n", dev); 
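Editorial aside on the raid5 changes earlier in this section: the single inactive_list protected by device_lock is split into NR_STRIPE_HASH_LOCKS buckets, each with its own spinlock, so stripe allocation and release on different buckets no longer contend on one lock. The sketch below is not taken from the patch; it only illustrates the idea under the raid5.h declarations shown above, and the helper names and the use of hash_long() from linux/hash.h are assumptions.

/* Editorial sketch only; helper names are illustrative, not the patch's code. */
static inline int stripe_hash_locks_hash(sector_t sect)
{
	/* assumption: bucket chosen by hashing the stripe's sector number */
	return hash_long(sect, ilog2(NR_STRIPE_HASH_LOCKS));
}

static struct stripe_head *try_get_free_stripe(struct r5conf *conf, sector_t sect)
{
	int hash = stripe_hash_locks_hash(sect);
	struct stripe_head *sh = NULL;

	spin_lock_irq(conf->hash_locks + hash);	/* one bucket lock, not device_lock */
	if (!list_empty(conf->inactive_list + hash)) {
		sh = list_first_entry(conf->inactive_list + hash,
				      struct stripe_head, lru);
		list_del_init(&sh->lru);
		if (list_empty(conf->inactive_list + hash))
			/* this bucket just went empty */
			atomic_inc(&conf->empty_inactive_list_nr);
	}
	spin_unlock_irq(conf->hash_locks + hash);
	return sh;
}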
dev->ext->detach(dev); - /* Zero the PCI drvdata after use. */ - pci_set_drvdata(pdev, NULL); /* shut down all video dma transfers */ saa7146_write(dev, MC1, 0x00ff0000); diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c index a142f7942a01..050984c5b1e3 100644 --- a/drivers/media/common/siano/smscoreapi.c +++ b/drivers/media/common/siano/smscoreapi.c @@ -922,8 +922,8 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev, u32 i, *ptr; u8 *payload = firmware->payload; int rc = 0; - firmware->start_address = le32_to_cpu(firmware->start_address); - firmware->length = le32_to_cpu(firmware->length); + firmware->start_address = le32_to_cpup((__le32 *)&firmware->start_address); + firmware->length = le32_to_cpup((__le32 *)&firmware->length); mem_address = firmware->start_address; @@ -982,7 +982,7 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev, if (rc < 0) goto exit_fw_download; - sms_err("sending MSG_SMS_DATA_VALIDITY_REQ expecting 0x%x", + sms_debug("sending MSG_SMS_DATA_VALIDITY_REQ expecting 0x%x", calc_checksum); SMS_INIT_MSG(&msg->x_msg_header, MSG_SMS_DATA_VALIDITY_REQ, sizeof(msg->x_msg_header) + @@ -1562,7 +1562,7 @@ void smscore_onresponse(struct smscore_device_t *coredev, { struct sms_msg_data *validity = (struct sms_msg_data *) phdr; - sms_err("MSG_SMS_DATA_VALIDITY_RES, checksum = 0x%x", + sms_debug("MSG_SMS_DATA_VALIDITY_RES, checksum = 0x%x", validity->msg_data[0]); complete(&coredev->data_validity_done); break; diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c index 63676a8b024c..85151efdd94c 100644 --- a/drivers/media/common/siano/smsdvb-main.c +++ b/drivers/media/common/siano/smsdvb-main.c @@ -44,14 +44,14 @@ module_param_named(debug, sms_dbg, int, 0644); MODULE_PARM_DESC(debug, "set debug level (info=1, adv=2 (or-able))"); -u32 sms_to_guard_interval_table[] = { +static u32 sms_to_guard_interval_table[] = { [0] = GUARD_INTERVAL_1_32, [1] = GUARD_INTERVAL_1_16, [2] = GUARD_INTERVAL_1_8, [3] = GUARD_INTERVAL_1_4, }; -u32 sms_to_code_rate_table[] = { +static u32 sms_to_code_rate_table[] = { [0] = FEC_1_2, [1] = FEC_2_3, [2] = FEC_3_4, @@ -60,14 +60,14 @@ u32 sms_to_code_rate_table[] = { }; -u32 sms_to_hierarchy_table[] = { +static u32 sms_to_hierarchy_table[] = { [0] = HIERARCHY_NONE, [1] = HIERARCHY_1, [2] = HIERARCHY_2, [3] = HIERARCHY_4, }; -u32 sms_to_modulation_table[] = { +static u32 sms_to_modulation_table[] = { [0] = QPSK, [1] = QAM_16, [2] = QAM_64, diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c index 3485655fa082..58de4410c525 100644 --- a/drivers/media/dvb-core/dvb_demux.c +++ b/drivers/media/dvb-core/dvb_demux.c @@ -476,7 +476,9 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf, size_t count) { - spin_lock(&demux->lock); + unsigned long flags; + + spin_lock_irqsave(&demux->lock, flags); while (count--) { if (buf[0] == 0x47) @@ -484,7 +486,7 @@ void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf, buf += 188; } - spin_unlock(&demux->lock); + spin_unlock_irqrestore(&demux->lock, flags); } EXPORT_SYMBOL(dvb_dmx_swfilter_packets); @@ -519,8 +521,9 @@ static inline void _dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, { int p = 0, i, j; const u8 *q; + unsigned long flags; - spin_lock(&demux->lock); + spin_lock_irqsave(&demux->lock, flags); if (demux->tsbufp) { /* tsbuf[0] is 
now 0x47. */ i = demux->tsbufp; @@ -564,7 +567,7 @@ static inline void _dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, } bailout: - spin_unlock(&demux->lock); + spin_unlock_irqrestore(&demux->lock, flags); } void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count) @@ -581,11 +584,13 @@ EXPORT_SYMBOL(dvb_dmx_swfilter_204); void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count) { - spin_lock(&demux->lock); + unsigned long flags; + + spin_lock_irqsave(&demux->lock, flags); demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts, DMX_OK); - spin_unlock(&demux->lock); + spin_unlock_irqrestore(&demux->lock, flags); } EXPORT_SYMBOL(dvb_dmx_swfilter_raw); diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig index 0e2ec6f73b05..bddbab43a2df 100644 --- a/drivers/media/dvb-frontends/Kconfig +++ b/drivers/media/dvb-frontends/Kconfig @@ -200,6 +200,13 @@ config DVB_CX24116 help A DVB-S/S2 tuner module. Say Y when you want to support this frontend. +config DVB_CX24117 + tristate "Conexant CX24117 based" + depends on DVB_CORE && I2C + default m if !MEDIA_SUBDRV_AUTOSELECT + help + A Dual DVB-S/S2 tuner module. Say Y when you want to support this frontend. + config DVB_SI21XX tristate "Silicon Labs SI21XX based" depends on DVB_CORE && I2C diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile index cebc0faffab5..f9cb43d9aed9 100644 --- a/drivers/media/dvb-frontends/Makefile +++ b/drivers/media/dvb-frontends/Makefile @@ -76,6 +76,7 @@ obj-$(CONFIG_DVB_ATBM8830) += atbm8830.o obj-$(CONFIG_DVB_DUMMY_FE) += dvb_dummy_fe.o obj-$(CONFIG_DVB_AF9013) += af9013.o obj-$(CONFIG_DVB_CX24116) += cx24116.o +obj-$(CONFIG_DVB_CX24117) += cx24117.o obj-$(CONFIG_DVB_SI21XX) += si21xx.o obj-$(CONFIG_DVB_STV0288) += stv0288.o obj-$(CONFIG_DVB_STB6000) += stb6000.o diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c index a204f2828820..fb504f1e9125 100644 --- a/drivers/media/dvb-frontends/af9013.c +++ b/drivers/media/dvb-frontends/af9013.c @@ -24,6 +24,9 @@ #include "af9013_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + struct af9013_state { struct i2c_adapter *i2c; struct dvb_frontend fe; @@ -50,16 +53,23 @@ static int af9013_wr_regs_i2c(struct af9013_state *priv, u8 mbox, u16 reg, const u8 *val, int len) { int ret; - u8 buf[3+len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = priv->config.i2c_addr, .flags = 0, - .len = sizeof(buf), + .len = 3 + len, .buf = buf, } }; + if (3 + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + buf[0] = (reg >> 8) & 0xff; buf[1] = (reg >> 0) & 0xff; buf[2] = mbox; diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c index a777b4b944eb..30ee59052157 100644 --- a/drivers/media/dvb-frontends/af9033.c +++ b/drivers/media/dvb-frontends/af9033.c @@ -21,6 +21,9 @@ #include "af9033_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + struct af9033_state { struct i2c_adapter *i2c; struct dvb_frontend fe; @@ -40,16 +43,23 @@ static int af9033_wr_regs(struct af9033_state *state, u32 reg, const u8 *val, int len) { int ret; - u8 buf[3 + len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = state->cfg.i2c_addr, .flags = 0, - .len = sizeof(buf), + .len = 3 + len, .buf = buf, } }; + if (3 + len > 
sizeof(buf)) { + dev_warn(&state->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + buf[0] = (reg >> 16) & 0xff; buf[1] = (reg >> 8) & 0xff; buf[2] = (reg >> 0) & 0xff; @@ -161,7 +171,14 @@ static int af9033_wr_reg_val_tab(struct af9033_state *state, const struct reg_val *tab, int tab_len) { int ret, i, j; - u8 buf[tab_len]; + u8 buf[MAX_XFER_SIZE]; + + if (tab_len > sizeof(buf)) { + dev_warn(&state->i2c->dev, + "%s: i2c wr len=%d is too big!\n", + KBUILD_MODNAME, tab_len); + return -EINVAL; + } dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len); diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c index 1b77909c0c71..39a29dd29519 100644 --- a/drivers/media/dvb-frontends/bcm3510.c +++ b/drivers/media/dvb-frontends/bcm3510.c @@ -44,6 +44,9 @@ #include "bcm3510.h" #include "bcm3510_priv.h" +/* Max transfer size done by bcm3510_do_hab_cmd() function */ +#define MAX_XFER_SIZE 128 + struct bcm3510_state { struct i2c_adapter* i2c; @@ -201,9 +204,19 @@ static int bcm3510_hab_send_request(struct bcm3510_state *st, u8 *buf, int len) static int bcm3510_do_hab_cmd(struct bcm3510_state *st, u8 cmd, u8 msgid, u8 *obuf, u8 olen, u8 *ibuf, u8 ilen) { - u8 ob[olen+2],ib[ilen+2]; + u8 ob[MAX_XFER_SIZE], ib[MAX_XFER_SIZE]; int ret = 0; + if (ilen + 2 > sizeof(ib)) { + deb_hab("do_hab_cmd: ilen=%d is too big!\n", ilen); + return -EINVAL; + } + + if (olen + 2 > sizeof(ob)) { + deb_hab("do_hab_cmd: olen=%d is too big!\n", olen); + return -EINVAL; + } + ob[0] = cmd; ob[1] = msgid; memcpy(&ob[2],obuf,olen); diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c index 0cd6927e654c..95b981cd7115 100644 --- a/drivers/media/dvb-frontends/cx24110.c +++ b/drivers/media/dvb-frontends/cx24110.c @@ -378,7 +378,7 @@ static int cx24110_set_voltage (struct dvb_frontend* fe, fe_sec_voltage_t voltag return cx24110_writereg(state,0x76,(cx24110_readreg(state,0x76)&0x3b)|0x40); default: return -EINVAL; - }; + } } static int cx24110_diseqc_send_burst(struct dvb_frontend* fe, fe_sec_mini_cmd_t burst) diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c new file mode 100644 index 000000000000..476b422ccf19 --- /dev/null +++ b/drivers/media/dvb-frontends/cx24117.c @@ -0,0 +1,1650 @@ +/* + Conexant cx24117/cx24132 - Dual DVBS/S2 Satellite demod/tuner driver + + Copyright (C) 2013 Luis Alves <ljalvs@gmail.com> + July, 6th 2013 + First release based on cx24116 driver by: + Steven Toth and Georg Acher, Darron Broad, Igor Liplianin + Cards currently supported: + TBS6980 - Dual DVBS/S2 PCIe card + TBS6981 - Dual DVBS/S2 PCIe card + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+*/ + +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/firmware.h> + +#include "tuner-i2c.h" +#include "dvb_frontend.h" +#include "cx24117.h" + + +#define CX24117_DEFAULT_FIRMWARE "dvb-fe-cx24117.fw" +#define CX24117_SEARCH_RANGE_KHZ 5000 + +/* known registers */ +#define CX24117_REG_COMMAND (0x00) /* command buffer */ +#define CX24117_REG_EXECUTE (0x1f) /* execute command */ + +#define CX24117_REG_FREQ3_0 (0x34) /* frequency */ +#define CX24117_REG_FREQ2_0 (0x35) +#define CX24117_REG_FREQ1_0 (0x36) +#define CX24117_REG_STATE0 (0x39) +#define CX24117_REG_SSTATUS0 (0x3a) /* demod0 signal high / status */ +#define CX24117_REG_SIGNAL0 (0x3b) +#define CX24117_REG_FREQ5_0 (0x3c) /* +-freq */ +#define CX24117_REG_FREQ6_0 (0x3d) +#define CX24117_REG_SRATE2_0 (0x3e) /* +- 1000 * srate */ +#define CX24117_REG_SRATE1_0 (0x3f) +#define CX24117_REG_QUALITY2_0 (0x40) +#define CX24117_REG_QUALITY1_0 (0x41) + +#define CX24117_REG_BER4_0 (0x47) +#define CX24117_REG_BER3_0 (0x48) +#define CX24117_REG_BER2_0 (0x49) +#define CX24117_REG_BER1_0 (0x4a) +#define CX24117_REG_DVBS_UCB2_0 (0x4b) +#define CX24117_REG_DVBS_UCB1_0 (0x4c) +#define CX24117_REG_DVBS2_UCB2_0 (0x50) +#define CX24117_REG_DVBS2_UCB1_0 (0x51) +#define CX24117_REG_QSTATUS0 (0x93) +#define CX24117_REG_CLKDIV0 (0xe6) +#define CX24117_REG_RATEDIV0 (0xf0) + + +#define CX24117_REG_FREQ3_1 (0x55) /* frequency */ +#define CX24117_REG_FREQ2_1 (0x56) +#define CX24117_REG_FREQ1_1 (0x57) +#define CX24117_REG_STATE1 (0x5a) +#define CX24117_REG_SSTATUS1 (0x5b) /* demod1 signal high / status */ +#define CX24117_REG_SIGNAL1 (0x5c) +#define CX24117_REG_FREQ5_1 (0x5d) /* +- freq */ +#define CX24117_REG_FREQ4_1 (0x5e) +#define CX24117_REG_SRATE2_1 (0x5f) +#define CX24117_REG_SRATE1_1 (0x60) +#define CX24117_REG_QUALITY2_1 (0x61) +#define CX24117_REG_QUALITY1_1 (0x62) +#define CX24117_REG_BER4_1 (0x68) +#define CX24117_REG_BER3_1 (0x69) +#define CX24117_REG_BER2_1 (0x6a) +#define CX24117_REG_BER1_1 (0x6b) +#define CX24117_REG_DVBS_UCB2_1 (0x6c) +#define CX24117_REG_DVBS_UCB1_1 (0x6d) +#define CX24117_REG_DVBS2_UCB2_1 (0x71) +#define CX24117_REG_DVBS2_UCB1_1 (0x72) +#define CX24117_REG_QSTATUS1 (0x9f) +#define CX24117_REG_CLKDIV1 (0xe7) +#define CX24117_REG_RATEDIV1 (0xf1) + + +/* arg buffer size */ +#define CX24117_ARGLEN (0x1e) + +/* rolloff */ +#define CX24117_ROLLOFF_020 (0x00) +#define CX24117_ROLLOFF_025 (0x01) +#define CX24117_ROLLOFF_035 (0x02) + +/* pilot bit */ +#define CX24117_PILOT_OFF (0x00) +#define CX24117_PILOT_ON (0x40) +#define CX24117_PILOT_AUTO (0x80) + +/* signal status */ +#define CX24117_HAS_SIGNAL (0x01) +#define CX24117_HAS_CARRIER (0x02) +#define CX24117_HAS_VITERBI (0x04) +#define CX24117_HAS_SYNCLOCK (0x08) +#define CX24117_STATUS_MASK (0x0f) +#define CX24117_SIGNAL_MASK (0xc0) + + +/* arg offset for DiSEqC */ +#define CX24117_DISEQC_DEMOD (1) +#define CX24117_DISEQC_BURST (2) +#define CX24117_DISEQC_ARG3_2 (3) /* unknown value=2 */ +#define CX24117_DISEQC_ARG4_0 (4) /* unknown value=0 */ +#define CX24117_DISEQC_ARG5_0 (5) /* unknown value=0 */ +#define CX24117_DISEQC_MSGLEN (6) +#define CX24117_DISEQC_MSGOFS (7) + +/* DiSEqC burst */ +#define CX24117_DISEQC_MINI_A (0) +#define CX24117_DISEQC_MINI_B (1) + + +#define CX24117_PNE (0) /* 0 disabled / 2 enabled */ +#define CX24117_OCC (1) /* 0 disabled / 1 enabled */ + + +enum cmds { + CMD_SET_VCO = 0x10, + CMD_TUNEREQUEST = 0x11, + CMD_MPEGCONFIG = 0x13, + CMD_TUNERINIT = 0x14, + 
CMD_LNBSEND = 0x21, /* Formerly CMD_SEND_DISEQC */ + CMD_LNBDCLEVEL = 0x22, + CMD_SET_TONE = 0x23, + CMD_UPDFWVERS = 0x35, + CMD_TUNERSLEEP = 0x36, +}; + +static LIST_HEAD(hybrid_tuner_instance_list); +static DEFINE_MUTEX(cx24117_list_mutex); + +/* The Demod/Tuner can't easily provide these, we cache them */ +struct cx24117_tuning { + u32 frequency; + u32 symbol_rate; + fe_spectral_inversion_t inversion; + fe_code_rate_t fec; + + fe_delivery_system_t delsys; + fe_modulation_t modulation; + fe_pilot_t pilot; + fe_rolloff_t rolloff; + + /* Demod values */ + u8 fec_val; + u8 fec_mask; + u8 inversion_val; + u8 pilot_val; + u8 rolloff_val; +}; + +/* Basic commands that are sent to the firmware */ +struct cx24117_cmd { + u8 len; + u8 args[CX24117_ARGLEN]; +}; + +/* common to both fe's */ +struct cx24117_priv { + u8 demod_address; + struct i2c_adapter *i2c; + u8 skip_fw_load; + struct mutex fe_lock; + + /* Used for sharing this struct between demods */ + struct tuner_i2c_props i2c_props; + struct list_head hybrid_tuner_instance_list; +}; + +/* one per each fe */ +struct cx24117_state { + struct cx24117_priv *priv; + struct dvb_frontend frontend; + + struct cx24117_tuning dcur; + struct cx24117_tuning dnxt; + struct cx24117_cmd dsec_cmd; + + int demod; +}; + +/* modfec (modulation and FEC) lookup table */ +/* Check cx24116.c for a detailed description of each field */ +static struct cx24117_modfec { + fe_delivery_system_t delivery_system; + fe_modulation_t modulation; + fe_code_rate_t fec; + u8 mask; /* In DVBS mode this is used to autodetect */ + u8 val; /* Passed to the firmware to indicate mode selection */ +} cx24117_modfec_modes[] = { + /* QPSK. For unknown rates we set hardware to auto detect 0xfe 0x30 */ + + /*mod fec mask val */ + { SYS_DVBS, QPSK, FEC_NONE, 0xfe, 0x30 }, + { SYS_DVBS, QPSK, FEC_1_2, 0x02, 0x2e }, /* 00000010 00101110 */ + { SYS_DVBS, QPSK, FEC_2_3, 0x04, 0x2f }, /* 00000100 00101111 */ + { SYS_DVBS, QPSK, FEC_3_4, 0x08, 0x30 }, /* 00001000 00110000 */ + { SYS_DVBS, QPSK, FEC_4_5, 0xfe, 0x30 }, /* 000?0000 ? */ + { SYS_DVBS, QPSK, FEC_5_6, 0x20, 0x31 }, /* 00100000 00110001 */ + { SYS_DVBS, QPSK, FEC_6_7, 0xfe, 0x30 }, /* 0?000000 ? */ + { SYS_DVBS, QPSK, FEC_7_8, 0x80, 0x32 }, /* 10000000 00110010 */ + { SYS_DVBS, QPSK, FEC_8_9, 0xfe, 0x30 }, /* 0000000? ? */ + { SYS_DVBS, QPSK, FEC_AUTO, 0xfe, 0x30 }, + /* NBC-QPSK */ + { SYS_DVBS2, QPSK, FEC_NONE, 0x00, 0x00 }, + { SYS_DVBS2, QPSK, FEC_1_2, 0x00, 0x04 }, + { SYS_DVBS2, QPSK, FEC_3_5, 0x00, 0x05 }, + { SYS_DVBS2, QPSK, FEC_2_3, 0x00, 0x06 }, + { SYS_DVBS2, QPSK, FEC_3_4, 0x00, 0x07 }, + { SYS_DVBS2, QPSK, FEC_4_5, 0x00, 0x08 }, + { SYS_DVBS2, QPSK, FEC_5_6, 0x00, 0x09 }, + { SYS_DVBS2, QPSK, FEC_8_9, 0x00, 0x0a }, + { SYS_DVBS2, QPSK, FEC_9_10, 0x00, 0x0b }, + { SYS_DVBS2, QPSK, FEC_AUTO, 0x00, 0x00 }, + /* 8PSK */ + { SYS_DVBS2, PSK_8, FEC_NONE, 0x00, 0x00 }, + { SYS_DVBS2, PSK_8, FEC_3_5, 0x00, 0x0c }, + { SYS_DVBS2, PSK_8, FEC_2_3, 0x00, 0x0d }, + { SYS_DVBS2, PSK_8, FEC_3_4, 0x00, 0x0e }, + { SYS_DVBS2, PSK_8, FEC_5_6, 0x00, 0x0f }, + { SYS_DVBS2, PSK_8, FEC_8_9, 0x00, 0x10 }, + { SYS_DVBS2, PSK_8, FEC_9_10, 0x00, 0x11 }, + { SYS_DVBS2, PSK_8, FEC_AUTO, 0x00, 0x00 }, + /* + * 'val' can be found in the FECSTATUS register when tuning. + * FECSTATUS will give the actual FEC in use if tuning was successful. 
+ */ +}; + + +static int cx24117_writereg(struct cx24117_state *state, u8 reg, u8 data) +{ + u8 buf[] = { reg, data }; + struct i2c_msg msg = { .addr = state->priv->demod_address, + .flags = 0, .buf = buf, .len = 2 }; + int ret; + + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d i2c wr @0x%02x=0x%02x\n", + __func__, state->demod, reg, data); + + ret = i2c_transfer(state->priv->i2c, &msg, 1); + if (ret < 0) { + dev_warn(&state->priv->i2c->dev, + "%s: demod%d i2c wr err(%i) @0x%02x=0x%02x\n", + KBUILD_MODNAME, state->demod, ret, reg, data); + return ret; + } + return 0; +} + +static int cx24117_writecmd(struct cx24117_state *state, + struct cx24117_cmd *cmd) +{ + struct i2c_msg msg; + u8 buf[CX24117_ARGLEN+1]; + int ret; + + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d i2c wr cmd len=%d\n", + __func__, state->demod, cmd->len); + + buf[0] = CX24117_REG_COMMAND; + memcpy(&buf[1], cmd->args, cmd->len); + + msg.addr = state->priv->demod_address; + msg.flags = 0; + msg.len = cmd->len+1; + msg.buf = buf; + ret = i2c_transfer(state->priv->i2c, &msg, 1); + if (ret < 0) { + dev_warn(&state->priv->i2c->dev, + "%s: demod%d i2c wr cmd err(%i) len=%d\n", + KBUILD_MODNAME, state->demod, ret, cmd->len); + return ret; + } + return 0; +} + +static int cx24117_readreg(struct cx24117_state *state, u8 reg) +{ + int ret; + u8 recv = 0; + struct i2c_msg msg[] = { + { .addr = state->priv->demod_address, .flags = 0, + .buf = ®, .len = 1 }, + { .addr = state->priv->demod_address, .flags = I2C_M_RD, + .buf = &recv, .len = 1 } + }; + + ret = i2c_transfer(state->priv->i2c, msg, 2); + if (ret < 0) { + dev_warn(&state->priv->i2c->dev, + "%s: demod%d i2c rd err(%d) @0x%x\n", + KBUILD_MODNAME, state->demod, ret, reg); + return ret; + } + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d i2c rd @0x%02x=0x%02x\n", + __func__, state->demod, reg, recv); + + return recv; +} + +static int cx24117_readregN(struct cx24117_state *state, + u8 reg, u8 *buf, int len) +{ + int ret; + struct i2c_msg msg[] = { + { .addr = state->priv->demod_address, .flags = 0, + .buf = ®, .len = 1 }, + { .addr = state->priv->demod_address, .flags = I2C_M_RD, + .buf = buf, .len = len } + }; + + ret = i2c_transfer(state->priv->i2c, msg, 2); + if (ret < 0) { + dev_warn(&state->priv->i2c->dev, + "%s: demod%d i2c rd err(%d) @0x%x\n", + KBUILD_MODNAME, state->demod, ret, reg); + return ret; + } + return 0; +} + +static int cx24117_set_inversion(struct cx24117_state *state, + fe_spectral_inversion_t inversion) +{ + dev_dbg(&state->priv->i2c->dev, "%s(%d) demod%d\n", + __func__, inversion, state->demod); + + switch (inversion) { + case INVERSION_OFF: + state->dnxt.inversion_val = 0x00; + break; + case INVERSION_ON: + state->dnxt.inversion_val = 0x04; + break; + case INVERSION_AUTO: + state->dnxt.inversion_val = 0x0C; + break; + default: + return -EINVAL; + } + + state->dnxt.inversion = inversion; + + return 0; +} + +static int cx24117_lookup_fecmod(struct cx24117_state *state, + fe_delivery_system_t d, fe_modulation_t m, fe_code_rate_t f) +{ + int i, ret = -EINVAL; + + dev_dbg(&state->priv->i2c->dev, + "%s(demod(0x%02x,0x%02x) demod%d\n", + __func__, m, f, state->demod); + + for (i = 0; i < ARRAY_SIZE(cx24117_modfec_modes); i++) { + if ((d == cx24117_modfec_modes[i].delivery_system) && + (m == cx24117_modfec_modes[i].modulation) && + (f == cx24117_modfec_modes[i].fec)) { + ret = i; + break; + } + } + + return ret; +} + +static int cx24117_set_fec(struct cx24117_state *state, + fe_delivery_system_t delsys, fe_modulation_t mod, fe_code_rate_t fec) +{ + int ret; 
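To make the lookup performed here concrete, the snippet below shows what two typical tuning requests resolve to; the mask/val pairs are read straight from the cx24117_modfec_modes[] table above, and the snippet is illustrative only (it assumes a valid state pointer).

/* Illustrative lookups against cx24117_modfec_modes[] */
int idx;

idx = cx24117_lookup_fecmod(state, SYS_DVBS2, QPSK, FEC_3_4);
/* cx24117_modfec_modes[idx].val == 0x07, .mask == 0x00: explicit DVB-S2 MODCOD */

idx = cx24117_lookup_fecmod(state, SYS_DVBS, QPSK, FEC_AUTO);
/* cx24117_modfec_modes[idx].val == 0x30, .mask == 0xfe: firmware auto-detects the DVB-S code rate */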
+ + dev_dbg(&state->priv->i2c->dev, + "%s(0x%02x,0x%02x) demod%d\n", + __func__, mod, fec, state->demod); + + ret = cx24117_lookup_fecmod(state, delsys, mod, fec); + if (ret < 0) + return ret; + + state->dnxt.fec = fec; + state->dnxt.fec_val = cx24117_modfec_modes[ret].val; + state->dnxt.fec_mask = cx24117_modfec_modes[ret].mask; + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d mask/val = 0x%02x/0x%02x\n", __func__, + state->demod, state->dnxt.fec_mask, state->dnxt.fec_val); + + return 0; +} + +static int cx24117_set_symbolrate(struct cx24117_state *state, u32 rate) +{ + dev_dbg(&state->priv->i2c->dev, "%s(%d) demod%d\n", + __func__, rate, state->demod); + + state->dnxt.symbol_rate = rate; + + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d symbol_rate = %d\n", + __func__, state->demod, rate); + + return 0; +} + +static int cx24117_load_firmware(struct dvb_frontend *fe, + const struct firmware *fw); + +static int cx24117_firmware_ondemand(struct dvb_frontend *fe) +{ + struct cx24117_state *state = fe->demodulator_priv; + const struct firmware *fw; + int ret = 0; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d skip_fw_load=%d\n", + __func__, state->demod, state->priv->skip_fw_load); + + if (state->priv->skip_fw_load) + return 0; + + /* check if firmware if already running */ + if (cx24117_readreg(state, 0xeb) != 0xa) { + /* Load firmware */ + /* request the firmware, this will block until loaded */ + dev_dbg(&state->priv->i2c->dev, + "%s: Waiting for firmware upload (%s)...\n", + __func__, CX24117_DEFAULT_FIRMWARE); + ret = request_firmware(&fw, CX24117_DEFAULT_FIRMWARE, + state->priv->i2c->dev.parent); + dev_dbg(&state->priv->i2c->dev, + "%s: Waiting for firmware upload(2)...\n", __func__); + if (ret) { + dev_err(&state->priv->i2c->dev, + "%s: No firmware uploaded " + "(timeout or file not found?)\n", __func__); + return ret; + } + + /* Make sure we don't recurse back through here + * during loading */ + state->priv->skip_fw_load = 1; + + ret = cx24117_load_firmware(fe, fw); + if (ret) + dev_err(&state->priv->i2c->dev, + "%s: Writing firmware failed\n", __func__); + release_firmware(fw); + + dev_info(&state->priv->i2c->dev, + "%s: Firmware upload %s\n", __func__, + ret == 0 ? 
"complete" : "failed"); + + /* Ensure firmware is always loaded if required */ + state->priv->skip_fw_load = 0; + } + + return ret; +} + +/* Take a basic firmware command structure, format it + * and forward it for processing + */ +static int cx24117_cmd_execute_nolock(struct dvb_frontend *fe, + struct cx24117_cmd *cmd) +{ + struct cx24117_state *state = fe->demodulator_priv; + int i, ret; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n", + __func__, state->demod); + + /* Load the firmware if required */ + ret = cx24117_firmware_ondemand(fe); + if (ret != 0) + return ret; + + /* Write the command */ + cx24117_writecmd(state, cmd); + + /* Start execution and wait for cmd to terminate */ + cx24117_writereg(state, CX24117_REG_EXECUTE, 0x01); + i = 0; + while (cx24117_readreg(state, CX24117_REG_EXECUTE)) { + msleep(20); + if (i++ > 40) { + /* Avoid looping forever if the firmware does + not respond */ + dev_warn(&state->priv->i2c->dev, + "%s() Firmware not responding\n", __func__); + return -EIO; + } + } + return 0; +} + +static int cx24117_cmd_execute(struct dvb_frontend *fe, struct cx24117_cmd *cmd) +{ + struct cx24117_state *state = fe->demodulator_priv; + int ret; + + mutex_lock(&state->priv->fe_lock); + ret = cx24117_cmd_execute_nolock(fe, cmd); + mutex_unlock(&state->priv->fe_lock); + + return ret; +} + +static int cx24117_load_firmware(struct dvb_frontend *fe, + const struct firmware *fw) +{ + struct cx24117_state *state = fe->demodulator_priv; + struct cx24117_cmd cmd; + int i, ret; + unsigned char vers[4]; + + struct i2c_msg msg; + u8 *buf; + + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d FW is %zu bytes (%02x %02x .. %02x %02x)\n", + __func__, state->demod, fw->size, fw->data[0], fw->data[1], + fw->data[fw->size - 2], fw->data[fw->size - 1]); + + cx24117_writereg(state, 0xea, 0x00); + cx24117_writereg(state, 0xea, 0x01); + cx24117_writereg(state, 0xea, 0x00); + + cx24117_writereg(state, 0xce, 0x92); + + cx24117_writereg(state, 0xfb, 0x00); + cx24117_writereg(state, 0xfc, 0x00); + + cx24117_writereg(state, 0xc3, 0x04); + cx24117_writereg(state, 0xc4, 0x04); + + cx24117_writereg(state, 0xce, 0x00); + cx24117_writereg(state, 0xcf, 0x00); + + cx24117_writereg(state, 0xea, 0x00); + cx24117_writereg(state, 0xeb, 0x0c); + cx24117_writereg(state, 0xec, 0x06); + cx24117_writereg(state, 0xed, 0x05); + cx24117_writereg(state, 0xee, 0x03); + cx24117_writereg(state, 0xef, 0x05); + + cx24117_writereg(state, 0xf3, 0x03); + cx24117_writereg(state, 0xf4, 0x44); + + cx24117_writereg(state, CX24117_REG_RATEDIV0, 0x04); + cx24117_writereg(state, CX24117_REG_CLKDIV0, 0x02); + + cx24117_writereg(state, CX24117_REG_RATEDIV1, 0x04); + cx24117_writereg(state, CX24117_REG_CLKDIV1, 0x02); + + cx24117_writereg(state, 0xf2, 0x04); + cx24117_writereg(state, 0xe8, 0x02); + cx24117_writereg(state, 0xea, 0x01); + cx24117_writereg(state, 0xc8, 0x00); + cx24117_writereg(state, 0xc9, 0x00); + cx24117_writereg(state, 0xca, 0x00); + cx24117_writereg(state, 0xcb, 0x00); + cx24117_writereg(state, 0xcc, 0x00); + cx24117_writereg(state, 0xcd, 0x00); + cx24117_writereg(state, 0xe4, 0x03); + cx24117_writereg(state, 0xeb, 0x0a); + + cx24117_writereg(state, 0xfb, 0x00); + cx24117_writereg(state, 0xe0, 0x76); + cx24117_writereg(state, 0xf7, 0x81); + cx24117_writereg(state, 0xf8, 0x00); + cx24117_writereg(state, 0xf9, 0x00); + + buf = kmalloc(fw->size + 1, GFP_KERNEL); + if (buf == NULL) { + state->priv->skip_fw_load = 0; + return -ENOMEM; + } + + /* fw upload reg */ + buf[0] = 0xfa; + memcpy(&buf[1], fw->data, fw->size); + 
+ /* prepare i2c message to send */ + msg.addr = state->priv->demod_address; + msg.flags = 0; + msg.len = fw->size + 1; + msg.buf = buf; + + /* send fw */ + ret = i2c_transfer(state->priv->i2c, &msg, 1); + if (ret < 0) + return ret; + + kfree(buf); + + cx24117_writereg(state, 0xf7, 0x0c); + cx24117_writereg(state, 0xe0, 0x00); + + /* CMD 1B */ + cmd.args[0] = 0x1b; + cmd.args[1] = 0x00; + cmd.args[2] = 0x01; + cmd.args[3] = 0x00; + cmd.len = 4; + ret = cx24117_cmd_execute_nolock(fe, &cmd); + if (ret != 0) + goto error; + + /* CMD 10 */ + cmd.args[0] = CMD_SET_VCO; + cmd.args[1] = 0x06; + cmd.args[2] = 0x2b; + cmd.args[3] = 0xd8; + cmd.args[4] = 0xa5; + cmd.args[5] = 0xee; + cmd.args[6] = 0x03; + cmd.args[7] = 0x9d; + cmd.args[8] = 0xfc; + cmd.args[9] = 0x06; + cmd.args[10] = 0x02; + cmd.args[11] = 0x9d; + cmd.args[12] = 0xfc; + cmd.len = 13; + ret = cx24117_cmd_execute_nolock(fe, &cmd); + if (ret != 0) + goto error; + + /* CMD 15 */ + cmd.args[0] = 0x15; + cmd.args[1] = 0x00; + cmd.args[2] = 0x01; + cmd.args[3] = 0x00; + cmd.args[4] = 0x00; + cmd.args[5] = 0x01; + cmd.args[6] = 0x01; + cmd.args[7] = 0x01; + cmd.args[8] = 0x00; + cmd.args[9] = 0x05; + cmd.args[10] = 0x02; + cmd.args[11] = 0x02; + cmd.args[12] = 0x00; + cmd.len = 13; + ret = cx24117_cmd_execute_nolock(fe, &cmd); + if (ret != 0) + goto error; + + /* CMD 13 */ + cmd.args[0] = CMD_MPEGCONFIG; + cmd.args[1] = 0x00; + cmd.args[2] = 0x00; + cmd.args[3] = 0x00; + cmd.args[4] = 0x01; + cmd.args[5] = 0x00; + cmd.len = 6; + ret = cx24117_cmd_execute_nolock(fe, &cmd); + if (ret != 0) + goto error; + + /* CMD 14 */ + for (i = 0; i < 2; i++) { + cmd.args[0] = CMD_TUNERINIT; + cmd.args[1] = (u8) i; + cmd.args[2] = 0x00; + cmd.args[3] = 0x05; + cmd.args[4] = 0x00; + cmd.args[5] = 0x00; + cmd.args[6] = 0x55; + cmd.args[7] = 0x00; + cmd.len = 8; + ret = cx24117_cmd_execute_nolock(fe, &cmd); + if (ret != 0) + goto error; + } + + cx24117_writereg(state, 0xce, 0xc0); + cx24117_writereg(state, 0xcf, 0x00); + cx24117_writereg(state, 0xe5, 0x04); + + /* Firmware CMD 35: Get firmware version */ + cmd.args[0] = CMD_UPDFWVERS; + cmd.len = 2; + for (i = 0; i < 4; i++) { + cmd.args[1] = i; + ret = cx24117_cmd_execute_nolock(fe, &cmd); + if (ret != 0) + goto error; + vers[i] = cx24117_readreg(state, 0x33); + } + dev_info(&state->priv->i2c->dev, + "%s: FW version %i.%i.%i.%i\n", __func__, + vers[0], vers[1], vers[2], vers[3]); + return 0; +error: + state->priv->skip_fw_load = 0; + dev_err(&state->priv->i2c->dev, "%s() Error running FW.\n", __func__); + return ret; +} + +static int cx24117_read_status(struct dvb_frontend *fe, fe_status_t *status) +{ + struct cx24117_state *state = fe->demodulator_priv; + int lock; + + lock = cx24117_readreg(state, + (state->demod == 0) ? CX24117_REG_SSTATUS0 : + CX24117_REG_SSTATUS1) & + CX24117_STATUS_MASK; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d status = 0x%02x\n", + __func__, state->demod, lock); + + *status = 0; + + if (lock & CX24117_HAS_SIGNAL) + *status |= FE_HAS_SIGNAL; + if (lock & CX24117_HAS_CARRIER) + *status |= FE_HAS_CARRIER; + if (lock & CX24117_HAS_VITERBI) + *status |= FE_HAS_VITERBI; + if (lock & CX24117_HAS_SYNCLOCK) + *status |= FE_HAS_SYNC | FE_HAS_LOCK; + + return 0; +} + +static int cx24117_read_ber(struct dvb_frontend *fe, u32 *ber) +{ + struct cx24117_state *state = fe->demodulator_priv; + int ret; + u8 buf[4]; + u8 base_reg = (state->demod == 0) ? 
+ CX24117_REG_BER4_0 : + CX24117_REG_BER4_1; + + ret = cx24117_readregN(state, base_reg, buf, 4); + if (ret != 0) + return ret; + + *ber = (buf[0] << 24) | (buf[1] << 16) | + (buf[1] << 8) | buf[0]; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d ber=0x%04x\n", + __func__, state->demod, *ber); + + return 0; +} + +static int cx24117_read_signal_strength(struct dvb_frontend *fe, + u16 *signal_strength) +{ + struct cx24117_state *state = fe->demodulator_priv; + struct cx24117_cmd cmd; + int ret; + u16 sig_reading; + u8 buf[2]; + u8 reg = (state->demod == 0) ? + CX24117_REG_SSTATUS0 : CX24117_REG_SSTATUS1; + + /* Firmware CMD 1A */ + cmd.args[0] = 0x1a; + cmd.args[1] = (u8) state->demod; + cmd.len = 2; + ret = cx24117_cmd_execute(fe, &cmd); + if (ret != 0) + return ret; + + ret = cx24117_readregN(state, reg, buf, 2); + if (ret != 0) + return ret; + sig_reading = ((buf[0] & CX24117_SIGNAL_MASK) << 2) | buf[1]; + + *signal_strength = -100 * sig_reading + 94324; + + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d raw / cooked = 0x%04x / 0x%04x\n", + __func__, state->demod, sig_reading, *signal_strength); + + return 0; +} + +static int cx24117_read_snr(struct dvb_frontend *fe, u16 *snr) +{ + struct cx24117_state *state = fe->demodulator_priv; + int ret; + u8 buf[2]; + u8 reg = (state->demod == 0) ? + CX24117_REG_QUALITY2_0 : CX24117_REG_QUALITY2_1; + + ret = cx24117_readregN(state, reg, buf, 2); + if (ret != 0) + return ret; + + *snr = (buf[0] << 8) | buf[1]; + + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d snr = 0x%04x\n", + __func__, state->demod, *snr); + + return ret; +} + +static int cx24117_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) +{ + struct cx24117_state *state = fe->demodulator_priv; + fe_delivery_system_t delsys = fe->dtv_property_cache.delivery_system; + int ret; + u8 buf[2]; + u8 reg = (state->demod == 0) ? + CX24117_REG_DVBS_UCB2_0 : + CX24117_REG_DVBS_UCB2_1; + + switch (delsys) { + case SYS_DVBS: + break; + case SYS_DVBS2: + reg += (CX24117_REG_DVBS2_UCB2_0 - CX24117_REG_DVBS_UCB2_0); + break; + default: + return -EINVAL; + } + + ret = cx24117_readregN(state, reg, buf, 2); + if (ret != 0) + return ret; + *ucblocks = (buf[0] << 8) | buf[1]; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d ucb=0x%04x\n", + __func__, state->demod, *ucblocks); + + return 0; +} + +/* Overwrite the current tuning params, we are about to tune */ +static void cx24117_clone_params(struct dvb_frontend *fe) +{ + struct cx24117_state *state = fe->demodulator_priv; + state->dcur = state->dnxt; +} + +/* Wait for LNB */ +static int cx24117_wait_for_lnb(struct dvb_frontend *fe) +{ + struct cx24117_state *state = fe->demodulator_priv; + int i; + u8 val, reg = (state->demod == 0) ? CX24117_REG_QSTATUS0 : + CX24117_REG_QSTATUS1; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d qstatus = 0x%02x\n", + __func__, state->demod, cx24117_readreg(state, reg)); + + /* Wait for up to 300 ms */ + for (i = 0; i < 10; i++) { + val = cx24117_readreg(state, reg) & 0x01; + if (val != 0) + return 0; + msleep(30); + } + + dev_warn(&state->priv->i2c->dev, "%s: demod%d LNB not ready\n", + KBUILD_MODNAME, state->demod); + + return -ETIMEDOUT; /* -EBUSY ? */ +} + +static int cx24117_set_voltage(struct dvb_frontend *fe, + fe_sec_voltage_t voltage) +{ + struct cx24117_state *state = fe->demodulator_priv; + struct cx24117_cmd cmd; + int ret; + u8 reg = (state->demod == 0) ? 0x10 : 0x20; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d %s\n", + __func__, state->demod, + voltage == SEC_VOLTAGE_13 ? 
"SEC_VOLTAGE_13" : + voltage == SEC_VOLTAGE_18 ? "SEC_VOLTAGE_18" : + "SEC_VOLTAGE_OFF"); + + /* CMD 32 */ + cmd.args[0] = 0x32; + cmd.args[1] = reg; + cmd.args[2] = reg; + cmd.len = 3; + ret = cx24117_cmd_execute(fe, &cmd); + if (ret) + return ret; + + if ((voltage == SEC_VOLTAGE_13) || + (voltage == SEC_VOLTAGE_18)) { + /* CMD 33 */ + cmd.args[0] = 0x33; + cmd.args[1] = reg; + cmd.args[2] = reg; + cmd.len = 3; + ret = cx24117_cmd_execute(fe, &cmd); + if (ret != 0) + return ret; + + ret = cx24117_wait_for_lnb(fe); + if (ret != 0) + return ret; + + /* Wait for voltage/min repeat delay */ + msleep(100); + + /* CMD 22 - CMD_LNBDCLEVEL */ + cmd.args[0] = CMD_LNBDCLEVEL; + cmd.args[1] = state->demod ? 0 : 1; + cmd.args[2] = (voltage == SEC_VOLTAGE_18 ? 0x01 : 0x00); + cmd.len = 3; + + /* Min delay time before DiSEqC send */ + msleep(20); + } else { + cmd.args[0] = 0x33; + cmd.args[1] = 0x00; + cmd.args[2] = reg; + cmd.len = 3; + } + + return cx24117_cmd_execute(fe, &cmd); +} + +static int cx24117_set_tone(struct dvb_frontend *fe, + fe_sec_tone_mode_t tone) +{ + struct cx24117_state *state = fe->demodulator_priv; + struct cx24117_cmd cmd; + int ret; + + dev_dbg(&state->priv->i2c->dev, "%s(%d) demod%d\n", + __func__, state->demod, tone); + if ((tone != SEC_TONE_ON) && (tone != SEC_TONE_OFF)) { + dev_warn(&state->priv->i2c->dev, "%s: demod%d invalid tone=%d\n", + KBUILD_MODNAME, state->demod, tone); + return -EINVAL; + } + + /* Wait for LNB ready */ + ret = cx24117_wait_for_lnb(fe); + if (ret != 0) + return ret; + + /* Min delay time after DiSEqC send */ + msleep(20); + + /* Set the tone */ + /* CMD 23 - CMD_SET_TONE */ + cmd.args[0] = CMD_SET_TONE; + cmd.args[1] = (state->demod ? 0 : 1); + cmd.args[2] = 0x00; + cmd.args[3] = 0x00; + cmd.len = 5; + switch (tone) { + case SEC_TONE_ON: + cmd.args[4] = 0x01; + break; + case SEC_TONE_OFF: + cmd.args[4] = 0x00; + break; + } + + msleep(20); + + return cx24117_cmd_execute(fe, &cmd); +} + +/* Initialise DiSEqC */ +static int cx24117_diseqc_init(struct dvb_frontend *fe) +{ + struct cx24117_state *state = fe->demodulator_priv; + + /* Prepare a DiSEqC command */ + state->dsec_cmd.args[0] = CMD_LNBSEND; + + /* demod */ + state->dsec_cmd.args[CX24117_DISEQC_DEMOD] = state->demod ? 0 : 1; + + /* DiSEqC burst */ + state->dsec_cmd.args[CX24117_DISEQC_BURST] = CX24117_DISEQC_MINI_A; + + /* Unknown */ + state->dsec_cmd.args[CX24117_DISEQC_ARG3_2] = 0x02; + state->dsec_cmd.args[CX24117_DISEQC_ARG4_0] = 0x00; + + /* Continuation flag? 
*/ + state->dsec_cmd.args[CX24117_DISEQC_ARG5_0] = 0x00; + + /* DiSEqC message length */ + state->dsec_cmd.args[CX24117_DISEQC_MSGLEN] = 0x00; + + /* Command length */ + state->dsec_cmd.len = 7; + + return 0; +} + +/* Send DiSEqC message */ +static int cx24117_send_diseqc_msg(struct dvb_frontend *fe, + struct dvb_diseqc_master_cmd *d) +{ + struct cx24117_state *state = fe->demodulator_priv; + int i, ret; + + /* Dump DiSEqC message */ + dev_dbg(&state->priv->i2c->dev, "%s: demod %d (", + __func__, state->demod); + for (i = 0; i < d->msg_len; i++) + dev_dbg(&state->priv->i2c->dev, "0x%02x ", d->msg[i]); + dev_dbg(&state->priv->i2c->dev, ")\n"); + + /* Validate length */ + if (d->msg_len > 15) + return -EINVAL; + + /* DiSEqC message */ + for (i = 0; i < d->msg_len; i++) + state->dsec_cmd.args[CX24117_DISEQC_MSGOFS + i] = d->msg[i]; + + /* DiSEqC message length */ + state->dsec_cmd.args[CX24117_DISEQC_MSGLEN] = d->msg_len; + + /* Command length */ + state->dsec_cmd.len = CX24117_DISEQC_MSGOFS + + state->dsec_cmd.args[CX24117_DISEQC_MSGLEN]; + + /* + * Message is sent with derived else cached burst + * + * WRITE PORT GROUP COMMAND 38 + * + * 0/A/A: E0 10 38 F0..F3 + * 1/B/B: E0 10 38 F4..F7 + * 2/C/A: E0 10 38 F8..FB + * 3/D/B: E0 10 38 FC..FF + * + * databyte[3]= 8421:8421 + * ABCD:WXYZ + * CLR :SET + * + * WX= PORT SELECT 0..3 (X=TONEBURST) + * Y = VOLTAGE (0=13V, 1=18V) + * Z = BAND (0=LOW, 1=HIGH(22K)) + */ + if (d->msg_len >= 4 && d->msg[2] == 0x38) + state->dsec_cmd.args[CX24117_DISEQC_BURST] = + ((d->msg[3] & 4) >> 2); + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d burst=%d\n", + __func__, state->demod, + state->dsec_cmd.args[CX24117_DISEQC_BURST]); + + /* Wait for LNB ready */ + ret = cx24117_wait_for_lnb(fe); + if (ret != 0) + return ret; + + /* Wait for voltage/min repeat delay */ + msleep(100); + + /* Command */ + ret = cx24117_cmd_execute(fe, &state->dsec_cmd); + if (ret != 0) + return ret; + /* + * Wait for send + * + * Eutelsat spec: + * >15ms delay + (XXX determine if FW does this, see set_tone) + * 13.5ms per byte + + * >15ms delay + + * 12.5ms burst + + * >15ms delay (XXX determine if FW does this, see set_tone) + */ + msleep((state->dsec_cmd.args[CX24117_DISEQC_MSGLEN] << 4) + 60); + + return 0; +} + +/* Send DiSEqC burst */ +static int cx24117_diseqc_send_burst(struct dvb_frontend *fe, + fe_sec_mini_cmd_t burst) +{ + struct cx24117_state *state = fe->demodulator_priv; + + dev_dbg(&state->priv->i2c->dev, "%s(%d) demod=%d\n", + __func__, burst, state->demod); + + /* DiSEqC burst */ + if (burst == SEC_MINI_A) + state->dsec_cmd.args[CX24117_DISEQC_BURST] = + CX24117_DISEQC_MINI_A; + else if (burst == SEC_MINI_B) + state->dsec_cmd.args[CX24117_DISEQC_BURST] = + CX24117_DISEQC_MINI_B; + else + return -EINVAL; + + return 0; +} + +static int cx24117_get_priv(struct cx24117_priv **priv, + struct i2c_adapter *i2c, u8 client_address) +{ + int ret; + + mutex_lock(&cx24117_list_mutex); + ret = hybrid_tuner_request_state(struct cx24117_priv, (*priv), + hybrid_tuner_instance_list, i2c, client_address, "cx24117"); + mutex_unlock(&cx24117_list_mutex); + + return ret; +} + +static void cx24117_release_priv(struct cx24117_priv *priv) +{ + mutex_lock(&cx24117_list_mutex); + if (priv != NULL) + hybrid_tuner_release_state(priv); + mutex_unlock(&cx24117_list_mutex); +} + +static void cx24117_release(struct dvb_frontend *fe) +{ + struct cx24117_state *state = fe->demodulator_priv; + dev_dbg(&state->priv->i2c->dev, "%s demod%d\n", + __func__, state->demod); + cx24117_release_priv(state->priv); + 
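A worked example of the burst derivation documented in the comment block above, illustrative only: for the "write port group" message E0 10 38 F4 (the 1/B/B row of that table), bit 2 of the data byte carries the toneburst, so the cached burst becomes mini-B.

/* illustrative decode of msg E0 10 38 F4 */
u8 data = 0xf4;
u8 burst = (data & 4) >> 2;	/* 1 == CX24117_DISEQC_MINI_B */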
kfree(state); +} + +static struct dvb_frontend_ops cx24117_ops; + +struct dvb_frontend *cx24117_attach(const struct cx24117_config *config, + struct i2c_adapter *i2c) +{ + struct cx24117_state *state = NULL; + struct cx24117_priv *priv = NULL; + int demod = 0; + + /* get the common data struct for both demods */ + demod = cx24117_get_priv(&priv, i2c, config->demod_address); + + switch (demod) { + case 0: + dev_err(&state->priv->i2c->dev, + "%s: Error attaching frontend %d\n", + KBUILD_MODNAME, demod); + goto error1; + break; + case 1: + /* new priv instance */ + priv->i2c = i2c; + priv->demod_address = config->demod_address; + mutex_init(&priv->fe_lock); + break; + default: + /* existing priv instance */ + break; + } + + /* allocate memory for the internal state */ + state = kzalloc(sizeof(struct cx24117_state), GFP_KERNEL); + if (state == NULL) + goto error2; + + state->demod = demod - 1; + state->priv = priv; + + /* test i2c bus for ack */ + if (demod == 0) { + if (cx24117_readreg(state, 0x00) < 0) + goto error3; + } + + dev_info(&state->priv->i2c->dev, + "%s: Attaching frontend %d\n", + KBUILD_MODNAME, state->demod); + + /* create dvb_frontend */ + memcpy(&state->frontend.ops, &cx24117_ops, + sizeof(struct dvb_frontend_ops)); + state->frontend.demodulator_priv = state; + return &state->frontend; + +error3: + kfree(state); +error2: + cx24117_release_priv(priv); +error1: + return NULL; +} +EXPORT_SYMBOL_GPL(cx24117_attach); + +/* + * Initialise or wake up device + * + * Power config will reset and load initial firmware if required + */ +static int cx24117_initfe(struct dvb_frontend *fe) +{ + struct cx24117_state *state = fe->demodulator_priv; + struct cx24117_cmd cmd; + int ret; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n", + __func__, state->demod); + + mutex_lock(&state->priv->fe_lock); + + /* Firmware CMD 36: Power config */ + cmd.args[0] = CMD_TUNERSLEEP; + cmd.args[1] = (state->demod ? 1 : 0); + cmd.args[2] = 0; + cmd.len = 3; + ret = cx24117_cmd_execute_nolock(fe, &cmd); + if (ret != 0) + goto exit; + + ret = cx24117_diseqc_init(fe); + if (ret != 0) + goto exit; + + /* CMD 3C */ + cmd.args[0] = 0x3c; + cmd.args[1] = (state->demod ? 1 : 0); + cmd.args[2] = 0x10; + cmd.args[3] = 0x10; + cmd.len = 4; + ret = cx24117_cmd_execute_nolock(fe, &cmd); + if (ret != 0) + goto exit; + + /* CMD 34 */ + cmd.args[0] = 0x34; + cmd.args[1] = (state->demod ? 1 : 0); + cmd.args[2] = CX24117_OCC; + cmd.len = 3; + ret = cx24117_cmd_execute_nolock(fe, &cmd); + +exit: + mutex_unlock(&state->priv->fe_lock); + + return ret; +} + +/* + * Put device to sleep + */ +static int cx24117_sleep(struct dvb_frontend *fe) +{ + struct cx24117_state *state = fe->demodulator_priv; + struct cx24117_cmd cmd; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n", + __func__, state->demod); + + /* Firmware CMD 36: Power config */ + cmd.args[0] = CMD_TUNERSLEEP; + cmd.args[1] = (state->demod ? 1 : 0); + cmd.args[2] = 1; + cmd.len = 3; + return cx24117_cmd_execute(fe, &cmd); +} + +/* dvb-core told us to tune, the tv property cache will be complete, + * it's safe for is to pull values and use them for tuning purposes. 
+ */ +static int cx24117_set_frontend(struct dvb_frontend *fe) +{ + struct cx24117_state *state = fe->demodulator_priv; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + struct cx24117_cmd cmd; + fe_status_t tunerstat; + int i, status, ret, retune = 1; + u8 reg_clkdiv, reg_ratediv; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n", + __func__, state->demod); + + switch (c->delivery_system) { + case SYS_DVBS: + dev_dbg(&state->priv->i2c->dev, "%s() demod%d DVB-S\n", + __func__, state->demod); + + /* Only QPSK is supported for DVB-S */ + if (c->modulation != QPSK) { + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d unsupported modulation (%d)\n", + __func__, state->demod, c->modulation); + return -EINVAL; + } + + /* Pilot doesn't exist in DVB-S, turn bit off */ + state->dnxt.pilot_val = CX24117_PILOT_OFF; + + /* DVB-S only supports 0.35 */ + state->dnxt.rolloff_val = CX24117_ROLLOFF_035; + break; + + case SYS_DVBS2: + dev_dbg(&state->priv->i2c->dev, "%s() demod%d DVB-S2\n", + __func__, state->demod); + + /* + * NBC 8PSK/QPSK with DVB-S is supported for DVB-S2, + * but not hardware auto detection + */ + if (c->modulation != PSK_8 && c->modulation != QPSK) { + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d unsupported modulation (%d)\n", + __func__, state->demod, c->modulation); + return -EOPNOTSUPP; + } + + switch (c->pilot) { + case PILOT_AUTO: + state->dnxt.pilot_val = CX24117_PILOT_AUTO; + break; + case PILOT_OFF: + state->dnxt.pilot_val = CX24117_PILOT_OFF; + break; + case PILOT_ON: + state->dnxt.pilot_val = CX24117_PILOT_ON; + break; + default: + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d unsupported pilot mode (%d)\n", + __func__, state->demod, c->pilot); + return -EOPNOTSUPP; + } + + switch (c->rolloff) { + case ROLLOFF_20: + state->dnxt.rolloff_val = CX24117_ROLLOFF_020; + break; + case ROLLOFF_25: + state->dnxt.rolloff_val = CX24117_ROLLOFF_025; + break; + case ROLLOFF_35: + state->dnxt.rolloff_val = CX24117_ROLLOFF_035; + break; + case ROLLOFF_AUTO: + state->dnxt.rolloff_val = CX24117_ROLLOFF_035; + /* soft-auto rolloff */ + retune = 3; + break; + default: + dev_warn(&state->priv->i2c->dev, + "%s: demod%d unsupported rolloff (%d)\n", + KBUILD_MODNAME, state->demod, c->rolloff); + return -EOPNOTSUPP; + } + break; + + default: + dev_warn(&state->priv->i2c->dev, + "%s: demod %d unsupported delivery system (%d)\n", + KBUILD_MODNAME, state->demod, c->delivery_system); + return -EINVAL; + } + + state->dnxt.delsys = c->delivery_system; + state->dnxt.modulation = c->modulation; + state->dnxt.frequency = c->frequency; + state->dnxt.pilot = c->pilot; + state->dnxt.rolloff = c->rolloff; + + ret = cx24117_set_inversion(state, c->inversion); + if (ret != 0) + return ret; + + ret = cx24117_set_fec(state, + c->delivery_system, c->modulation, c->fec_inner); + if (ret != 0) + return ret; + + ret = cx24117_set_symbolrate(state, c->symbol_rate); + if (ret != 0) + return ret; + + /* discard the 'current' tuning parameters and prepare to tune */ + cx24117_clone_params(fe); + + dev_dbg(&state->priv->i2c->dev, + "%s: delsys = %d\n", __func__, state->dcur.delsys); + dev_dbg(&state->priv->i2c->dev, + "%s: modulation = %d\n", __func__, state->dcur.modulation); + dev_dbg(&state->priv->i2c->dev, + "%s: frequency = %d\n", __func__, state->dcur.frequency); + dev_dbg(&state->priv->i2c->dev, + "%s: pilot = %d (val = 0x%02x)\n", __func__, + state->dcur.pilot, state->dcur.pilot_val); + dev_dbg(&state->priv->i2c->dev, + "%s: retune = %d\n", __func__, retune); + dev_dbg(&state->priv->i2c->dev, 
+ "%s: rolloff = %d (val = 0x%02x)\n", __func__, + state->dcur.rolloff, state->dcur.rolloff_val); + dev_dbg(&state->priv->i2c->dev, + "%s: symbol_rate = %d\n", __func__, state->dcur.symbol_rate); + dev_dbg(&state->priv->i2c->dev, + "%s: FEC = %d (mask/val = 0x%02x/0x%02x)\n", __func__, + state->dcur.fec, state->dcur.fec_mask, state->dcur.fec_val); + dev_dbg(&state->priv->i2c->dev, + "%s: Inversion = %d (val = 0x%02x)\n", __func__, + state->dcur.inversion, state->dcur.inversion_val); + + /* Prepare a tune request */ + cmd.args[0] = CMD_TUNEREQUEST; + + /* demod */ + cmd.args[1] = state->demod; + + /* Frequency */ + cmd.args[2] = (state->dcur.frequency & 0xff0000) >> 16; + cmd.args[3] = (state->dcur.frequency & 0x00ff00) >> 8; + cmd.args[4] = (state->dcur.frequency & 0x0000ff); + + /* Symbol Rate */ + cmd.args[5] = ((state->dcur.symbol_rate / 1000) & 0xff00) >> 8; + cmd.args[6] = ((state->dcur.symbol_rate / 1000) & 0x00ff); + + /* Automatic Inversion */ + cmd.args[7] = state->dcur.inversion_val; + + /* Modulation / FEC / Pilot */ + cmd.args[8] = state->dcur.fec_val | state->dcur.pilot_val; + + cmd.args[9] = CX24117_SEARCH_RANGE_KHZ >> 8; + cmd.args[10] = CX24117_SEARCH_RANGE_KHZ & 0xff; + + cmd.args[11] = state->dcur.rolloff_val; + cmd.args[12] = state->dcur.fec_mask; + + if (state->dcur.symbol_rate > 30000000) { + reg_ratediv = 0x04; + reg_clkdiv = 0x02; + } else if (state->dcur.symbol_rate > 10000000) { + reg_ratediv = 0x06; + reg_clkdiv = 0x03; + } else { + reg_ratediv = 0x0a; + reg_clkdiv = 0x05; + } + + cmd.args[13] = reg_ratediv; + cmd.args[14] = reg_clkdiv; + + cx24117_writereg(state, (state->demod == 0) ? + CX24117_REG_CLKDIV0 : CX24117_REG_CLKDIV1, reg_clkdiv); + cx24117_writereg(state, (state->demod == 0) ? + CX24117_REG_RATEDIV0 : CX24117_REG_RATEDIV1, reg_ratediv); + + cmd.args[15] = CX24117_PNE; + cmd.len = 16; + + do { + /* Reset status register */ + status = cx24117_readreg(state, (state->demod == 0) ? + CX24117_REG_SSTATUS0 : CX24117_REG_SSTATUS1) & + CX24117_SIGNAL_MASK; + + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d status_setfe = %02x\n", + __func__, state->demod, status); + + cx24117_writereg(state, (state->demod == 0) ? + CX24117_REG_SSTATUS0 : CX24117_REG_SSTATUS1, status); + + /* Tune */ + ret = cx24117_cmd_execute(fe, &cmd); + if (ret != 0) + break; + + /* + * Wait for up to 500 ms before retrying + * + * If we are able to tune then generally it occurs within 100ms. + * If it takes longer, try a different rolloff setting. 
+ */ + for (i = 0; i < 50; i++) { + cx24117_read_status(fe, &tunerstat); + status = tunerstat & (FE_HAS_SIGNAL | FE_HAS_SYNC); + if (status == (FE_HAS_SIGNAL | FE_HAS_SYNC)) { + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d tuned\n", + __func__, state->demod); + return 0; + } + msleep(20); + } + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d not tuned\n", + __func__, state->demod); + + /* try next rolloff value */ + if (state->dcur.rolloff == 3) + cmd.args[11]--; + + } while (--retune); + return -EINVAL; +} + +static int cx24117_tune(struct dvb_frontend *fe, bool re_tune, + unsigned int mode_flags, unsigned int *delay, fe_status_t *status) +{ + struct cx24117_state *state = fe->demodulator_priv; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n", + __func__, state->demod); + + *delay = HZ / 5; + if (re_tune) { + int ret = cx24117_set_frontend(fe); + if (ret) + return ret; + } + return cx24117_read_status(fe, status); +} + +static int cx24117_get_algo(struct dvb_frontend *fe) +{ + return DVBFE_ALGO_HW; +} + +static int cx24117_get_frontend(struct dvb_frontend *fe) +{ + struct cx24117_state *state = fe->demodulator_priv; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + struct cx24117_cmd cmd; + u8 reg, st, inv; + int ret, idx; + unsigned int freq; + short srate_os, freq_os; + + u8 buf[0x1f-4]; + + cmd.args[0] = 0x1c; + cmd.args[1] = (u8) state->demod; + cmd.len = 2; + ret = cx24117_cmd_execute(fe, &cmd); + if (ret != 0) + return ret; + + /* read all required regs at once */ + reg = (state->demod == 0) ? CX24117_REG_FREQ3_0 : CX24117_REG_FREQ3_1; + ret = cx24117_readregN(state, reg, buf, 0x1f-4); + if (ret != 0) + return ret; + + st = buf[5]; + + /* get spectral inversion */ + inv = (((state->demod == 0) ? ~st : st) >> 6) & 1; + if (inv == 0) + c->inversion = INVERSION_OFF; + else + c->inversion = INVERSION_ON; + + /* modulation and fec */ + idx = st & 0x3f; + if (c->delivery_system == SYS_DVBS2) { + if (idx > 11) + idx += 9; + else + idx += 7; + } + + c->modulation = cx24117_modfec_modes[idx].modulation; + c->fec_inner = cx24117_modfec_modes[idx].fec; + + /* frequency */ + freq = (buf[0] << 16) | (buf[1] << 8) | buf[2]; + freq_os = (buf[8] << 8) | buf[9]; + c->frequency = freq + freq_os; + + /* symbol rate */ + srate_os = (buf[10] << 8) | buf[11]; + c->symbol_rate = -1000 * srate_os + state->dcur.symbol_rate; + return 0; +} + +static struct dvb_frontend_ops cx24117_ops = { + .delsys = { SYS_DVBS, SYS_DVBS2 }, + .info = { + .name = "Conexant CX24117/CX24132", + .frequency_min = 950000, + .frequency_max = 2150000, + .frequency_stepsize = 1011, /* kHz for QPSK frontends */ + .frequency_tolerance = 5000, + .symbol_rate_min = 1000000, + .symbol_rate_max = 45000000, + .caps = FE_CAN_INVERSION_AUTO | + FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | + FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 | + FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | + FE_CAN_2G_MODULATION | + FE_CAN_QPSK | FE_CAN_RECOVER + }, + + .release = cx24117_release, + + .init = cx24117_initfe, + .sleep = cx24117_sleep, + .read_status = cx24117_read_status, + .read_ber = cx24117_read_ber, + .read_signal_strength = cx24117_read_signal_strength, + .read_snr = cx24117_read_snr, + .read_ucblocks = cx24117_read_ucblocks, + .set_tone = cx24117_set_tone, + .set_voltage = cx24117_set_voltage, + .diseqc_send_master_cmd = cx24117_send_diseqc_msg, + .diseqc_send_burst = cx24117_diseqc_send_burst, + .get_frontend_algo = cx24117_get_algo, + .tune = cx24117_tune, + + .set_frontend = cx24117_set_frontend, + .get_frontend = 
cx24117_get_frontend, +}; + + +MODULE_DESCRIPTION("DVB Frontend module for Conexant cx24117/cx24132 hardware"); +MODULE_AUTHOR("Luis Alves (ljalvs@gmail.com)"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.1"); +MODULE_FIRMWARE(CX24117_DEFAULT_FIRMWARE); + diff --git a/drivers/media/dvb-frontends/cx24117.h b/drivers/media/dvb-frontends/cx24117.h new file mode 100644 index 000000000000..4e59e9574fa7 --- /dev/null +++ b/drivers/media/dvb-frontends/cx24117.h @@ -0,0 +1,47 @@ +/* + Conexant cx24117/cx24132 - Dual DVBS/S2 Satellite demod/tuner driver + + Copyright (C) 2013 Luis Alves <ljalvs@gmail.com> + (based on cx24116.h by Steven Toth) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#ifndef CX24117_H +#define CX24117_H + +#include <linux/kconfig.h> +#include <linux/dvb/frontend.h> + +struct cx24117_config { + /* the demodulator's i2c address */ + u8 demod_address; +}; + +#if IS_ENABLED(CONFIG_DVB_CX24117) +extern struct dvb_frontend *cx24117_attach( + const struct cx24117_config *config, + struct i2c_adapter *i2c); +#else +static inline struct dvb_frontend *cx24117_attach( + const struct cx24117_config *config, + struct i2c_adapter *i2c) +{ + dev_warn(&i2c->dev, "%s: driver disabled by Kconfig\n", __func__); + return NULL; +} +#endif + +#endif /* CX24117_H */ diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c index a771da3e9f99..72fb5838cae0 100644 --- a/drivers/media/dvb-frontends/cx24123.c +++ b/drivers/media/dvb-frontends/cx24123.c @@ -739,7 +739,7 @@ static int cx24123_set_voltage(struct dvb_frontend *fe, return 0; default: return -EINVAL; - }; + } return 0; } diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c index 7ca5c69dd200..03930d5e9fea 100644 --- a/drivers/media/dvb-frontends/cxd2820r_core.c +++ b/drivers/media/dvb-frontends/cxd2820r_core.c @@ -21,21 +21,31 @@ #include "cxd2820r_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + /* write multiple registers */ static int cxd2820r_wr_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg, u8 *val, int len) { int ret; - u8 buf[len+1]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = i2c, .flags = 0, - .len = sizeof(buf), + .len = len + 1, .buf = buf, } }; + if (1 + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + buf[0] = reg; memcpy(&buf[1], val, len); @@ -55,7 +65,7 @@ static int cxd2820r_rd_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg, u8 *val, int len) { int ret; - u8 buf[len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[2] = { { .addr = i2c, @@ -65,11 +75,18 @@ static int cxd2820r_rd_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg, }, { .addr = i2c, .flags = I2C_M_RD, - .len = sizeof(buf), + .len = len, .buf = buf, } }; + if (len > sizeof(buf)) { + 
dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + ret = i2c_transfer(priv->i2c, msg, 2); if (ret == 2) { memcpy(val, buf, len); diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c index 6201c59a78dd..e540cfb13bac 100644 --- a/drivers/media/dvb-frontends/dib9000.c +++ b/drivers/media/dvb-frontends/dib9000.c @@ -649,9 +649,9 @@ static int dib9000_risc_debug_buf(struct dib9000_state *state, u16 * data, u8 si b[2 * (size - 2) - 1] = '\0'; /* Bullet proof the buffer */ if (*b == '~') { b++; - dprintk(b); + dprintk("%s", b); } else - dprintk("RISC%d: %d.%04d %s", state->fe_id, ts / 10000, ts % 10000, *b ? b : "<emtpy>"); + dprintk("RISC%d: %d.%04d %s", state->fe_id, ts / 10000, ts % 10000, *b ? b : "<empty>"); return 1; } diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c index 9a2134792cfa..959ae36403b8 100644 --- a/drivers/media/dvb-frontends/drxd_hard.c +++ b/drivers/media/dvb-frontends/drxd_hard.c @@ -46,10 +46,6 @@ #define DRX_I2C_MODEFLAGS 0xC0 #define DRX_I2C_FLAGS 0xF0 -#ifndef SIZEOF_ARRAY -#define SIZEOF_ARRAY(array) (sizeof((array))/sizeof((array)[0])) -#endif - #define DEFAULT_LOCK_TIMEOUT 1100 #define DRX_CHANNEL_AUTO 0 @@ -1018,7 +1014,7 @@ static int HI_CfgCommand(struct drxd_state *state) status = Write16(state, HI_RA_RAM_SRV_CMD__A, HI_RA_RAM_SRV_CMD_CONFIG, 0); else - status = HI_Command(state, HI_RA_RAM_SRV_CMD_CONFIG, 0); + status = HI_Command(state, HI_RA_RAM_SRV_CMD_CONFIG, NULL); mutex_unlock(&state->mutex); return status; } @@ -1039,7 +1035,7 @@ static int HI_ResetCommand(struct drxd_state *state) status = Write16(state, HI_RA_RAM_SRV_RST_KEY__A, HI_RA_RAM_SRV_RST_KEY_ACT, 0); if (status == 0) - status = HI_Command(state, HI_RA_RAM_SRV_CMD_RESET, 0); + status = HI_Command(state, HI_RA_RAM_SRV_CMD_RESET, NULL); mutex_unlock(&state->mutex); msleep(1); return status; @@ -2837,7 +2833,7 @@ static int drxd_init(struct dvb_frontend *fe) int err = 0; /* if (request_firmware(&state->fw, "drxd.fw", state->dev)<0) */ - return DRXD_init(state, 0, 0); + return DRXD_init(state, NULL, 0); err = DRXD_init(state, state->fw->data, state->fw->size); release_firmware(state->fw); @@ -2973,7 +2969,7 @@ struct dvb_frontend *drxd_attach(const struct drxd_config *config, mutex_init(&state->mutex); - if (Read16(state, 0, 0, 0) < 0) + if (Read16(state, 0, NULL, 0) < 0) goto error; state->frontend.ops = drxd_ops; diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c index 082014de6875..d416c15691da 100644 --- a/drivers/media/dvb-frontends/drxk_hard.c +++ b/drivers/media/dvb-frontends/drxk_hard.c @@ -1083,7 +1083,7 @@ static int hi_cfg_command(struct drxk_state *state) SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY); if (status < 0) goto error; - status = hi_command(state, SIO_HI_RA_RAM_CMD_CONFIG, 0); + status = hi_command(state, SIO_HI_RA_RAM_CMD_CONFIG, NULL); if (status < 0) goto error; @@ -2781,7 +2781,7 @@ static int ConfigureI2CBridge(struct drxk_state *state, bool b_enable_bridge) goto error; } - status = hi_command(state, SIO_HI_RA_RAM_CMD_BRDCTRL, 0); + status = hi_command(state, SIO_HI_RA_RAM_CMD_BRDCTRL, NULL); error: if (status < 0) diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c index c1c3400b2173..cadcae4cff89 100644 --- a/drivers/media/dvb-frontends/itd1000.c +++ b/drivers/media/dvb-frontends/itd1000.c @@ -31,6 +31,9 @@ #include "itd1000.h" #include 
"itd1000_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); @@ -52,10 +55,18 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); /* don't write more than one byte with flexcop behind */ static int itd1000_write_regs(struct itd1000_state *state, u8 reg, u8 v[], u8 len) { - u8 buf[1+len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg = { .addr = state->cfg->i2c_address, .flags = 0, .buf = buf, .len = len+1 }; + + if (1 + len > sizeof(buf)) { + printk(KERN_WARNING + "itd1000: i2c wr reg=%04x: len=%d is too big!\n", + reg, len); + return -EINVAL; + } + buf[0] = reg; memcpy(&buf[1], v, len); diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c index ec388c1d6913..a74ac0ddb833 100644 --- a/drivers/media/dvb-frontends/mt312.c +++ b/drivers/media/dvb-frontends/mt312.c @@ -36,6 +36,8 @@ #include "mt312_priv.h" #include "mt312.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 struct mt312_state { struct i2c_adapter *i2c; @@ -96,9 +98,15 @@ static int mt312_write(struct mt312_state *state, const enum mt312_reg_addr reg, const u8 *src, const size_t count) { int ret; - u8 buf[count + 1]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg; + if (1 + count > sizeof(buf)) { + printk(KERN_WARNING + "mt312: write: len=%zd is too big!\n", count); + return -EINVAL; + } + if (debug) { int i; dprintk("W(%d):", reg & 0x7f); diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c index 8e288940a61f..fbca9856313a 100644 --- a/drivers/media/dvb-frontends/nxt200x.c +++ b/drivers/media/dvb-frontends/nxt200x.c @@ -39,6 +39,9 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + #define NXT2002_DEFAULT_FIRMWARE "dvb-fe-nxt2002.fw" #define NXT2004_DEFAULT_FIRMWARE "dvb-fe-nxt2004.fw" #define CRC_CCIT_MASK 0x1021 @@ -95,10 +98,16 @@ static int i2c_readbytes(struct nxt200x_state *state, u8 addr, u8 *buf, u8 len) static int nxt200x_writebytes (struct nxt200x_state* state, u8 reg, const u8 *buf, u8 len) { - u8 buf2 [len+1]; + u8 buf2[MAX_XFER_SIZE]; int err; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf2, .len = len + 1 }; + if (1 + len > sizeof(buf2)) { + pr_warn("%s: i2c wr reg=%04x: len=%d is too big!\n", + __func__, reg, len); + return -EINVAL; + } + buf2[0] = reg; memcpy(&buf2[1], buf, len); diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c index 362d26d11e82..7efb796c472c 100644 --- a/drivers/media/dvb-frontends/rtl2830.c +++ b/drivers/media/dvb-frontends/rtl2830.c @@ -27,20 +27,30 @@ #include "rtl2830_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + /* write multiple hardware registers */ static int rtl2830_wr(struct rtl2830_priv *priv, u8 reg, const u8 *val, int len) { int ret; - u8 buf[1+len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = priv->cfg.i2c_addr, .flags = 0, - .len = 1+len, + .len = 1 + len, .buf = buf, } }; + if (1 + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + buf[0] = reg; memcpy(&buf[1], val, len); diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c index facb84841518..ff73da9365e3 
100644 --- a/drivers/media/dvb-frontends/rtl2832.c +++ b/drivers/media/dvb-frontends/rtl2832.c @@ -22,6 +22,9 @@ #include "dvb_math.h" #include <linux/bitops.h> +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + int rtl2832_debug; module_param_named(debug, rtl2832_debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); @@ -162,16 +165,23 @@ static const struct rtl2832_reg_entry registers[] = { static int rtl2832_wr(struct rtl2832_priv *priv, u8 reg, u8 *val, int len) { int ret; - u8 buf[1+len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = priv->cfg.i2c_addr, .flags = 0, - .len = 1+len, + .len = 1 + len, .buf = buf, } }; + if (1 + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + buf[0] = reg; memcpy(&buf[1], val, len); @@ -489,6 +499,7 @@ static int rtl2832_init(struct dvb_frontend *fe) init = rtl2832_tuner_init_e4000; break; case RTL2832_TUNER_R820T: + case RTL2832_TUNER_R828D: len = ARRAY_SIZE(rtl2832_tuner_init_r820t); init = rtl2832_tuner_init_r820t; break; diff --git a/drivers/media/dvb-frontends/rtl2832.h b/drivers/media/dvb-frontends/rtl2832.h index 91b2dcf5a6ea..2cfbb6a97061 100644 --- a/drivers/media/dvb-frontends/rtl2832.h +++ b/drivers/media/dvb-frontends/rtl2832.h @@ -53,6 +53,7 @@ struct rtl2832_config { #define RTL2832_TUNER_E4000 0x27 #define RTL2832_TUNER_FC0013 0x29 #define RTL2832_TUNER_R820T 0x2a +#define RTL2832_TUNER_R828D 0x2b u8 tuner; }; diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c index e2fec9ebf947..93eeaf7118fd 100644 --- a/drivers/media/dvb-frontends/s5h1420.c +++ b/drivers/media/dvb-frontends/s5h1420.c @@ -836,9 +836,16 @@ static u32 s5h1420_tuner_i2c_func(struct i2c_adapter *adapter) static int s5h1420_tuner_i2c_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num) { struct s5h1420_state *state = i2c_get_adapdata(i2c_adap); - struct i2c_msg m[1 + num]; + struct i2c_msg m[3]; u8 tx_open[2] = { CON_1, state->CON_1_val | 1 }; /* repeater stops once there was a stop condition */ + if (1 + num > ARRAY_SIZE(m)) { + printk(KERN_WARNING + "%s: i2c xfer: num=%d is too big!\n", + KBUILD_MODNAME, num); + return -EOPNOTSUPP; + } + memset(m, 0, sizeof(struct i2c_msg) * (1 + num)); m[0].addr = state->config->demod_address; @@ -847,7 +854,7 @@ static int s5h1420_tuner_i2c_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c memcpy(&m[1], msg, sizeof(struct i2c_msg) * num); - return i2c_transfer(state->i2c, m, 1+num) == 1 + num ? num : -EIO; + return i2c_transfer(state->i2c, m, 1 + num) == 1 + num ? 
num : -EIO; } static struct i2c_algorithm s5h1420_tuner_i2c_algo = { diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c index 3dd5714eadba..07cd5ea7a038 100644 --- a/drivers/media/dvb-frontends/stb0899_drv.c +++ b/drivers/media/dvb-frontends/stb0899_drv.c @@ -32,6 +32,9 @@ #include "stb0899_priv.h" #include "stb0899_reg.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + static unsigned int verbose = 0;//1; module_param(verbose, int, 0644); @@ -499,7 +502,7 @@ err: int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data, u32 count) { int ret; - u8 buf[2 + count]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg i2c_msg = { .addr = state->config->demod_address, .flags = 0, @@ -507,6 +510,13 @@ int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data, .len = 2 + count }; + if (2 + count > sizeof(buf)) { + printk(KERN_WARNING + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, count); + return -EINVAL; + } + buf[0] = reg >> 8; buf[1] = reg & 0xff; memcpy(&buf[2], data, count); diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c index 45f9523f968f..cea175d19890 100644 --- a/drivers/media/dvb-frontends/stb6100.c +++ b/drivers/media/dvb-frontends/stb6100.c @@ -31,6 +31,8 @@ static unsigned int verbose; module_param(verbose, int, 0644); +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 #define FE_ERROR 0 #define FE_NOTICE 1 @@ -183,7 +185,7 @@ static int stb6100_read_reg(struct stb6100_state *state, u8 reg) static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int start, int len) { int rc; - u8 cmdbuf[len + 1]; + u8 cmdbuf[MAX_XFER_SIZE]; struct i2c_msg msg = { .addr = state->config->tuner_address, .flags = 0, @@ -191,6 +193,13 @@ static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int st .len = len + 1 }; + if (1 + len > sizeof(buf)) { + printk(KERN_WARNING + "%s: i2c wr: len=%d is too big!\n", + KBUILD_MODNAME, len); + return -EINVAL; + } + if (unlikely(start < 1 || start + len > STB6100_NUMREGS)) { dprintk(verbose, FE_ERROR, 1, "Invalid register range %d:%d", start, len); diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c index 7b6dba3ce55e..458772739423 100644 --- a/drivers/media/dvb-frontends/stv0367.c +++ b/drivers/media/dvb-frontends/stv0367.c @@ -33,6 +33,9 @@ #include "stv0367_regs.h" #include "stv0367_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + static int stvdebug; module_param_named(debug, stvdebug, int, 0644); @@ -767,7 +770,7 @@ static struct st_register def0367cab[STV0367CAB_NBREGS] = { static int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len) { - u8 buf[len + 2]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, @@ -776,6 +779,14 @@ int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len) }; int ret; + if (2 + len > sizeof(buf)) { + printk(KERN_WARNING + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + + buf[0] = MSB(reg); buf[1] = LSB(reg); memcpy(buf + 2, data, len); diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c index 56d470ad5a82..23e872f84742 100644 --- a/drivers/media/dvb-frontends/stv090x.c +++ b/drivers/media/dvb-frontends/stv090x.c @@ -35,6 +35,9 @@ 
#include "stv090x.h" #include "stv090x_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + static unsigned int verbose; module_param(verbose, int, 0644); @@ -722,9 +725,16 @@ static int stv090x_write_regs(struct stv090x_state *state, unsigned int reg, u8 { const struct stv090x_config *config = state->config; int ret; - u8 buf[2 + count]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg i2c_msg = { .addr = config->address, .flags = 0, .buf = buf, .len = 2 + count }; + if (2 + count > sizeof(buf)) { + printk(KERN_WARNING + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, count); + return -EINVAL; + } + buf[0] = reg >> 8; buf[1] = reg & 0xff; memcpy(&buf[2], data, count); diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c index 20b5fa92c53e..b1425830a24e 100644 --- a/drivers/media/dvb-frontends/stv6110.c +++ b/drivers/media/dvb-frontends/stv6110.c @@ -30,6 +30,9 @@ #include "stv6110.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + static int debug; struct stv6110_priv { @@ -68,7 +71,7 @@ static int stv6110_write_regs(struct dvb_frontend *fe, u8 buf[], { struct stv6110_priv *priv = fe->tuner_priv; int rc; - u8 cmdbuf[len + 1]; + u8 cmdbuf[MAX_XFER_SIZE]; struct i2c_msg msg = { .addr = priv->i2c_address, .flags = 0, @@ -78,6 +81,13 @@ static int stv6110_write_regs(struct dvb_frontend *fe, u8 buf[], dprintk("%s\n", __func__); + if (1 + len > sizeof(cmdbuf)) { + printk(KERN_WARNING + "%s: i2c wr: len=%d is too big!\n", + KBUILD_MODNAME, len); + return -EINVAL; + } + if (start + len > 8) return -EINVAL; diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c index f36cab12bdc7..e66154e5c1d7 100644 --- a/drivers/media/dvb-frontends/stv6110x.c +++ b/drivers/media/dvb-frontends/stv6110x.c @@ -32,6 +32,9 @@ #include "stv6110x.h" #include "stv6110x_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + static unsigned int verbose; module_param(verbose, int, 0644); MODULE_PARM_DESC(verbose, "Set Verbosity level"); @@ -61,7 +64,8 @@ static int stv6110x_write_regs(struct stv6110x_state *stv6110x, int start, u8 da { int ret; const struct stv6110x_config *config = stv6110x->config; - u8 buf[len + 1]; + u8 buf[MAX_XFER_SIZE]; + struct i2c_msg msg = { .addr = config->addr, .flags = 0, @@ -69,6 +73,13 @@ static int stv6110x_write_regs(struct stv6110x_state *stv6110x, int start, u8 da .len = len + 1 }; + if (1 + len > sizeof(buf)) { + printk(KERN_WARNING + "%s: i2c wr: len=%d is too big!\n", + KBUILD_MODNAME, len); + return -EINVAL; + } + if (start + len > 8) return -EINVAL; diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c index e79749cfec81..8ad3a57cf640 100644 --- a/drivers/media/dvb-frontends/tda10071.c +++ b/drivers/media/dvb-frontends/tda10071.c @@ -20,6 +20,9 @@ #include "tda10071_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + static struct dvb_frontend_ops tda10071_ops; /* write multiple registers */ @@ -27,16 +30,23 @@ static int tda10071_wr_regs(struct tda10071_priv *priv, u8 reg, u8 *val, int len) { int ret; - u8 buf[len+1]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = priv->cfg.demod_i2c_addr, .flags = 0, - .len = sizeof(buf), + .len = 1 + len, .buf = buf, } }; + if (1 + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + 
return -EINVAL; + } + buf[0] = reg; memcpy(&buf[1], val, len); @@ -56,7 +66,7 @@ static int tda10071_rd_regs(struct tda10071_priv *priv, u8 reg, u8 *val, int len) { int ret; - u8 buf[len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[2] = { { .addr = priv->cfg.demod_i2c_addr, @@ -66,11 +76,18 @@ static int tda10071_rd_regs(struct tda10071_priv *priv, u8 reg, u8 *val, }, { .addr = priv->cfg.demod_i2c_addr, .flags = I2C_M_RD, - .len = sizeof(buf), + .len = len, .buf = buf, } }; + if (len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + ret = i2c_transfer(priv->i2c, msg, 2); if (ret == 2) { memcpy(val, buf, len); diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c index d281f77d5c28..2c54586ac07f 100644 --- a/drivers/media/dvb-frontends/tda18271c2dd.c +++ b/drivers/media/dvb-frontends/tda18271c2dd.c @@ -34,6 +34,9 @@ #include "dvb_frontend.h" #include "tda18271c2dd.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + struct SStandardParam { s32 m_IFFrequency; u32 m_BandWidth; @@ -139,11 +142,18 @@ static int i2c_write(struct i2c_adapter *adap, u8 adr, u8 *data, int len) static int WriteRegs(struct tda_state *state, u8 SubAddr, u8 *Regs, u16 nRegs) { - u8 data[nRegs+1]; + u8 data[MAX_XFER_SIZE]; + + if (1 + nRegs > sizeof(data)) { + printk(KERN_WARNING + "%s: i2c wr: len=%d is too big!\n", + KBUILD_MODNAME, nRegs); + return -EINVAL; + } data[0] = SubAddr; memcpy(data + 1, Regs, nRegs); - return i2c_write(state->i2c, state->adr, data, nRegs+1); + return i2c_write(state->i2c, state->adr, data, nRegs + 1); } static int WriteReg(struct tda_state *state, u8 SubAddr, u8 Reg) diff --git a/drivers/media/dvb-frontends/tda8083.c b/drivers/media/dvb-frontends/tda8083.c index 9d08350fe4b0..69e62f42e2e1 100644 --- a/drivers/media/dvb-frontends/tda8083.c +++ b/drivers/media/dvb-frontends/tda8083.c @@ -189,7 +189,7 @@ static int tda8083_set_tone (struct tda8083_state* state, fe_sec_tone_mode_t ton return tda8083_writereg (state, 0x29, 0x80); default: return -EINVAL; - }; + } } static int tda8083_set_voltage (struct tda8083_state* state, fe_sec_voltage_t voltage) @@ -201,7 +201,7 @@ static int tda8083_set_voltage (struct tda8083_state* state, fe_sec_voltage_t vo return tda8083_writereg (state, 0x20, 0x11); default: return -EINVAL; - }; + } } static int tda8083_send_diseqc_burst (struct tda8083_state* state, fe_sec_mini_cmd_t burst) diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c index ad7ad857ab2a..9aba044dabed 100644 --- a/drivers/media/dvb-frontends/ts2020.c +++ b/drivers/media/dvb-frontends/ts2020.c @@ -31,6 +31,7 @@ struct ts2020_priv { struct i2c_adapter *i2c; u8 clk_out_div; u32 frequency; + u32 frequency_div; }; static int ts2020_release(struct dvb_frontend *fe) @@ -193,7 +194,7 @@ static int ts2020_set_params(struct dvb_frontend *fe) u8 lo = 0x01, div4 = 0x0; /* Calculate frequency divider */ - if (frequency < 1060000) { + if (frequency < priv->frequency_div) { lo |= 0x10; div4 = 0x1; ndiv = (frequency * 14 * 4) / TS2020_XTAL_FREQ; @@ -340,8 +341,12 @@ struct dvb_frontend *ts2020_attach(struct dvb_frontend *fe, priv->i2c_address = config->tuner_address; priv->i2c = i2c; priv->clk_out_div = config->clk_out_div; + priv->frequency_div = config->frequency_div; fe->tuner_priv = priv; + if (!priv->frequency_div) + priv->frequency_div = 1060000; + /* Wake Up the tuner */ if ((0x03 & 
ts2020_readreg(fe, 0x00)) == 0x00) { ts2020_writereg(fe, 0x00, 0x01); diff --git a/drivers/media/dvb-frontends/ts2020.h b/drivers/media/dvb-frontends/ts2020.h index 5bcb9a71ca80..b2fe6bb3a38b 100644 --- a/drivers/media/dvb-frontends/ts2020.h +++ b/drivers/media/dvb-frontends/ts2020.h @@ -28,6 +28,7 @@ struct ts2020_config { u8 tuner_address; u8 clk_out_div; + u32 frequency_div; }; #if IS_ENABLED(CONFIG_DVB_TS2020) diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c index eff9c5fde50a..91b6b2e9b792 100644 --- a/drivers/media/dvb-frontends/zl10039.c +++ b/drivers/media/dvb-frontends/zl10039.c @@ -30,6 +30,9 @@ static int debug; +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + #define dprintk(args...) \ do { \ if (debug) \ @@ -98,7 +101,7 @@ static int zl10039_write(struct zl10039_state *state, const enum zl10039_reg_addr reg, const u8 *src, const size_t count) { - u8 buf[count + 1]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg = { .addr = state->i2c_addr, .flags = 0, @@ -106,6 +109,13 @@ static int zl10039_write(struct zl10039_state *state, .len = count + 1, }; + if (1 + count > sizeof(buf)) { + printk(KERN_WARNING + "%s: i2c wr reg=%04x: len=%zd is too big!\n", + KBUILD_MODNAME, reg, count); + return -EINVAL; + } + dprintk("%s\n", __func__); /* Write register address and data in one go */ buf[0] = reg; diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index d18be19c96cd..842654d33317 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -621,6 +621,15 @@ config VIDEO_AS3645A This is a driver for the AS3645A and LM3555 flash controllers. It has build in control for flash, torch and indicator LEDs. +config VIDEO_LM3560 + tristate "LM3560 dual flash driver support" + depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER + depends on MEDIA_CAMERA_SUPPORT + select REGMAP_I2C + ---help--- + This is a driver for the lm3560 dual flash controllers. It controls + flash, torch LEDs. + comment "Video improvement chips" config VIDEO_UPD64031A @@ -646,7 +655,7 @@ config VIDEO_UPD64083 To compile this driver as a module, choose M here: the module will be called upd64083. 
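The change repeated across the demod and tuner drivers above (cxd2820r, itd1000, mt312, nxt200x, rtl2830/rtl2832, stb0899, stb6100, stv0367, stv090x, stv6110/stv6110x, tda10071, tda18271c2dd, zl10039, and cimax2 further below) swaps a variable-length stack array in the I2C write helper for a fixed MAX_XFER_SIZE buffer plus an explicit length check, so an oversized request fails with -EINVAL instead of growing the kernel stack. A minimal sketch of that pattern follows; the names (my_demod_wr_regs, MY_MAX_XFER) are hypothetical stand-ins, not symbols from the patch:

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/string.h>

/* Illustrative only: mirrors the bounds-checked write pattern, not any one driver. */
#define MY_MAX_XFER 64

static int my_demod_wr_regs(struct i2c_adapter *adap, u8 addr,
			    u8 reg, const u8 *val, int len)
{
	u8 buf[MY_MAX_XFER];		/* fixed size replaces u8 buf[len + 1] */
	struct i2c_msg msg = {
		.addr  = addr,
		.flags = 0,
		.len   = 1 + len,	/* register byte plus payload */
		.buf   = buf,
	};

	/* Reject transfers that would overflow the on-stack buffer. */
	if (1 + len > sizeof(buf))
		return -EINVAL;

	buf[0] = reg;
	memcpy(&buf[1], val, len);

	return i2c_transfer(adap, &msg, 1) == 1 ? 0 : -EIO;
}

The cap is chosen large enough for the short register writes these drivers issue; a caller that really needed a bigger transfer would have to split it, which none of the converted helpers do.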
-comment "Miscelaneous helper chips" +comment "Miscellaneous helper chips" config VIDEO_THS7303 tristate "THS7303/53 Video Amplifier" diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index 9f462df77b4a..e03f1776f4f4 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -70,6 +70,7 @@ obj-$(CONFIG_VIDEO_S5K4ECGX) += s5k4ecgx.o obj-$(CONFIG_VIDEO_S5C73M3) += s5c73m3/ obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o obj-$(CONFIG_VIDEO_AS3645A) += as3645a.o +obj-$(CONFIG_VIDEO_LM3560) += lm3560.o obj-$(CONFIG_VIDEO_SMIAPP_PLL) += smiapp-pll.o obj-$(CONFIG_VIDEO_AK881X) += ak881x.o obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c index 6f738d8e3a8f..d45e0e3a781d 100644 --- a/drivers/media/i2c/adv7183.c +++ b/drivers/media/i2c/adv7183.c @@ -178,7 +178,7 @@ static int adv7183_log_status(struct v4l2_subdev *sd) adv7183_read(sd, ADV7183_VS_FIELD_CTRL_1), adv7183_read(sd, ADV7183_VS_FIELD_CTRL_2), adv7183_read(sd, ADV7183_VS_FIELD_CTRL_3)); - v4l2_info(sd, "adv7183: Hsync positon control 1 2 and 3 = 0x%02x 0x%02x 0x%02x\n", + v4l2_info(sd, "adv7183: Hsync position control 1 2 and 3 = 0x%02x 0x%02x 0x%02x\n", adv7183_read(sd, ADV7183_HS_POS_CTRL_1), adv7183_read(sd, ADV7183_HS_POS_CTRL_2), adv7183_read(sd, ADV7183_HS_POS_CTRL_3)); diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c index aeb56c53e39f..d4e15a617c3b 100644 --- a/drivers/media/i2c/adv7343.c +++ b/drivers/media/i2c/adv7343.c @@ -25,6 +25,7 @@ #include <linux/module.h> #include <linux/videodev2.h> #include <linux/uaccess.h> +#include <linux/of.h> #include <media/adv7343.h> #include <media/v4l2-async.h> diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c new file mode 100644 index 000000000000..3317a9ae3961 --- /dev/null +++ b/drivers/media/i2c/lm3560.c @@ -0,0 +1,488 @@ +/* + * drivers/media/i2c/lm3560.c + * General device driver for TI lm3560, FLASH LED Driver + * + * Copyright (C) 2013 Texas Instruments + * + * Contact: Daniel Jeong <gshark.jeong@gmail.com> + * Ldd-Mlp <ldd-mlp@list.ti.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/regmap.h> +#include <linux/videodev2.h> +#include <media/lm3560.h> +#include <media/v4l2-ctrls.h> +#include <media/v4l2-device.h> + +/* registers definitions */ +#define REG_ENABLE 0x10 +#define REG_TORCH_BR 0xa0 +#define REG_FLASH_BR 0xb0 +#define REG_FLASH_TOUT 0xc0 +#define REG_FLAG 0xd0 +#define REG_CONFIG1 0xe0 + +/* Fault Mask */ +#define FAULT_TIMEOUT (1<<0) +#define FAULT_OVERTEMP (1<<1) +#define FAULT_SHORT_CIRCUIT (1<<2) + +enum led_enable { + MODE_SHDN = 0x0, + MODE_TORCH = 0x2, + MODE_FLASH = 0x3, +}; + +/* struct lm3560_flash + * + * @pdata: platform data + * @regmap: reg. 
map for i2c + * @lock: muxtex for serial access. + * @led_mode: V4L2 LED mode + * @ctrls_led: V4L2 contols + * @subdev_led: V4L2 subdev + */ +struct lm3560_flash { + struct device *dev; + struct lm3560_platform_data *pdata; + struct regmap *regmap; + struct mutex lock; + + enum v4l2_flash_led_mode led_mode; + struct v4l2_ctrl_handler ctrls_led[LM3560_LED_MAX]; + struct v4l2_subdev subdev_led[LM3560_LED_MAX]; +}; + +#define to_lm3560_flash(_ctrl, _no) \ + container_of(_ctrl->handler, struct lm3560_flash, ctrls_led[_no]) + +/* enable mode control */ +static int lm3560_mode_ctrl(struct lm3560_flash *flash) +{ + int rval = -EINVAL; + + switch (flash->led_mode) { + case V4L2_FLASH_LED_MODE_NONE: + rval = regmap_update_bits(flash->regmap, + REG_ENABLE, 0x03, MODE_SHDN); + break; + case V4L2_FLASH_LED_MODE_TORCH: + rval = regmap_update_bits(flash->regmap, + REG_ENABLE, 0x03, MODE_TORCH); + break; + case V4L2_FLASH_LED_MODE_FLASH: + rval = regmap_update_bits(flash->regmap, + REG_ENABLE, 0x03, MODE_FLASH); + break; + } + return rval; +} + +/* led1/2 enable/disable */ +static int lm3560_enable_ctrl(struct lm3560_flash *flash, + enum lm3560_led_id led_no, bool on) +{ + int rval; + + if (led_no == LM3560_LED0) { + if (on == true) + rval = regmap_update_bits(flash->regmap, + REG_ENABLE, 0x08, 0x08); + else + rval = regmap_update_bits(flash->regmap, + REG_ENABLE, 0x08, 0x00); + } else { + if (on == true) + rval = regmap_update_bits(flash->regmap, + REG_ENABLE, 0x10, 0x10); + else + rval = regmap_update_bits(flash->regmap, + REG_ENABLE, 0x10, 0x00); + } + return rval; +} + +/* torch1/2 brightness control */ +static int lm3560_torch_brt_ctrl(struct lm3560_flash *flash, + enum lm3560_led_id led_no, unsigned int brt) +{ + int rval; + u8 br_bits; + + if (brt < LM3560_TORCH_BRT_MIN) + return lm3560_enable_ctrl(flash, led_no, false); + else + rval = lm3560_enable_ctrl(flash, led_no, true); + + br_bits = LM3560_TORCH_BRT_uA_TO_REG(brt); + if (led_no == LM3560_LED0) + rval = regmap_update_bits(flash->regmap, + REG_TORCH_BR, 0x07, br_bits); + else + rval = regmap_update_bits(flash->regmap, + REG_TORCH_BR, 0x38, br_bits << 3); + + return rval; +} + +/* flash1/2 brightness control */ +static int lm3560_flash_brt_ctrl(struct lm3560_flash *flash, + enum lm3560_led_id led_no, unsigned int brt) +{ + int rval; + u8 br_bits; + + if (brt < LM3560_FLASH_BRT_MIN) + return lm3560_enable_ctrl(flash, led_no, false); + else + rval = lm3560_enable_ctrl(flash, led_no, true); + + br_bits = LM3560_FLASH_BRT_uA_TO_REG(brt); + if (led_no == LM3560_LED0) + rval = regmap_update_bits(flash->regmap, + REG_FLASH_BR, 0x0f, br_bits); + else + rval = regmap_update_bits(flash->regmap, + REG_FLASH_BR, 0xf0, br_bits << 4); + + return rval; +} + +/* V4L2 controls */ +static int lm3560_get_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no) +{ + struct lm3560_flash *flash = to_lm3560_flash(ctrl, led_no); + + mutex_lock(&flash->lock); + + if (ctrl->id == V4L2_CID_FLASH_FAULT) { + int rval; + s32 fault = 0; + unsigned int reg_val; + rval = regmap_read(flash->regmap, REG_FLAG, ®_val); + if (rval < 0) + return rval; + if (rval & FAULT_SHORT_CIRCUIT) + fault |= V4L2_FLASH_FAULT_SHORT_CIRCUIT; + if (rval & FAULT_OVERTEMP) + fault |= V4L2_FLASH_FAULT_OVER_TEMPERATURE; + if (rval & FAULT_TIMEOUT) + fault |= V4L2_FLASH_FAULT_TIMEOUT; + ctrl->cur.val = fault; + return 0; + } + + mutex_unlock(&flash->lock); + return -EINVAL; +} + +static int lm3560_set_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no) +{ + struct lm3560_flash *flash = 
to_lm3560_flash(ctrl, led_no); + u8 tout_bits; + int rval = -EINVAL; + + mutex_lock(&flash->lock); + + switch (ctrl->id) { + case V4L2_CID_FLASH_LED_MODE: + flash->led_mode = ctrl->val; + if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) + rval = lm3560_mode_ctrl(flash); + break; + + case V4L2_CID_FLASH_STROBE_SOURCE: + rval = regmap_update_bits(flash->regmap, + REG_CONFIG1, 0x04, (ctrl->val) << 2); + if (rval < 0) + goto err_out; + break; + + case V4L2_CID_FLASH_STROBE: + if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) + return -EBUSY; + flash->led_mode = V4L2_FLASH_LED_MODE_FLASH; + rval = lm3560_mode_ctrl(flash); + break; + + case V4L2_CID_FLASH_STROBE_STOP: + if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) + return -EBUSY; + flash->led_mode = V4L2_FLASH_LED_MODE_NONE; + rval = lm3560_mode_ctrl(flash); + break; + + case V4L2_CID_FLASH_TIMEOUT: + tout_bits = LM3560_FLASH_TOUT_ms_TO_REG(ctrl->val); + rval = regmap_update_bits(flash->regmap, + REG_FLASH_TOUT, 0x1f, tout_bits); + break; + + case V4L2_CID_FLASH_INTENSITY: + rval = lm3560_flash_brt_ctrl(flash, led_no, ctrl->val); + break; + + case V4L2_CID_FLASH_TORCH_INTENSITY: + rval = lm3560_torch_brt_ctrl(flash, led_no, ctrl->val); + break; + } + + mutex_unlock(&flash->lock); +err_out: + return rval; +} + +static int lm3560_led1_get_ctrl(struct v4l2_ctrl *ctrl) +{ + return lm3560_get_ctrl(ctrl, LM3560_LED1); +} + +static int lm3560_led1_set_ctrl(struct v4l2_ctrl *ctrl) +{ + return lm3560_set_ctrl(ctrl, LM3560_LED1); +} + +static int lm3560_led0_get_ctrl(struct v4l2_ctrl *ctrl) +{ + return lm3560_get_ctrl(ctrl, LM3560_LED0); +} + +static int lm3560_led0_set_ctrl(struct v4l2_ctrl *ctrl) +{ + return lm3560_set_ctrl(ctrl, LM3560_LED0); +} + +static const struct v4l2_ctrl_ops lm3560_led_ctrl_ops[LM3560_LED_MAX] = { + [LM3560_LED0] = { + .g_volatile_ctrl = lm3560_led0_get_ctrl, + .s_ctrl = lm3560_led0_set_ctrl, + }, + [LM3560_LED1] = { + .g_volatile_ctrl = lm3560_led1_get_ctrl, + .s_ctrl = lm3560_led1_set_ctrl, + } +}; + +static int lm3560_init_controls(struct lm3560_flash *flash, + enum lm3560_led_id led_no) +{ + struct v4l2_ctrl *fault; + u32 max_flash_brt = flash->pdata->max_flash_brt[led_no]; + u32 max_torch_brt = flash->pdata->max_torch_brt[led_no]; + struct v4l2_ctrl_handler *hdl = &flash->ctrls_led[led_no]; + const struct v4l2_ctrl_ops *ops = &lm3560_led_ctrl_ops[led_no]; + + v4l2_ctrl_handler_init(hdl, 8); + /* flash mode */ + v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_FLASH_LED_MODE, + V4L2_FLASH_LED_MODE_TORCH, ~0x7, + V4L2_FLASH_LED_MODE_NONE); + flash->led_mode = V4L2_FLASH_LED_MODE_NONE; + + /* flash source */ + v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_FLASH_STROBE_SOURCE, + 0x1, ~0x3, V4L2_FLASH_STROBE_SOURCE_SOFTWARE); + + /* flash strobe */ + v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_STROBE, 0, 0, 0, 0); + /* flash strobe stop */ + v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_STROBE_STOP, 0, 0, 0, 0); + + /* flash strobe timeout */ + v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_TIMEOUT, + LM3560_FLASH_TOUT_MIN, + flash->pdata->max_flash_timeout, + LM3560_FLASH_TOUT_STEP, + flash->pdata->max_flash_timeout); + + /* flash brt */ + v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_INTENSITY, + LM3560_FLASH_BRT_MIN, max_flash_brt, + LM3560_FLASH_BRT_STEP, max_flash_brt); + + /* torch brt */ + v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_TORCH_INTENSITY, + LM3560_TORCH_BRT_MIN, max_torch_brt, + LM3560_TORCH_BRT_STEP, max_torch_brt); + + /* fault */ + fault = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_FAULT, 0, + V4L2_FLASH_FAULT_OVER_VOLTAGE 
+ | V4L2_FLASH_FAULT_OVER_TEMPERATURE + | V4L2_FLASH_FAULT_SHORT_CIRCUIT + | V4L2_FLASH_FAULT_TIMEOUT, 0, 0); + if (fault != NULL) + fault->flags |= V4L2_CTRL_FLAG_VOLATILE; + + if (hdl->error) + return hdl->error; + + flash->subdev_led[led_no].ctrl_handler = hdl; + return 0; +} + +/* initialize device */ +static const struct v4l2_subdev_ops lm3560_ops = { + .core = NULL, +}; + +static const struct regmap_config lm3560_regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0xFF, +}; + +static int lm3560_subdev_init(struct lm3560_flash *flash, + enum lm3560_led_id led_no, char *led_name) +{ + struct i2c_client *client = to_i2c_client(flash->dev); + int rval; + + v4l2_i2c_subdev_init(&flash->subdev_led[led_no], client, &lm3560_ops); + flash->subdev_led[led_no].flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + strcpy(flash->subdev_led[led_no].name, led_name); + rval = lm3560_init_controls(flash, led_no); + if (rval) + goto err_out; + rval = media_entity_init(&flash->subdev_led[led_no].entity, 0, NULL, 0); + if (rval < 0) + goto err_out; + flash->subdev_led[led_no].entity.type = MEDIA_ENT_T_V4L2_SUBDEV_FLASH; + + return rval; + +err_out: + v4l2_ctrl_handler_free(&flash->ctrls_led[led_no]); + return rval; +} + +static int lm3560_init_device(struct lm3560_flash *flash) +{ + int rval; + unsigned int reg_val; + + /* set peak current */ + rval = regmap_update_bits(flash->regmap, + REG_FLASH_TOUT, 0x60, flash->pdata->peak); + if (rval < 0) + return rval; + /* output disable */ + flash->led_mode = V4L2_FLASH_LED_MODE_NONE; + rval = lm3560_mode_ctrl(flash); + if (rval < 0) + return rval; + /* Reset faults */ + rval = regmap_read(flash->regmap, REG_FLAG, ®_val); + return rval; +} + +static int lm3560_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct lm3560_flash *flash; + struct lm3560_platform_data *pdata = dev_get_platdata(&client->dev); + int rval; + + flash = devm_kzalloc(&client->dev, sizeof(*flash), GFP_KERNEL); + if (flash == NULL) + return -ENOMEM; + + flash->regmap = devm_regmap_init_i2c(client, &lm3560_regmap); + if (IS_ERR(flash->regmap)) { + rval = PTR_ERR(flash->regmap); + return rval; + } + + /* if there is no platform data, use chip default value */ + if (pdata == NULL) { + pdata = + kzalloc(sizeof(struct lm3560_platform_data), GFP_KERNEL); + if (pdata == NULL) + return -ENODEV; + pdata->peak = LM3560_PEAK_3600mA; + pdata->max_flash_timeout = LM3560_FLASH_TOUT_MAX; + /* led 1 */ + pdata->max_flash_brt[LM3560_LED0] = LM3560_FLASH_BRT_MAX; + pdata->max_torch_brt[LM3560_LED0] = LM3560_TORCH_BRT_MAX; + /* led 2 */ + pdata->max_flash_brt[LM3560_LED1] = LM3560_FLASH_BRT_MAX; + pdata->max_torch_brt[LM3560_LED1] = LM3560_TORCH_BRT_MAX; + } + flash->pdata = pdata; + flash->dev = &client->dev; + mutex_init(&flash->lock); + + rval = lm3560_subdev_init(flash, LM3560_LED0, "lm3560-led0"); + if (rval < 0) + return rval; + + rval = lm3560_subdev_init(flash, LM3560_LED1, "lm3560-led1"); + if (rval < 0) + return rval; + + rval = lm3560_init_device(flash); + if (rval < 0) + return rval; + + return 0; +} + +static int lm3560_remove(struct i2c_client *client) +{ + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct lm3560_flash *flash = container_of(subdev, struct lm3560_flash, + subdev_led[LM3560_LED_MAX]); + unsigned int i; + + for (i = LM3560_LED0; i < LM3560_LED_MAX; i++) { + v4l2_device_unregister_subdev(&flash->subdev_led[i]); + v4l2_ctrl_handler_free(&flash->ctrls_led[i]); + media_entity_cleanup(&flash->subdev_led[i].entity); + } + + return 0; +} + +static 
const struct i2c_device_id lm3560_id_table[] = { + {LM3560_NAME, 0}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, lm3560_id_table); + +static struct i2c_driver lm3560_i2c_driver = { + .driver = { + .name = LM3560_NAME, + .pm = NULL, + }, + .probe = lm3560_probe, + .remove = lm3560_remove, + .id_table = lm3560_id_table, +}; + +module_i2c_driver(lm3560_i2c_driver); + +MODULE_AUTHOR("Daniel Jeong <gshark.jeong@gmail.com>"); +MODULE_AUTHOR("Ldd Mlp <ldd-mlp@list.ti.com>"); +MODULE_DESCRIPTION("Texas Instruments LM3560 LED flash driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c index b76ec0e7e685..6fec9384d86e 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c @@ -1581,7 +1581,7 @@ static int s5c73m3_probe(struct i2c_client *client, oif_sd = &state->oif_sd; v4l2_subdev_init(sd, &s5c73m3_subdev_ops); - sd->owner = client->driver->driver.owner; + sd->owner = client->dev.driver->owner; v4l2_set_subdevdata(sd, state); strlcpy(sd->name, "S5C73M3", sizeof(sd->name)); @@ -1651,7 +1651,7 @@ static int s5c73m3_probe(struct i2c_client *client, if (ret < 0) goto out_err; - v4l2_info(sd, "%s: completed succesfully\n", __func__); + v4l2_info(sd, "%s: completed successfully\n", __func__); return 0; out_err: diff --git a/drivers/media/i2c/soc_camera/imx074.c b/drivers/media/i2c/soc_camera/imx074.c index 1d384a371b41..5b915936c3f3 100644 --- a/drivers/media/i2c/soc_camera/imx074.c +++ b/drivers/media/i2c/soc_camera/imx074.c @@ -451,7 +451,9 @@ static int imx074_probe(struct i2c_client *client, if (ret < 0) goto eprobe; - return v4l2_async_register_subdev(&priv->subdev); + ret = v4l2_async_register_subdev(&priv->subdev); + if (!ret) + return 0; epwrinit: eprobe: diff --git a/drivers/media/i2c/soc_camera/ov9640.c b/drivers/media/i2c/soc_camera/ov9640.c index e968c3fdbd9e..bc74224503e7 100644 --- a/drivers/media/i2c/soc_camera/ov9640.c +++ b/drivers/media/i2c/soc_camera/ov9640.c @@ -371,7 +371,7 @@ static void ov9640_alter_regs(enum v4l2_mbus_pixelcode code, alt->com13 = OV9640_COM13_RGB_AVG; alt->com15 = OV9640_COM15_RGB_565; break; - }; + } } /* Setup registers according to resolution and color encoding */ diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c index d9f65d7e3e58..04139eec8c4e 100644 --- a/drivers/media/i2c/ths8200.c +++ b/drivers/media/i2c/ths8200.c @@ -19,6 +19,7 @@ #include <linux/i2c.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/v4l2-dv-timings.h> #include <media/v4l2-dv-timings.h> diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c index 91f3dd4cda1b..83d85df4853a 100644 --- a/drivers/media/i2c/tvp514x.c +++ b/drivers/media/i2c/tvp514x.c @@ -35,6 +35,7 @@ #include <linux/videodev2.h> #include <linux/module.h> #include <linux/v4l2-mediabus.h> +#include <linux/of.h> #include <media/v4l2-async.h> #include <media/v4l2-device.h> diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c index 24a08fa7e328..912e1cccdd1c 100644 --- a/drivers/media/i2c/tvp7002.c +++ b/drivers/media/i2c/tvp7002.c @@ -29,6 +29,7 @@ #include <linux/slab.h> #include <linux/videodev2.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/v4l2-dv-timings.h> #include <media/tvp7002.h> #include <media/v4l2-async.h> diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c index 447afbd904a4..8b5e0b3a92a0 100644 --- a/drivers/media/pci/b2c2/flexcop-pci.c +++ 
b/drivers/media/pci/b2c2/flexcop-pci.c @@ -319,7 +319,6 @@ static int flexcop_pci_init(struct flexcop_pci *fc_pci) err_pci_iounmap: pci_iounmap(fc_pci->pdev, fc_pci->io_mem); - pci_set_drvdata(fc_pci->pdev, NULL); err_pci_release_regions: pci_release_regions(fc_pci->pdev); err_pci_disable_device: @@ -332,7 +331,6 @@ static void flexcop_pci_exit(struct flexcop_pci *fc_pci) if (fc_pci->init_state & FC_PCI_INIT) { free_irq(fc_pci->pdev->irq, fc_pci); pci_iounmap(fc_pci->pdev, fc_pci->io_mem); - pci_set_drvdata(fc_pci->pdev, NULL); pci_release_regions(fc_pci->pdev); pci_disable_device(fc_pci->pdev); } diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c index 66eb0baab0e9..d0c281f41a0a 100644 --- a/drivers/media/pci/bt8xx/bt878.c +++ b/drivers/media/pci/bt8xx/bt878.c @@ -488,8 +488,7 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) btwrite(0, BT848_INT_MASK); result = request_irq(bt->irq, bt878_irq, - IRQF_SHARED | IRQF_DISABLED, "bt878", - (void *) bt); + IRQF_SHARED, "bt878", (void *) bt); if (result == -EINVAL) { printk(KERN_ERR "bt878(%d): Bad irq number or handler\n", bt878_num); @@ -563,7 +562,6 @@ static void bt878_remove(struct pci_dev *pci_dev) bt->shutdown = 1; bt878_mem_free(bt); - pci_set_drvdata(pci_dev, NULL); pci_disable_device(pci_dev); return; } diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index c6532de0eac7..a3b1ee9c00d7 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -4086,7 +4086,7 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) /* disable irqs, register irq handler */ btwrite(0, BT848_INT_MASK); result = request_irq(btv->c.pci->irq, bttv_irq, - IRQF_SHARED | IRQF_DISABLED, btv->c.v4l2_dev.name, (void *)btv); + IRQF_SHARED, btv->c.v4l2_dev.name, (void *)btv); if (result < 0) { pr_err("%d: can't get IRQ %d\n", bttv_num, btv->c.pci->irq); diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c index 004d8ace5019..c1f8cc6f14b2 100644 --- a/drivers/media/pci/cx18/cx18-driver.c +++ b/drivers/media/pci/cx18/cx18-driver.c @@ -324,23 +324,24 @@ static void cx18_eeprom_dump(struct cx18 *cx, unsigned char *eedata, int len) /* Hauppauge card? 
get values from tveeprom */ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv) { - struct i2c_client c; + struct i2c_client *c; u8 eedata[256]; - memset(&c, 0, sizeof(c)); - strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name)); - c.adapter = &cx->i2c_adap[0]; - c.addr = 0xA0 >> 1; + c = kzalloc(sizeof(*c), GFP_KERNEL); + + strlcpy(c->name, "cx18 tveeprom tmp", sizeof(c->name)); + c->adapter = &cx->i2c_adap[0]; + c->addr = 0xa0 >> 1; memset(tv, 0, sizeof(*tv)); - if (tveeprom_read(&c, eedata, sizeof(eedata))) - return; + if (tveeprom_read(c, eedata, sizeof(eedata))) + goto ret; switch (cx->card->type) { case CX18_CARD_HVR_1600_ESMT: case CX18_CARD_HVR_1600_SAMSUNG: case CX18_CARD_HVR_1600_S5H1411: - tveeprom_hauppauge_analog(&c, tv, eedata); + tveeprom_hauppauge_analog(c, tv, eedata); break; case CX18_CARD_YUAN_MPC718: case CX18_CARD_GOTVIEW_PCI_DVD3: @@ -354,6 +355,9 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv) cx18_eeprom_dump(cx, eedata, sizeof(eedata)); break; } + +ret: + kfree(c); } static void cx18_process_eeprom(struct cx18 *cx) @@ -1031,8 +1035,7 @@ static int cx18_probe(struct pci_dev *pci_dev, /* Register IRQ */ retval = request_irq(cx->pci_dev->irq, cx18_irq_handler, - IRQF_SHARED | IRQF_DISABLED, - cx->v4l2_dev.name, (void *)cx); + IRQF_SHARED, cx->v4l2_dev.name, (void *)cx); if (retval) { CX18_ERR("Failed to register irq %d\n", retval); goto free_i2c; diff --git a/drivers/media/pci/cx23885/Kconfig b/drivers/media/pci/cx23885/Kconfig index 5104c802f72f..d1dcb1d2e087 100644 --- a/drivers/media/pci/cx23885/Kconfig +++ b/drivers/media/pci/cx23885/Kconfig @@ -23,6 +23,7 @@ config VIDEO_CX23885 select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT select DVB_STV6110 if MEDIA_SUBDRV_AUTOSELECT select DVB_CX24116 if MEDIA_SUBDRV_AUTOSELECT + select DVB_CX24117 if MEDIA_SUBDRV_AUTOSELECT select DVB_STV0900 if MEDIA_SUBDRV_AUTOSELECT select DVB_DS3000 if MEDIA_SUBDRV_AUTOSELECT select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT diff --git a/drivers/media/pci/cx23885/cimax2.c b/drivers/media/pci/cx23885/cimax2.c index 7344849183a7..16fa7ea4d4aa 100644 --- a/drivers/media/pci/cx23885/cimax2.c +++ b/drivers/media/pci/cx23885/cimax2.c @@ -26,6 +26,10 @@ #include "cx23885.h" #include "cimax2.h" #include "dvb_ca_en50221.h" + +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + /**** Bit definitions for MC417_RWD and MC417_OEN registers *** bits 31-16 +-----------+ @@ -125,7 +129,7 @@ static int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg, u8 *buf, int len) { int ret; - u8 buffer[len + 1]; + u8 buffer[MAX_XFER_SIZE]; struct i2c_msg msg = { .addr = addr, @@ -134,6 +138,13 @@ static int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg, .len = len + 1 }; + if (1 + len > sizeof(buffer)) { + printk(KERN_WARNING + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + buffer[0] = reg; memcpy(&buffer[1], buf, len); diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c index 6a71a965e757..79f20c8c842e 100644 --- a/drivers/media/pci/cx23885/cx23885-cards.c +++ b/drivers/media/pci/cx23885/cx23885-cards.c @@ -223,6 +223,39 @@ struct cx23885_board cx23885_boards[] = { .name = "Leadtek Winfast PxDVR3200 H", .portc = CX23885_MPEG_DVB, }, + [CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200] = { + .name = "Leadtek Winfast PxPVR2200", + .porta = CX23885_ANALOG_VIDEO, + .tuner_type = TUNER_XC2028, + .tuner_addr = 0x61, + .tuner_bus = 1, + .input = {{ + .type = 
CX23885_VMUX_TELEVISION, + .vmux = CX25840_VIN2_CH1 | + CX25840_VIN5_CH2, + .amux = CX25840_AUDIO8, + .gpio0 = 0x704040, + }, { + .type = CX23885_VMUX_COMPOSITE1, + .vmux = CX25840_COMPOSITE1, + .amux = CX25840_AUDIO7, + .gpio0 = 0x704040, + }, { + .type = CX23885_VMUX_SVIDEO, + .vmux = CX25840_SVIDEO_LUMA3 | + CX25840_SVIDEO_CHROMA4, + .amux = CX25840_AUDIO7, + .gpio0 = 0x704040, + }, { + .type = CX23885_VMUX_COMPONENT, + .vmux = CX25840_VIN7_CH1 | + CX25840_VIN6_CH2 | + CX25840_VIN8_CH3 | + CX25840_COMPONENT_ON, + .amux = CX25840_AUDIO7, + .gpio0 = 0x704040, + } }, + }, [CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000] = { .name = "Leadtek Winfast PxDVR3200 H XC4000", .porta = CX23885_ANALOG_VIDEO, @@ -259,6 +292,16 @@ struct cx23885_board cx23885_boards[] = { .name = "TurboSight TBS 6920", .portb = CX23885_MPEG_DVB, }, + [CX23885_BOARD_TBS_6980] = { + .name = "TurboSight TBS 6980", + .portb = CX23885_MPEG_DVB, + .portc = CX23885_MPEG_DVB, + }, + [CX23885_BOARD_TBS_6981] = { + .name = "TurboSight TBS 6981", + .portb = CX23885_MPEG_DVB, + .portc = CX23885_MPEG_DVB, + }, [CX23885_BOARD_TEVII_S470] = { .name = "TeVii S470", .portb = CX23885_MPEG_DVB, @@ -688,6 +731,10 @@ struct cx23885_subid cx23885_subids[] = { .card = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H, }, { .subvendor = 0x107d, + .subdevice = 0x6f21, + .card = CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200, + }, { + .subvendor = 0x107d, .subdevice = 0x6f39, .card = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000, }, { @@ -699,6 +746,14 @@ struct cx23885_subid cx23885_subids[] = { .subdevice = 0x8888, .card = CX23885_BOARD_TBS_6920, }, { + .subvendor = 0x6980, + .subdevice = 0x8888, + .card = CX23885_BOARD_TBS_6980, + }, { + .subvendor = 0x6981, + .subdevice = 0x8888, + .card = CX23885_BOARD_TBS_6981, + }, { .subvendor = 0xd470, .subdevice = 0x9022, .card = CX23885_BOARD_TEVII_S470, @@ -1023,6 +1078,35 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data) dev->name, tv.model); } +/* Some TBS cards require initing a chip using a bitbanged SPI attached + to the cx23885 gpio's. If this chip doesn't get init'ed the demod + doesn't respond to any command. 
*/ +static void tbs_card_init(struct cx23885_dev *dev) +{ + int i; + const u8 buf[] = { + 0xe0, 0x06, 0x66, 0x33, 0x65, + 0x01, 0x17, 0x06, 0xde}; + + switch (dev->board) { + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: + cx_set(GP0_IO, 0x00070007); + usleep_range(1000, 10000); + cx_clear(GP0_IO, 2); + usleep_range(1000, 10000); + for (i = 0; i < 9 * 8; i++) { + cx_clear(GP0_IO, 7); + usleep_range(1000, 10000); + cx_set(GP0_IO, + ((buf[i >> 3] >> (7 - (i & 7))) & 1) | 4); + usleep_range(1000, 10000); + } + cx_set(GP0_IO, 7); + break; + } +} + int cx23885_tuner_callback(void *priv, int component, int command, int arg) { struct cx23885_tsport *port = priv; @@ -1043,6 +1127,7 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg) case CX23885_BOARD_HAUPPAUGE_HVR1500: case CX23885_BOARD_HAUPPAUGE_HVR1500Q: case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H: + case CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200: case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000: case CX23885_BOARD_COMPRO_VIDEOMATE_E650F: case CX23885_BOARD_COMPRO_VIDEOMATE_E800: @@ -1208,6 +1293,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev) cx_set(GP0_IO, 0x000f000f); break; case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H: + case CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200: case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000: case CX23885_BOARD_COMPRO_VIDEOMATE_E650F: case CX23885_BOARD_COMPRO_VIDEOMATE_E800: @@ -1225,6 +1311,8 @@ void cx23885_gpio_setup(struct cx23885_dev *dev) cx_set(GP0_IO, 0x00040004); break; case CX23885_BOARD_TBS_6920: + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: case CX23885_BOARD_PROF_8000: cx_write(MC417_CTL, 0x00000036); cx_write(MC417_OEN, 0x00001000); @@ -1473,6 +1561,8 @@ int cx23885_ir_init(struct cx23885_dev *dev) case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL: case CX23885_BOARD_TEVII_S470: case CX23885_BOARD_MYGICA_X8507: + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: if (!enable_885_ir) break; dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_AV_CORE); @@ -1516,6 +1606,8 @@ void cx23885_ir_fini(struct cx23885_dev *dev) case CX23885_BOARD_TEVII_S470: case CX23885_BOARD_HAUPPAUGE_HVR1250: case CX23885_BOARD_MYGICA_X8507: + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: cx23885_irq_remove(dev, PCI_MSK_AV_CORE); /* sd_ir is a duplicate pointer to the AV Core, just clear it */ dev->sd_ir = NULL; @@ -1561,6 +1653,8 @@ void cx23885_ir_pci_int_enable(struct cx23885_dev *dev) case CX23885_BOARD_TEVII_S470: case CX23885_BOARD_HAUPPAUGE_HVR1250: case CX23885_BOARD_MYGICA_X8507: + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: if (dev->sd_ir) cx23885_irq_add_enable(dev, PCI_MSK_AV_CORE); break; @@ -1676,6 +1770,16 @@ void cx23885_card_setup(struct cx23885_dev *dev) ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */ ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; break; + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: + ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */ + ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */ + ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; + ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */ + ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */ + ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; + tbs_card_init(dev); + break; case CX23885_BOARD_MYGICA_X8506: case CX23885_BOARD_MAGICPRO_PROHDTVE2: case CX23885_BOARD_MYGICA_X8507: @@ -1704,6 +1808,7 @@ void cx23885_card_setup(struct cx23885_dev *dev) case CX23885_BOARD_HAUPPAUGE_HVR1700: case 
CX23885_BOARD_HAUPPAUGE_HVR1400: case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H: + case CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200: case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000: case CX23885_BOARD_COMPRO_VIDEOMATE_E650F: case CX23885_BOARD_HAUPPAUGE_HVR1270: @@ -1733,6 +1838,7 @@ void cx23885_card_setup(struct cx23885_dev *dev) case CX23885_BOARD_HAUPPAUGE_HVR1800lp: case CX23885_BOARD_HAUPPAUGE_HVR1700: case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H: + case CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200: case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000: case CX23885_BOARD_COMPRO_VIDEOMATE_E650F: case CX23885_BOARD_NETUP_DUAL_DVBS2_CI: @@ -1752,6 +1858,8 @@ void cx23885_card_setup(struct cx23885_dev *dev) case CX23885_BOARD_MYGICA_X8507: case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL: case CX23885_BOARD_AVERMEDIA_HC81R: + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[2].i2c_adap, "cx25840", 0x88 >> 1, NULL); diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c index 9f63d93239ec..edcd79db1e4e 100644 --- a/drivers/media/pci/cx23885/cx23885-core.c +++ b/drivers/media/pci/cx23885/cx23885-core.c @@ -2129,7 +2129,7 @@ static int cx23885_initdev(struct pci_dev *pci_dev, } err = request_irq(pci_dev->irq, cx23885_irq, - IRQF_SHARED | IRQF_DISABLED, dev->name, dev); + IRQF_SHARED, dev->name, dev); if (err < 0) { printk(KERN_ERR "%s: can't get IRQ %d\n", dev->name, pci_dev->irq); diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c index 971e4ff1b87f..05492053b473 100644 --- a/drivers/media/pci/cx23885/cx23885-dvb.c +++ b/drivers/media/pci/cx23885/cx23885-dvb.c @@ -51,6 +51,7 @@ #include "stv6110.h" #include "lnbh24.h" #include "cx24116.h" +#include "cx24117.h" #include "cimax2.h" #include "lgs8gxx.h" #include "netup-eeprom.h" @@ -461,6 +462,10 @@ static struct cx24116_config tbs_cx24116_config = { .demod_address = 0x55, }; +static struct cx24117_config tbs_cx24117_config = { + .demod_address = 0x55, +}; + static struct ds3000_config tevii_ds3000_config = { .demod_address = 0x68, }; @@ -1044,6 +1049,25 @@ static int dvb_register(struct cx23885_tsport *port) fe0->dvb.frontend->ops.set_voltage = f300_set_voltage; break; + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: + i2c_bus = &dev->i2c_bus[1]; + + switch (port->nr) { + /* PORT B */ + case 1: + fe0->dvb.frontend = dvb_attach(cx24117_attach, + &tbs_cx24117_config, + &i2c_bus->i2c_adap); + break; + /* PORT C */ + case 2: + fe0->dvb.frontend = dvb_attach(cx24117_attach, + &tbs_cx24117_config, + &i2c_bus->i2c_adap); + break; + } + break; case CX23885_BOARD_TEVII_S470: i2c_bus = &dev->i2c_bus[1]; diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c index 7875dfbe09ff..8a49e7c9eddd 100644 --- a/drivers/media/pci/cx23885/cx23885-input.c +++ b/drivers/media/pci/cx23885/cx23885-input.c @@ -90,6 +90,8 @@ void cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events) case CX23885_BOARD_TEVII_S470: case CX23885_BOARD_HAUPPAUGE_HVR1250: case CX23885_BOARD_MYGICA_X8507: + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: /* * The only boards we handle right now. 
However other boards * using the CX2388x integrated IR controller should be similar @@ -168,6 +170,8 @@ static int cx23885_input_ir_start(struct cx23885_dev *dev) break; case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL: case CX23885_BOARD_TEVII_S470: + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: /* * The IR controller on this board only returns pulse widths. * Any other mode setting will fail to set up the device. @@ -298,6 +302,14 @@ int cx23885_input_init(struct cx23885_dev *dev) /* A guess at the remote */ rc_map = RC_MAP_TOTAL_MEDIA_IN_HAND_02; break; + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: + /* Integrated CX23885 IR controller */ + driver_type = RC_DRIVER_IR_RAW; + allowed_protos = RC_BIT_ALL; + /* A guess at the remote */ + rc_map = RC_MAP_TBS_NEC; + break; default: return -ENODEV; } diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c index 161686832b20..7891f34157d1 100644 --- a/drivers/media/pci/cx23885/cx23885-video.c +++ b/drivers/media/pci/cx23885/cx23885-video.c @@ -1865,7 +1865,8 @@ int cx23885_video_register(struct cx23885_dev *dev) v4l2_subdev_call(sd, tuner, s_type_addr, &tun_setup); - if (dev->board == CX23885_BOARD_LEADTEK_WINFAST_PXTV1200) { + if ((dev->board == CX23885_BOARD_LEADTEK_WINFAST_PXTV1200) || + (dev->board == CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200)) { struct xc2028_ctrl ctrl = { .fname = XC2028_DEFAULT_FIRMWARE, .max_len = 64 diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h index 038caf53908b..0fa4048ab872 100644 --- a/drivers/media/pci/cx23885/cx23885.h +++ b/drivers/media/pci/cx23885/cx23885.h @@ -93,6 +93,9 @@ #define CX23885_BOARD_PROF_8000 37 #define CX23885_BOARD_HAUPPAUGE_HVR4400 38 #define CX23885_BOARD_AVERMEDIA_HC81R 39 +#define CX23885_BOARD_TBS_6981 40 +#define CX23885_BOARD_TBS_6980 41 +#define CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200 42 #define GPIO_0 0x00000001 #define GPIO_1 0x00000002 diff --git a/drivers/media/pci/cx25821/cx25821-cards.c b/drivers/media/pci/cx25821/cx25821-cards.c index 3b409feb03d8..f2ebc989b303 100644 --- a/drivers/media/pci/cx25821/cx25821-cards.c +++ b/drivers/media/pci/cx25821/cx25821-cards.c @@ -45,5 +45,3 @@ struct cx25821_board cx25821_boards[] = { }, }; - -const unsigned int cx25821_bcount = ARRAY_SIZE(cx25821_boards); diff --git a/drivers/media/pci/cx25821/cx25821-medusa-video.c b/drivers/media/pci/cx25821/cx25821-medusa-video.c index 22fa04415ccc..43bdfa4dfba1 100644 --- a/drivers/media/pci/cx25821/cx25821-medusa-video.c +++ b/drivers/media/pci/cx25821/cx25821-medusa-video.c @@ -438,7 +438,7 @@ void medusa_set_resolution(struct cx25821_dev *dev, int width, decoder_count = decoder_select + 1; } else { decoder = 0; - decoder_count = _num_decoders; + decoder_count = dev->_max_num_decoders; } switch (width) { @@ -506,8 +506,6 @@ static void medusa_set_decoderduration(struct cx25821_dev *dev, int decoder, break; } - _display_field_cnt[decoder] = duration; - /* update hardware */ fld_cnt = cx25821_i2c_read(&dev->i2c_bus[0], disp_cnt_reg, &tmp); @@ -667,8 +665,6 @@ int medusa_video_init(struct cx25821_dev *dev) int ret_val = 0; int i = 0; - _num_decoders = dev->_max_num_decoders; - /* disable Auto source selection on all video decoders */ value = cx25821_i2c_read(&dev->i2c_bus[0], MON_A_CTRL, &tmp); value &= 0xFFFFF0FF; @@ -685,8 +681,14 @@ int medusa_video_init(struct cx25821_dev *dev) if (ret_val < 0) goto error; - for (i = 0; i < _num_decoders; i++) - medusa_set_decoderduration(dev, i, 
_display_field_cnt[i]); + /* + * FIXME: due to a coding bug the duration was always 0. It's + * likely that it really should be something else, but due to the + * lack of documentation I have no idea what it should be. For + * now just fill in 0 as the duration. + */ + for (i = 0; i < dev->_max_num_decoders; i++) + medusa_set_decoderduration(dev, i, 0); /* Select monitor as DENC A input, power up the DAC */ value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_AB_CTRL, &tmp); @@ -717,7 +719,7 @@ int medusa_video_init(struct cx25821_dev *dev) /* Turn on all of the data out and control output pins. */ value = cx25821_i2c_read(&dev->i2c_bus[0], PIN_OE_CTRL, &tmp); value &= 0xFEF0FE00; - if (_num_decoders == MAX_DECODERS) { + if (dev->_max_num_decoders == MAX_DECODERS) { /* * Note: The octal board does not support control pins(bit16-19) * These bits are ignored in the octal board. diff --git a/drivers/media/pci/cx25821/cx25821-medusa-video.h b/drivers/media/pci/cx25821/cx25821-medusa-video.h index 6175e0961855..8bf602ff27b1 100644 --- a/drivers/media/pci/cx25821/cx25821-medusa-video.h +++ b/drivers/media/pci/cx25821/cx25821-medusa-video.h @@ -40,10 +40,4 @@ #define CONTRAST_DEFAULT 5000 #define HUE_DEFAULT 5000 -unsigned short _num_decoders; -unsigned short _num_cameras; - -unsigned int _video_standard; -int _display_field_cnt[MAX_DECODERS]; - #endif diff --git a/drivers/media/pci/cx25821/cx25821-video-upstream.c b/drivers/media/pci/cx25821/cx25821-video-upstream.c index 88ffef410c50..1f43be0b04c8 100644 --- a/drivers/media/pci/cx25821/cx25821-video-upstream.c +++ b/drivers/media/pci/cx25821/cx25821-video-upstream.c @@ -159,10 +159,10 @@ static __le32 *cx25821_risc_field_upstream(struct cx25821_channel *chan, __le32 * For the upstream video channel, the risc engine will enable * the FIFO. 
*/ if (fifo_enable && line == 3) { - *(rp++) = RISC_WRITECR; - *(rp++) = sram_ch->dma_ctl; - *(rp++) = FLD_VID_FIFO_EN; - *(rp++) = 0x00000001; + *(rp++) = cpu_to_le32(RISC_WRITECR); + *(rp++) = cpu_to_le32(sram_ch->dma_ctl); + *(rp++) = cpu_to_le32(FLD_VID_FIFO_EN); + *(rp++) = cpu_to_le32(0x00000001); } } diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c index aba5b1c649e6..400eb1c42d3f 100644 --- a/drivers/media/pci/cx88/cx88-alsa.c +++ b/drivers/media/pci/cx88/cx88-alsa.c @@ -834,7 +834,7 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci, /* get irq */ err = request_irq(chip->pci->irq, cx8801_irq, - IRQF_SHARED | IRQF_DISABLED, chip->core->name, chip); + IRQF_SHARED, chip->core->name, chip); if (err < 0) { dprintk(0, "%s: can't get IRQ %d\n", chip->core->name, chip->pci->irq); @@ -935,8 +935,6 @@ static void cx88_audio_finidev(struct pci_dev *pci) snd_card_free((void *)card); - pci_set_drvdata(pci, NULL); - devno--; } @@ -951,27 +949,4 @@ static struct pci_driver cx88_audio_pci_driver = { .remove = cx88_audio_finidev, }; -/**************************************************************************** - LINUX MODULE INIT - ****************************************************************************/ - -/* - * module init - */ -static int __init cx88_audio_init(void) -{ - printk(KERN_INFO "cx2388x alsa driver version %s loaded\n", - CX88_VERSION); - return pci_register_driver(&cx88_audio_pci_driver); -} - -/* - * module remove - */ -static void __exit cx88_audio_fini(void) -{ - pci_unregister_driver(&cx88_audio_pci_driver); -} - -module_init(cx88_audio_init); -module_exit(cx88_audio_fini); +module_pci_driver(cx88_audio_pci_driver); diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c index 2d3507eb4897..74b7b8614c23 100644 --- a/drivers/media/pci/cx88/cx88-mpeg.c +++ b/drivers/media/pci/cx88/cx88-mpeg.c @@ -499,7 +499,7 @@ static int cx8802_init_common(struct cx8802_dev *dev) /* get irq */ err = request_irq(dev->pci->irq, cx8802_irq, - IRQF_SHARED | IRQF_DISABLED, dev->core->name, dev); + IRQF_SHARED, dev->core->name, dev); if (err < 0) { printk(KERN_ERR "%s: can't get IRQ %d\n", dev->core->name, dev->pci->irq); @@ -520,7 +520,6 @@ static void cx8802_fini_common(struct cx8802_dev *dev) /* unregister stuff */ free_irq(dev->pci->irq, dev); - pci_set_drvdata(dev->pci, NULL); /* free memory */ btcx_riscmem_free(dev->pci,&dev->mpegq.stopper); @@ -903,20 +902,8 @@ static struct pci_driver cx8802_pci_driver = { .remove = cx8802_remove, }; -static int __init cx8802_init(void) -{ - printk(KERN_INFO "cx88/2: cx2388x MPEG-TS Driver Manager version %s loaded\n", - CX88_VERSION); - return pci_register_driver(&cx8802_pci_driver); -} - -static void __exit cx8802_fini(void) -{ - pci_unregister_driver(&cx8802_pci_driver); -} +module_pci_driver(cx8802_pci_driver); -module_init(cx8802_init); -module_exit(cx8802_fini); EXPORT_SYMBOL(cx8802_buf_prepare); EXPORT_SYMBOL(cx8802_buf_queue); EXPORT_SYMBOL(cx8802_cancel_buffers); diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index ecf21d9f1f34..ed8cb9037b6f 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c @@ -1738,7 +1738,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev, /* get irq */ err = request_irq(pci_dev->irq, cx8800_irq, - IRQF_SHARED | IRQF_DISABLED, core->name, dev); + IRQF_SHARED, core->name, dev); if (err < 0) { printk(KERN_ERR "%s/0: can't get IRQ %d\n", 
core->name,pci_dev->irq); @@ -1922,7 +1922,6 @@ static void cx8800_finidev(struct pci_dev *pci_dev) free_irq(pci_dev->irq, dev); cx8800_unregister_video(dev); - pci_set_drvdata(pci_dev, NULL); /* free memory */ btcx_riscmem_free(dev->pci,&dev->vidq.stopper); @@ -2039,17 +2038,4 @@ static struct pci_driver cx8800_pci_driver = { #endif }; -static int __init cx8800_init(void) -{ - printk(KERN_INFO "cx88/0: cx2388x v4l2 driver version %s loaded\n", - CX88_VERSION); - return pci_register_driver(&cx8800_pci_driver); -} - -static void __exit cx8800_fini(void) -{ - pci_unregister_driver(&cx8800_pci_driver); -} - -module_init(cx8800_init); -module_exit(cx8800_fini); +module_pci_driver(cx8800_pci_driver); diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c index 36e34522b9a8..9375f30d9a81 100644 --- a/drivers/media/pci/ddbridge/ddbridge-core.c +++ b/drivers/media/pci/ddbridge/ddbridge-core.c @@ -1544,7 +1544,7 @@ static void ddb_unmap(struct ddb *dev) static void ddb_remove(struct pci_dev *pdev) { - struct ddb *dev = (struct ddb *) pci_get_drvdata(pdev); + struct ddb *dev = pci_get_drvdata(pdev); ddb_ports_detach(dev); ddb_i2c_release(dev); diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c index ab797fe466d2..e60ac35fc10c 100644 --- a/drivers/media/pci/dm1105/dm1105.c +++ b/drivers/media/pci/dm1105/dm1105.c @@ -1178,7 +1178,6 @@ err_pci_release_regions: err_pci_disable_device: pci_disable_device(pdev); err_kfree: - pci_set_drvdata(pdev, NULL); kfree(dev); return ret; } @@ -1202,8 +1201,7 @@ static void dm1105_remove(struct pci_dev *pdev) dvb_dmxdev_release(&dev->dmxdev); dvb_dmx_release(dvbdemux); dvb_unregister_adapter(dvb_adapter); - if (&dev->i2c_adap) - i2c_del_adapter(&dev->i2c_adap); + i2c_del_adapter(&dev->i2c_adap); dm1105_hw_exit(dev); synchronize_irq(pdev->irq); @@ -1211,7 +1209,6 @@ static void dm1105_remove(struct pci_dev *pdev) pci_iounmap(pdev, dev->io_mem); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); dm1105_devcount--; kfree(dev); } diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c index c08ae3eb9554..802642d26643 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.c +++ b/drivers/media/pci/ivtv/ivtv-driver.c @@ -1261,7 +1261,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) /* Register IRQ */ retval = request_irq(itv->pdev->irq, ivtv_irq_handler, - IRQF_SHARED | IRQF_DISABLED, itv->v4l2_dev.name, (void *)itv); + IRQF_SHARED, itv->v4l2_dev.name, (void *)itv); if (retval) { IVTV_ERR("Failed to register irq %d\n", retval); goto free_i2c; diff --git a/drivers/media/pci/mantis/mantis_pci.c b/drivers/media/pci/mantis/mantis_pci.c index a846036ea022..9e89e045213a 100644 --- a/drivers/media/pci/mantis/mantis_pci.c +++ b/drivers/media/pci/mantis/mantis_pci.c @@ -143,7 +143,6 @@ fail1: fail0: dprintk(MANTIS_ERROR, 1, "ERROR: <%d> exiting", ret); - pci_set_drvdata(pdev, NULL); return ret; } EXPORT_SYMBOL_GPL(mantis_pci_init); @@ -161,7 +160,6 @@ void mantis_pci_exit(struct mantis_pci *mantis) } pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } EXPORT_SYMBOL_GPL(mantis_pci_exit); diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c index 2381b05432e6..54d5c821007c 100644 --- a/drivers/media/pci/meye/meye.c +++ b/drivers/media/pci/meye/meye.c @@ -1698,7 +1698,7 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) meye.mchip_irq = pcidev->irq; if 
(request_irq(meye.mchip_irq, meye_irq, - IRQF_DISABLED | IRQF_SHARED, "meye", meye_irq)) { + IRQF_SHARED, "meye", meye_irq)) { v4l2_err(v4l2_dev, "request_irq failed\n"); goto outreqirq; } diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c index 37ebc42392ad..970e83308525 100644 --- a/drivers/media/pci/ngene/ngene-core.c +++ b/drivers/media/pci/ngene/ngene-core.c @@ -1622,7 +1622,7 @@ static void ngene_unlink(struct ngene *dev) void ngene_shutdown(struct pci_dev *pdev) { - struct ngene *dev = (struct ngene *)pci_get_drvdata(pdev); + struct ngene *dev = pci_get_drvdata(pdev); if (!dev || !shutdown_workaround) return; @@ -1648,7 +1648,6 @@ void ngene_remove(struct pci_dev *pdev) cxd_detach(dev); ngene_stop(dev); ngene_release_buffers(dev); - pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); } @@ -1702,6 +1701,5 @@ fail1: ngene_release_buffers(dev); fail0: pci_disable_device(pci_dev); - pci_set_drvdata(pci_dev, NULL); return stat; } diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c index 493828500055..8164d74b46a4 100644 --- a/drivers/media/pci/pluto2/pluto2.c +++ b/drivers/media/pci/pluto2/pluto2.c @@ -736,7 +736,6 @@ err_pci_release_regions: err_pci_disable_device: pci_disable_device(pdev); err_kfree: - pci_set_drvdata(pdev, NULL); kfree(pluto); goto out; } @@ -765,7 +764,6 @@ static void pluto2_remove(struct pci_dev *pdev) pci_iounmap(pdev, pluto->io_mem); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); kfree(pluto); } diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c index 75ce14229e03..db887b0c37b1 100644 --- a/drivers/media/pci/pt1/pt1.c +++ b/drivers/media/pci/pt1/pt1.c @@ -1076,7 +1076,6 @@ static void pt1_remove(struct pci_dev *pdev) pt1_update_power(pt1); pt1_cleanup_adapters(pt1); i2c_del_adapter(&pt1->i2c_adap); - pci_set_drvdata(pdev, NULL); kfree(pt1); pci_iounmap(pdev, regs); pci_release_regions(pdev); @@ -1198,7 +1197,6 @@ err_i2c_del_adapter: err_pt1_cleanup_adapters: pt1_cleanup_adapters(pt1); err_kfree: - pci_set_drvdata(pdev, NULL); kfree(pt1); err_pci_iounmap: pci_iounmap(pdev, regs); diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c index dbcdfbf8aed0..dd67c8a400cc 100644 --- a/drivers/media/pci/saa7134/saa7134-alsa.c +++ b/drivers/media/pci/saa7134/saa7134-alsa.c @@ -1096,7 +1096,7 @@ static int alsa_card_saa7134_create(struct saa7134_dev *dev, int devnum) err = request_irq(dev->pci->irq, saa7134_alsa_irq, - IRQF_SHARED | IRQF_DISABLED, dev->name, + IRQF_SHARED, dev->name, (void*) &dev->dmasound); if (err < 0) { diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c index 45f0aca597ae..27d7ee709c58 100644 --- a/drivers/media/pci/saa7134/saa7134-core.c +++ b/drivers/media/pci/saa7134/saa7134-core.c @@ -992,7 +992,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev, /* get irq */ err = request_irq(pci_dev->irq, saa7134_irq, - IRQF_SHARED | IRQF_DISABLED, dev->name, dev); + IRQF_SHARED, dev->name, dev); if (err < 0) { printk(KERN_ERR "%s: can't get IRQ %d\n", dev->name,pci_dev->irq); diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c index d37ee37aaefe..57ef5456f1e8 100644 --- a/drivers/media/pci/saa7164/saa7164-core.c +++ b/drivers/media/pci/saa7164/saa7164-core.c @@ -1232,7 +1232,7 @@ static int saa7164_initdev(struct pci_dev *pci_dev, } err = request_irq(pci_dev->irq, saa7164_irq, - IRQF_SHARED | 
IRQF_DISABLED, dev->name, dev); + IRQF_SHARED, dev->name, dev); if (err < 0) { printk(KERN_ERR "%s: can't get IRQ %d\n", dev->name, pci_dev->irq); @@ -1439,7 +1439,6 @@ static void saa7164_finidev(struct pci_dev *pci_dev) /* unregister stuff */ free_irq(pci_dev->irq, dev); - pci_set_drvdata(pci_dev, NULL); mutex_lock(&devlist); list_del(&dev->devlist); diff --git a/drivers/media/pci/ttpci/av7110_hw.c b/drivers/media/pci/ttpci/av7110_hw.c index f1cbfe526989..6299d5dadb82 100644 --- a/drivers/media/pci/ttpci/av7110_hw.c +++ b/drivers/media/pci/ttpci/av7110_hw.c @@ -22,7 +22,7 @@ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * - * the project's page is at http://www.linuxtv.org/ + * the project's page is at http://www.linuxtv.org/ */ /* for debugging ARM communication: */ @@ -40,6 +40,14 @@ #define _NOHANDSHAKE +/* + * Max transfer size done by av7110_fw_cmd() + * + * The maximum size passed to this function is 6 bytes. The buffer also + * uses two additional ones for type and size. So, 8 bytes is enough. + */ +#define MAX_XFER_SIZE 8 + /**************************************************************************** * DEBI functions ****************************************************************************/ @@ -488,11 +496,18 @@ static int av7110_send_fw_cmd(struct av7110 *av7110, u16* buf, int length) int av7110_fw_cmd(struct av7110 *av7110, int type, int com, int num, ...) { va_list args; - u16 buf[num + 2]; + u16 buf[MAX_XFER_SIZE]; int i, ret; // dprintk(4, "%p\n", av7110); + if (2 + num > sizeof(buf)) { + printk(KERN_WARNING + "%s: %s len=%d is too big!\n", + KBUILD_MODNAME, __func__, num); + return -EINVAL; + } + buf[0] = ((type << 8) | com); buf[1] = num; diff --git a/drivers/media/pci/zoran/Kconfig b/drivers/media/pci/zoran/Kconfig index 26ca8702e33f..39ec35bd21a5 100644 --- a/drivers/media/pci/zoran/Kconfig +++ b/drivers/media/pci/zoran/Kconfig @@ -1,6 +1,7 @@ config VIDEO_ZORAN tristate "Zoran ZR36057/36067 Video For Linux" depends on PCI && I2C_ALGOBIT && VIDEO_V4L2 && VIRT_TO_BUS + depends on !ALPHA help Say Y for support for MJPEG capture cards based on the Zoran 36057/36067 PCI controller chipset. This includes the Iomega diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c index 923d59a321f8..cec5b7553f28 100644 --- a/drivers/media/pci/zoran/zoran_card.c +++ b/drivers/media/pci/zoran/zoran_card.c @@ -1293,7 +1293,7 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } result = request_irq(zr->pci_dev->irq, zoran_irq, - IRQF_SHARED | IRQF_DISABLED, ZR_DEVNAME(zr), zr); + IRQF_SHARED, ZR_DEVNAME(zr), zr); if (result < 0) { if (result == -EINVAL) { dprintk(1, diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index eb70dda8cbf3..d7f0249e4050 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -143,6 +143,7 @@ if V4L_MEM2MEM_DRIVERS config VIDEO_CODA tristate "Chips&Media Coda multi-standard codec IP" depends on VIDEO_DEV && VIDEO_V4L2 && ARCH_MXC + select SRAM select VIDEOBUF2_DMA_CONTIG select V4L2_MEM2MEM_DEV ---help--- @@ -212,7 +213,7 @@ config VIDEO_SH_VEU config VIDEO_RENESAS_VSP1 tristate "Renesas VSP1 Video Processing Engine" - depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API + depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA select VIDEOBUF2_DMA_CONTIG ---help--- This is a V4L2 driver for the Renesas VSP1 video processing engine. 
@@ -220,6 +221,22 @@ config VIDEO_RENESAS_VSP1 To compile this driver as a module, choose M here: the module will be called vsp1. +config VIDEO_TI_VPE + tristate "TI VPE (Video Processing Engine) driver" + depends on VIDEO_DEV && VIDEO_V4L2 && SOC_DRA7XX + select VIDEOBUF2_DMA_CONTIG + select V4L2_MEM2MEM_DEV + default n + ---help--- + Support for the TI VPE(Video Processing Engine) block + found on DRA7XX SoC. + +config VIDEO_TI_VPE_DEBUG + bool "VPE debug messages" + depends on VIDEO_TI_VPE + ---help--- + Enable debug messages on VPE driver. + endif # V4L_MEM2MEM_DRIVERS menuconfig V4L_TEST_DRIVERS diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile index 4e4da482c522..1348ba1faf92 100644 --- a/drivers/media/platform/Makefile +++ b/drivers/media/platform/Makefile @@ -22,6 +22,8 @@ obj-$(CONFIG_VIDEO_VIVI) += vivi.o obj-$(CONFIG_VIDEO_MEM2MEM_TESTDEV) += mem2mem_testdev.o +obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/ + obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o obj-$(CONFIG_VIDEO_CODA) += coda.o diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c index 4993610051ee..bd72fb97fea5 100644 --- a/drivers/media/platform/coda.c +++ b/drivers/media/platform/coda.c @@ -39,7 +39,7 @@ #define CODA_NAME "coda" -#define CODA_MAX_INSTANCES 4 +#define CODADX6_MAX_INSTANCES 4 #define CODA_FMO_BUF_SIZE 32 #define CODADX6_WORK_BUF_SIZE (288 * 1024 + CODA_FMO_BUF_SIZE * 8 * 1024) @@ -54,8 +54,6 @@ #define CODA_MAX_FRAMEBUFFERS 8 -#define MAX_W 8192 -#define MAX_H 8192 #define CODA_MAX_FRAME_SIZE 0x100000 #define FMO_SLICE_SAVE_BUF_SIZE (32) #define CODA_DEFAULT_GAMMA 4096 @@ -394,14 +392,57 @@ static struct coda_codec *coda_find_codec(struct coda_dev *dev, int src_fourcc, return &codecs[k]; } +static void coda_get_max_dimensions(struct coda_dev *dev, + struct coda_codec *codec, + int *max_w, int *max_h) +{ + struct coda_codec *codecs = dev->devtype->codecs; + int num_codecs = dev->devtype->num_codecs; + unsigned int w, h; + int k; + + if (codec) { + w = codec->max_w; + h = codec->max_h; + } else { + for (k = 0, w = 0, h = 0; k < num_codecs; k++) { + w = max(w, codecs[k].max_w); + h = max(h, codecs[k].max_h); + } + } + + if (max_w) + *max_w = w; + if (max_h) + *max_h = h; +} + +static char *coda_product_name(int product) +{ + static char buf[9]; + + switch (product) { + case CODA_DX6: + return "CodaDx6"; + case CODA_7541: + return "CODA7541"; + default: + snprintf(buf, sizeof(buf), "(0x%04x)", product); + return buf; + } +} + /* * V4L2 ioctl() operations. */ -static int vidioc_querycap(struct file *file, void *priv, - struct v4l2_capability *cap) +static int coda_querycap(struct file *file, void *priv, + struct v4l2_capability *cap) { + struct coda_ctx *ctx = fh_to_ctx(priv); + strlcpy(cap->driver, CODA_NAME, sizeof(cap->driver)); - strlcpy(cap->card, CODA_NAME, sizeof(cap->card)); + strlcpy(cap->card, coda_product_name(ctx->dev->devtype->product), + sizeof(cap->card)); strlcpy(cap->bus_info, "platform:" CODA_NAME, sizeof(cap->bus_info)); /* * This is only a mem-to-mem video device. 
The capture and output @@ -457,6 +498,8 @@ static int enum_fmt(void *priv, struct v4l2_fmtdesc *f, fmt = &formats[i]; strlcpy(f->description, fmt->name, sizeof(f->description)); f->pixelformat = fmt->fourcc; + if (!coda_format_is_yuv(fmt->fourcc)) + f->flags |= V4L2_FMT_FLAG_COMPRESSED; return 0; } @@ -464,8 +507,8 @@ static int enum_fmt(void *priv, struct v4l2_fmtdesc *f, return -EINVAL; } -static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_fmtdesc *f) +static int coda_enum_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_fmtdesc *f) { struct coda_ctx *ctx = fh_to_ctx(priv); struct vb2_queue *src_vq; @@ -483,13 +526,14 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0); } -static int vidioc_enum_fmt_vid_out(struct file *file, void *priv, - struct v4l2_fmtdesc *f) +static int coda_enum_fmt_vid_out(struct file *file, void *priv, + struct v4l2_fmtdesc *f) { return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_OUTPUT, 0); } -static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) +static int coda_g_fmt(struct file *file, void *priv, + struct v4l2_format *f) { struct vb2_queue *vq; struct coda_q_data *q_data; @@ -516,8 +560,11 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) return 0; } -static int vidioc_try_fmt(struct coda_codec *codec, struct v4l2_format *f) +static int coda_try_fmt(struct coda_ctx *ctx, struct coda_codec *codec, + struct v4l2_format *f) { + struct coda_dev *dev = ctx->dev; + struct coda_q_data *q_data; unsigned int max_w, max_h; enum v4l2_field field; @@ -531,32 +578,48 @@ static int vidioc_try_fmt(struct coda_codec *codec, struct v4l2_format *f) * if any of the dimensions is unsupported */ f->fmt.pix.field = field; - if (codec) { - max_w = codec->max_w; - max_h = codec->max_h; - } else { - max_w = MAX_W; - max_h = MAX_H; + coda_get_max_dimensions(dev, codec, &max_w, &max_h); + v4l_bound_align_image(&f->fmt.pix.width, MIN_W, max_w, W_ALIGN, + &f->fmt.pix.height, MIN_H, max_h, H_ALIGN, + S_ALIGN); + + switch (f->fmt.pix.pixelformat) { + case V4L2_PIX_FMT_YUV420: + case V4L2_PIX_FMT_YVU420: + case V4L2_PIX_FMT_H264: + case V4L2_PIX_FMT_MPEG4: + case V4L2_PIX_FMT_JPEG: + break; + default: + q_data = get_q_data(ctx, f->type); + f->fmt.pix.pixelformat = q_data->fourcc; } - v4l_bound_align_image(&f->fmt.pix.width, MIN_W, max_w, - W_ALIGN, &f->fmt.pix.height, - MIN_H, max_h, H_ALIGN, S_ALIGN); - if (coda_format_is_yuv(f->fmt.pix.pixelformat)) { + switch (f->fmt.pix.pixelformat) { + case V4L2_PIX_FMT_YUV420: + case V4L2_PIX_FMT_YVU420: /* Frame stride must be multiple of 8 */ f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 8); f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height * 3 / 2; - } else { /*encoded formats h.264/mpeg4 */ + break; + case V4L2_PIX_FMT_H264: + case V4L2_PIX_FMT_MPEG4: + case V4L2_PIX_FMT_JPEG: f->fmt.pix.bytesperline = 0; f->fmt.pix.sizeimage = CODA_MAX_FRAME_SIZE; + break; + default: + BUG(); } + f->fmt.pix.priv = 0; + return 0; } -static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_format *f) +static int coda_try_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) { struct coda_ctx *ctx = fh_to_ctx(priv); struct coda_codec *codec; @@ -584,7 +647,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, f->fmt.pix.colorspace = ctx->colorspace; - ret = vidioc_try_fmt(codec, f); + ret = coda_try_fmt(ctx, codec, f); if (ret < 0) 
return ret; @@ -600,8 +663,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, return 0; } -static int vidioc_try_fmt_vid_out(struct file *file, void *priv, - struct v4l2_format *f) +static int coda_try_fmt_vid_out(struct file *file, void *priv, + struct v4l2_format *f) { struct coda_ctx *ctx = fh_to_ctx(priv); struct coda_codec *codec; @@ -613,10 +676,10 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *priv, if (!f->fmt.pix.colorspace) f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709; - return vidioc_try_fmt(codec, f); + return coda_try_fmt(ctx, codec, f); } -static int vidioc_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f) +static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f) { struct coda_q_data *q_data; struct vb2_queue *vq; @@ -646,61 +709,62 @@ static int vidioc_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f) return 0; } -static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_format *f) +static int coda_s_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) { struct coda_ctx *ctx = fh_to_ctx(priv); int ret; - ret = vidioc_try_fmt_vid_cap(file, priv, f); + ret = coda_try_fmt_vid_cap(file, priv, f); if (ret) return ret; - return vidioc_s_fmt(ctx, f); + return coda_s_fmt(ctx, f); } -static int vidioc_s_fmt_vid_out(struct file *file, void *priv, - struct v4l2_format *f) +static int coda_s_fmt_vid_out(struct file *file, void *priv, + struct v4l2_format *f) { struct coda_ctx *ctx = fh_to_ctx(priv); int ret; - ret = vidioc_try_fmt_vid_out(file, priv, f); + ret = coda_try_fmt_vid_out(file, priv, f); if (ret) return ret; - ret = vidioc_s_fmt(ctx, f); + ret = coda_s_fmt(ctx, f); if (ret) ctx->colorspace = f->fmt.pix.colorspace; return ret; } -static int vidioc_reqbufs(struct file *file, void *priv, - struct v4l2_requestbuffers *reqbufs) +static int coda_reqbufs(struct file *file, void *priv, + struct v4l2_requestbuffers *reqbufs) { struct coda_ctx *ctx = fh_to_ctx(priv); return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs); } -static int vidioc_querybuf(struct file *file, void *priv, - struct v4l2_buffer *buf) +static int coda_querybuf(struct file *file, void *priv, + struct v4l2_buffer *buf) { struct coda_ctx *ctx = fh_to_ctx(priv); return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf); } -static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) +static int coda_qbuf(struct file *file, void *priv, + struct v4l2_buffer *buf) { struct coda_ctx *ctx = fh_to_ctx(priv); return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf); } -static int vidioc_expbuf(struct file *file, void *priv, - struct v4l2_exportbuffer *eb) +static int coda_expbuf(struct file *file, void *priv, + struct v4l2_exportbuffer *eb) { struct coda_ctx *ctx = fh_to_ctx(priv); @@ -718,7 +782,8 @@ static bool coda_buf_is_end_of_stream(struct coda_ctx *ctx, (buf->sequence == (ctx->qsequence - 1))); } -static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) +static int coda_dqbuf(struct file *file, void *priv, + struct v4l2_buffer *buf) { struct coda_ctx *ctx = fh_to_ctx(priv); int ret; @@ -738,24 +803,24 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) return ret; } -static int vidioc_create_bufs(struct file *file, void *priv, - struct v4l2_create_buffers *create) +static int coda_create_bufs(struct file *file, void *priv, + struct v4l2_create_buffers *create) { struct coda_ctx *ctx = fh_to_ctx(priv); return v4l2_m2m_create_bufs(file, ctx->m2m_ctx, create); } -static int 
vidioc_streamon(struct file *file, void *priv, - enum v4l2_buf_type type) +static int coda_streamon(struct file *file, void *priv, + enum v4l2_buf_type type) { struct coda_ctx *ctx = fh_to_ctx(priv); return v4l2_m2m_streamon(file, ctx->m2m_ctx, type); } -static int vidioc_streamoff(struct file *file, void *priv, - enum v4l2_buf_type type) +static int coda_streamoff(struct file *file, void *priv, + enum v4l2_buf_type type) { struct coda_ctx *ctx = fh_to_ctx(priv); int ret; @@ -772,23 +837,34 @@ static int vidioc_streamoff(struct file *file, void *priv, return ret; } -static int vidioc_decoder_cmd(struct file *file, void *fh, - struct v4l2_decoder_cmd *dc) +static int coda_try_decoder_cmd(struct file *file, void *fh, + struct v4l2_decoder_cmd *dc) { - struct coda_ctx *ctx = fh_to_ctx(fh); - if (dc->cmd != V4L2_DEC_CMD_STOP) return -EINVAL; - if ((dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK) || - (dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY)) + if (dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK) return -EINVAL; - if (dc->stop.pts != 0) + if (!(dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) && (dc->stop.pts != 0)) return -EINVAL; + return 0; +} + +static int coda_decoder_cmd(struct file *file, void *fh, + struct v4l2_decoder_cmd *dc) +{ + struct coda_ctx *ctx = fh_to_ctx(fh); + int ret; + + ret = coda_try_decoder_cmd(file, fh, dc); + if (ret < 0) + return ret; + + /* Ignore decoder stop command silently in encoder context */ if (ctx->inst_type != CODA_INST_DECODER) - return -EINVAL; + return 0; /* Set the strem-end flag on this context */ ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG; @@ -796,8 +872,8 @@ static int vidioc_decoder_cmd(struct file *file, void *fh, return 0; } -static int vidioc_subscribe_event(struct v4l2_fh *fh, - const struct v4l2_event_subscription *sub) +static int coda_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) { switch (sub->type) { case V4L2_EVENT_EOS: @@ -808,32 +884,33 @@ static int vidioc_subscribe_event(struct v4l2_fh *fh, } static const struct v4l2_ioctl_ops coda_ioctl_ops = { - .vidioc_querycap = vidioc_querycap, + .vidioc_querycap = coda_querycap, - .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, - .vidioc_g_fmt_vid_cap = vidioc_g_fmt, - .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, - .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, + .vidioc_enum_fmt_vid_cap = coda_enum_fmt_vid_cap, + .vidioc_g_fmt_vid_cap = coda_g_fmt, + .vidioc_try_fmt_vid_cap = coda_try_fmt_vid_cap, + .vidioc_s_fmt_vid_cap = coda_s_fmt_vid_cap, - .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out, - .vidioc_g_fmt_vid_out = vidioc_g_fmt, - .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out, - .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out, + .vidioc_enum_fmt_vid_out = coda_enum_fmt_vid_out, + .vidioc_g_fmt_vid_out = coda_g_fmt, + .vidioc_try_fmt_vid_out = coda_try_fmt_vid_out, + .vidioc_s_fmt_vid_out = coda_s_fmt_vid_out, - .vidioc_reqbufs = vidioc_reqbufs, - .vidioc_querybuf = vidioc_querybuf, + .vidioc_reqbufs = coda_reqbufs, + .vidioc_querybuf = coda_querybuf, - .vidioc_qbuf = vidioc_qbuf, - .vidioc_expbuf = vidioc_expbuf, - .vidioc_dqbuf = vidioc_dqbuf, - .vidioc_create_bufs = vidioc_create_bufs, + .vidioc_qbuf = coda_qbuf, + .vidioc_expbuf = coda_expbuf, + .vidioc_dqbuf = coda_dqbuf, + .vidioc_create_bufs = coda_create_bufs, - .vidioc_streamon = vidioc_streamon, - .vidioc_streamoff = vidioc_streamoff, + .vidioc_streamon = coda_streamon, + .vidioc_streamoff = coda_streamoff, - .vidioc_decoder_cmd = vidioc_decoder_cmd, + .vidioc_try_decoder_cmd = coda_try_decoder_cmd, 
+ .vidioc_decoder_cmd = coda_decoder_cmd, - .vidioc_subscribe_event = vidioc_subscribe_event, + .vidioc_subscribe_event = coda_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; @@ -1928,8 +2005,9 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count) if (!(ctx->streamon_out & ctx->streamon_cap)) return 0; - /* Allow device_run with no buffers queued and after streamoff */ - v4l2_m2m_set_src_buffered(ctx->m2m_ctx, true); + /* Allow decoder device_run with no new buffers queued */ + if (ctx->inst_type == CODA_INST_DECODER) + v4l2_m2m_set_src_buffered(ctx->m2m_ctx, true); ctx->gopcounter = ctx->params.gop_size - 1; buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx); @@ -2071,10 +2149,8 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count) coda_setup_iram(ctx); if (dst_fourcc == V4L2_PIX_FMT_H264) { - value = (FMO_SLICE_SAVE_BUF_SIZE << 7); - value |= (0 & CODA_FMOPARAM_TYPE_MASK) << CODA_FMOPARAM_TYPE_OFFSET; - value |= 0 & CODA_FMOPARAM_SLICENUM_MASK; if (dev->devtype->product == CODA_DX6) { + value = FMO_SLICE_SAVE_BUF_SIZE << 7; coda_write(dev, value, CODADX6_CMD_ENC_SEQ_FMO); } else { coda_write(dev, ctx->iram_info.search_ram_paddr, @@ -2371,7 +2447,13 @@ static int coda_queue_init(void *priv, struct vb2_queue *src_vq, static int coda_next_free_instance(struct coda_dev *dev) { - return ffz(dev->instance_mask); + int idx = ffz(dev->instance_mask); + + if ((idx < 0) || + (dev->devtype->product == CODA_DX6 && idx > CODADX6_MAX_INSTANCES)) + return -EBUSY; + + return idx; } static int coda_open(struct file *file) @@ -2386,8 +2468,8 @@ static int coda_open(struct file *file) return -ENOMEM; idx = coda_next_free_instance(dev); - if (idx >= CODA_MAX_INSTANCES) { - ret = -EBUSY; + if (idx < 0) { + ret = idx; goto err_coda_max; } set_bit(idx, &dev->instance_mask); @@ -2719,7 +2801,6 @@ static void coda_finish_encode(struct coda_ctx *ctx) dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx); /* Get results from the coda */ - coda_read(dev, CODA_RET_ENC_PIC_TYPE); start_ptr = coda_read(dev, CODA_CMD_ENC_PIC_BB_START); wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx)); @@ -2739,7 +2820,7 @@ static void coda_finish_encode(struct coda_ctx *ctx) coda_read(dev, CODA_RET_ENC_PIC_SLICE_NUM); coda_read(dev, CODA_RET_ENC_PIC_FLAG); - if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) { + if (coda_read(dev, CODA_RET_ENC_PIC_TYPE) == 0) { dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME; dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME; } else { @@ -2861,21 +2942,6 @@ static bool coda_firmware_supported(u32 vernum) return false; } -static char *coda_product_name(int product) -{ - static char buf[9]; - - switch (product) { - case CODA_DX6: - return "CodaDx6"; - case CODA_7541: - return "CODA7541"; - default: - snprintf(buf, sizeof(buf), "(0x%04x)", product); - return buf; - } -} - static int coda_hw_init(struct coda_dev *dev) { u16 product, major, minor, release; diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c index 04609cc6eba7..eac472b5ae83 100644 --- a/drivers/media/platform/davinci/vpbe_display.c +++ b/drivers/media/platform/davinci/vpbe_display.c @@ -1785,7 +1785,7 @@ static int vpbe_display_probe(struct platform_device *pdev) } irq = res->start; - err = devm_request_irq(&pdev->dev, irq, venc_isr, IRQF_DISABLED, + err = devm_request_irq(&pdev->dev, irq, venc_isr, 0, VPBE_DISPLAY_DRIVER, disp_dev); if (err) { v4l2_err(&disp_dev->vpbe_dev->v4l2_dev, diff --git 
a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c index 93609091cb23..d762246eabf5 100644 --- a/drivers/media/platform/davinci/vpfe_capture.c +++ b/drivers/media/platform/davinci/vpfe_capture.c @@ -688,7 +688,7 @@ static int vpfe_attach_irq(struct vpfe_device *vpfe_dev) frame_format = ccdc_dev->hw_ops.get_frame_format(); if (frame_format == CCDC_FRMFMT_PROGRESSIVE) { return request_irq(vpfe_dev->ccdc_irq1, vdint1_isr, - IRQF_DISABLED, "vpfe_capture1", + 0, "vpfe_capture1", vpfe_dev); } return 0; @@ -1863,7 +1863,7 @@ static int vpfe_probe(struct platform_device *pdev) } vpfe_dev->ccdc_irq1 = res1->start; - ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, IRQF_DISABLED, + ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, 0, "vpfe_capture0", vpfe_dev); if (0 != ret) { diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c index 1089834a4efe..52ac5e6c8625 100644 --- a/drivers/media/platform/davinci/vpif_capture.c +++ b/drivers/media/platform/davinci/vpif_capture.c @@ -2154,7 +2154,7 @@ static __init int vpif_probe(struct platform_device *pdev) if (!vpif_obj.sd[i]) { vpif_err("Error registering v4l2 subdevice\n"); - err = -ENOMEM; + err = -ENODEV; goto probe_subdev_out; } v4l2_info(&vpif_obj.v4l2_dev, diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h index 76435d3bf62d..ef0a6564cef9 100644 --- a/drivers/media/platform/exynos-gsc/gsc-core.h +++ b/drivers/media/platform/exynos-gsc/gsc-core.h @@ -45,6 +45,7 @@ #define GSC_DST_FMT (1 << 2) #define GSC_CTX_M2M (1 << 3) #define GSC_CTX_STOP_REQ (1 << 6) +#define GSC_CTX_ABORT (1 << 7) enum gsc_dev_flags { /* for global */ diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c index e576ff2de3de..810c3e13970c 100644 --- a/drivers/media/platform/exynos-gsc/gsc-m2m.c +++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c @@ -46,6 +46,17 @@ static int gsc_m2m_ctx_stop_req(struct gsc_ctx *ctx) return ret == 0 ? -ETIMEDOUT : ret; } +static void __gsc_m2m_job_abort(struct gsc_ctx *ctx) +{ + int ret; + + ret = gsc_m2m_ctx_stop_req(ctx); + if ((ret == -ETIMEDOUT) || (ctx->state & GSC_CTX_ABORT)) { + gsc_ctx_state_lock_clear(GSC_CTX_STOP_REQ | GSC_CTX_ABORT, ctx); + gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR); + } +} + static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count) { struct gsc_ctx *ctx = q->drv_priv; @@ -58,11 +69,8 @@ static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count) static int gsc_m2m_stop_streaming(struct vb2_queue *q) { struct gsc_ctx *ctx = q->drv_priv; - int ret; - ret = gsc_m2m_ctx_stop_req(ctx); - if (ret == -ETIMEDOUT) - gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR); + __gsc_m2m_job_abort(ctx); pm_runtime_put(&ctx->gsc_dev->pdev->dev); @@ -91,15 +99,9 @@ void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state) } } - static void gsc_m2m_job_abort(void *priv) { - struct gsc_ctx *ctx = priv; - int ret; - - ret = gsc_m2m_ctx_stop_req(ctx); - if (ret == -ETIMEDOUT) - gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR); + __gsc_m2m_job_abort((struct gsc_ctx *)priv); } static int gsc_get_bufs(struct gsc_ctx *ctx) @@ -150,9 +152,10 @@ static void gsc_m2m_device_run(void *priv) gsc->m2m.ctx = ctx; } - is_set = (ctx->state & GSC_CTX_STOP_REQ) ? 
1 : 0; - ctx->state &= ~GSC_CTX_STOP_REQ; + is_set = ctx->state & GSC_CTX_STOP_REQ; if (is_set) { + ctx->state &= ~GSC_CTX_STOP_REQ; + ctx->state |= GSC_CTX_ABORT; wake_up(&gsc->irq_queue); goto put_device; } diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c index d2e6cba3566d..f3c6136aa5b4 100644 --- a/drivers/media/platform/exynos4-is/fimc-isp.c +++ b/drivers/media/platform/exynos4-is/fimc-isp.c @@ -511,7 +511,7 @@ static int __ctrl_set_metering(struct fimc_is *is, unsigned int value) break; default: return -EINVAL; - }; + } __is_set_isp_metering(is, IS_METERING_CONFIG_CMD, val); return 0; diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index a83511278317..7a4ee4c0449d 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -411,8 +411,8 @@ static int fimc_md_of_add_sensor(struct fimc_md *fmd, device_lock(&client->dev); - if (!client->driver || - !try_module_get(client->driver->driver.owner)) { + if (!client->dev.driver || + !try_module_get(client->dev.driver->owner)) { ret = -EPROBE_DEFER; v4l2_info(&fmd->v4l2_dev, "No driver found for %s\n", node->full_name); @@ -442,7 +442,7 @@ static int fimc_md_of_add_sensor(struct fimc_md *fmd, fmd->num_sensors++; mod_put: - module_put(client->driver->driver.owner); + module_put(client->dev.driver->owner); dev_put: device_unlock(&client->dev); put_device(&client->dev); diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c index 540516ca872c..65cab70fefcb 100644 --- a/drivers/media/platform/m2m-deinterlace.c +++ b/drivers/media/platform/m2m-deinterlace.c @@ -341,8 +341,7 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op, ctx->xt->dir = DMA_MEM_TO_MEM; ctx->xt->src_sgl = false; ctx->xt->dst_sgl = true; - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | - DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SKIP_SRC_UNMAP; + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags); if (tx == NULL) { @@ -1084,8 +1083,7 @@ free_dev: static int deinterlace_remove(struct platform_device *pdev) { - struct deinterlace_dev *pcdev = - (struct deinterlace_dev *)platform_get_drvdata(pdev); + struct deinterlace_dev *pcdev = platform_get_drvdata(pdev); v4l2_info(&pcdev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME); v4l2_m2m_release(pcdev->m2m_dev); diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c index 5184887b155c..32fab30a9105 100644 --- a/drivers/media/platform/marvell-ccic/mcam-core.c +++ b/drivers/media/platform/marvell-ccic/mcam-core.c @@ -1221,16 +1221,16 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb) { struct mcam_vb_buffer *mvb = vb_to_mvb(vb); struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue); - struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0); + struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0); struct mcam_dma_desc *desc = mvb->dma_desc; struct scatterlist *sg; int i; - mvb->dma_desc_nent = dma_map_sg(cam->dev, sgd->sglist, sgd->num_pages, - DMA_FROM_DEVICE); + mvb->dma_desc_nent = dma_map_sg(cam->dev, sg_table->sgl, + sg_table->nents, DMA_FROM_DEVICE); if (mvb->dma_desc_nent <= 0) return -EIO; /* Not sure what's right here */ - for_each_sg(sgd->sglist, sg, mvb->dma_desc_nent, i) { + for_each_sg(sg_table->sgl, sg, mvb->dma_desc_nent, i) { desc->dma_addr = sg_dma_address(sg); 
desc->segment_len = sg_dma_len(sg); desc++; @@ -1241,9 +1241,11 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb) static int mcam_vb_sg_buf_finish(struct vb2_buffer *vb) { struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue); - struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0); + struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0); - dma_unmap_sg(cam->dev, sgd->sglist, sgd->num_pages, DMA_FROM_DEVICE); + if (sg_table) + dma_unmap_sg(cam->dev, sg_table->sgl, + sg_table->nents, DMA_FROM_DEVICE); return 0; } diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c index b5a19af5c587..3458fa0e2fd5 100644 --- a/drivers/media/platform/marvell-ccic/mmp-driver.c +++ b/drivers/media/platform/marvell-ccic/mmp-driver.c @@ -481,7 +481,6 @@ static int mmpcam_remove(struct mmp_camera *cam) struct mmp_camera_platform_data *pdata; mmpcam_remove_device(cam); - free_irq(cam->irq, mcam); mccic_shutdown(mcam); mmpcam_power_down(mcam); pdata = cam->pdev->dev.platform_data; diff --git a/drivers/media/platform/mem2mem_testdev.c b/drivers/media/platform/mem2mem_testdev.c index 6a17676f9d72..8df5975b700a 100644 --- a/drivers/media/platform/mem2mem_testdev.c +++ b/drivers/media/platform/mem2mem_testdev.c @@ -1090,8 +1090,7 @@ unreg_dev: static int m2mtest_remove(struct platform_device *pdev) { - struct m2mtest_dev *dev = - (struct m2mtest_dev *)platform_get_drvdata(pdev); + struct m2mtest_dev *dev = platform_get_drvdata(pdev); v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME); v4l2_m2m_release(dev->m2m_dev); diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c index fd6289d60cde..0b2948376aee 100644 --- a/drivers/media/platform/s5p-g2d/g2d.c +++ b/drivers/media/platform/s5p-g2d/g2d.c @@ -840,7 +840,7 @@ put_clk: static int g2d_remove(struct platform_device *pdev) { - struct g2d_dev *dev = (struct g2d_dev *)platform_get_drvdata(pdev); + struct g2d_dev *dev = platform_get_drvdata(pdev); v4l2_info(&dev->v4l2_dev, "Removing " G2D_NAME); v4l2_m2m_release(dev->m2m_dev); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 084263dd126f..5f2c4ad6c2cb 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -404,7 +404,11 @@ leave_handle_frame: if (test_and_clear_bit(0, &dev->hw_lock) == 0) BUG(); s5p_mfc_clock_off(); - s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); + /* if suspending, wake up device and do not try_run again*/ + if (test_bit(0, &dev->enter_suspend)) + wake_up_dev(dev, reason, err); + else + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); } /* Error handling for interrupt */ @@ -1101,7 +1105,7 @@ static int s5p_mfc_probe(struct platform_device *pdev) } dev->irq = res->start; ret = devm_request_irq(&pdev->dev, dev->irq, s5p_mfc_irq, - IRQF_DISABLED, pdev->name, dev); + 0, pdev->name, dev); if (ret) { dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret); goto err_res; @@ -1286,9 +1290,7 @@ static int s5p_mfc_suspend(struct device *dev) /* Try and lock the HW */ /* Wait on the interrupt waitqueue */ ret = wait_event_interruptible_timeout(m_dev->queue, - m_dev->int_cond || m_dev->ctx[m_dev->curr_ctx]->int_cond, - msecs_to_jiffies(MFC_INT_TIMEOUT)); - + m_dev->int_cond, msecs_to_jiffies(MFC_INT_TIMEOUT)); if (ret == 0) { mfc_err("Waiting for hardware to finish timed out\n"); return -EIO; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c 
b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c index ad4f1df0a18e..9a6efd6c1329 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c @@ -111,7 +111,7 @@ static int s5p_mfc_open_inst_cmd_v5(struct s5p_mfc_ctx *ctx) break; default: h2r_args.arg[0] = S5P_FIMV_CODEC_NONE; - }; + } h2r_args.arg[1] = 0; /* no crc & no pixelcache */ h2r_args.arg[2] = ctx->ctx.ofs; h2r_args.arg[3] = ctx->ctx.size; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c index db796c8e7874..ec1a5947ed7d 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c @@ -113,7 +113,7 @@ static int s5p_mfc_open_inst_cmd_v6(struct s5p_mfc_ctx *ctx) break; default: codec_type = S5P_FIMV_CODEC_NONE_V6; - }; + } mfc_write(dev, codec_type, S5P_FIMV_CODEC_TYPE_V6); mfc_write(dev, ctx->ctx.dma, S5P_FIMV_CONTEXT_MEM_ADDR_V6); mfc_write(dev, ctx->ctx.size, S5P_FIMV_CONTEXT_MEM_SIZE_V6); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c index 41f5a3c10dbd..4ff3b6cd6842 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c @@ -113,7 +113,7 @@ static struct mfc_control controls[] = { .minimum = 0, .maximum = (1 << 16) - 1, .step = 1, - .default_value = 0, + .default_value = 12, }, { .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE, @@ -356,7 +356,7 @@ static struct mfc_control controls[] = { .minimum = 0, .maximum = 51, .step = 1, - .default_value = 1, + .default_value = 51, }, { .id = V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, @@ -399,7 +399,7 @@ static struct mfc_control controls[] = { .minimum = 1, .maximum = 31, .step = 1, - .default_value = 1, + .default_value = 31, }, { .id = V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP, @@ -444,7 +444,7 @@ static struct mfc_control controls[] = { .minimum = 0, .maximum = 51, .step = 1, - .default_value = 1, + .default_value = 51, }, { .id = V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP, diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c index 368582b091bf..58ec7bb26ebc 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c @@ -1582,7 +1582,7 @@ static int s5p_mfc_get_int_reason_v5(struct s5p_mfc_dev *dev) break; default: reason = S5P_MFC_R2H_CMD_EMPTY; - }; + } return reason; } diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c index b93a21f5aa13..74344c764daa 100644 --- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c +++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c @@ -226,7 +226,7 @@ static void mxr_graph_fix_geometry(struct mxr_layer *layer, src->width + src->x_offset, 32767); src->full_height = clamp_val(src->full_height, src->height + src->y_offset, 2047); - }; + } } /* PUBLIC API */ diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c index 3d13a636877b..c9388c45ad75 100644 --- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c +++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c @@ -197,7 +197,7 @@ static void mxr_vp_fix_geometry(struct mxr_layer *layer, ALIGN(src->width + src->x_offset, 8), 8192U); src->full_height = clamp(src->full_height, src->height + src->y_offset, 8192U); - }; + } } /* PUBLIC API */ diff --git a/drivers/media/platform/soc_camera/rcar_vin.c 
b/drivers/media/platform/soc_camera/rcar_vin.c index d02a7e0b773f..6866bb4fbebc 100644 --- a/drivers/media/platform/soc_camera/rcar_vin.c +++ b/drivers/media/platform/soc_camera/rcar_vin.c @@ -16,6 +16,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> +#include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_data/camera-rcar.h> @@ -105,6 +106,7 @@ #define VIN_MAX_HEIGHT 2048 enum chip_id { + RCAR_H2, RCAR_H1, RCAR_M1, RCAR_E1, @@ -300,7 +302,8 @@ static int rcar_vin_setup(struct rcar_vin_priv *priv) dmr = 0; break; case V4L2_PIX_FMT_RGB32: - if (priv->chip == RCAR_H1 || priv->chip == RCAR_E1) { + if (priv->chip == RCAR_H2 || priv->chip == RCAR_H1 || + priv->chip == RCAR_E1) { dmr = VNDMR_EXRGB; break; } @@ -1381,6 +1384,7 @@ static struct soc_camera_host_ops rcar_vin_host_ops = { }; static struct platform_device_id rcar_vin_id_table[] = { + { "r8a7790-vin", RCAR_H2 }, { "r8a7779-vin", RCAR_H1 }, { "r8a7778-vin", RCAR_M1 }, { "uPD35004-vin", RCAR_E1 }, diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c index 8df22f779175..150bd4df413c 100644 --- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c +++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c @@ -1800,7 +1800,7 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev) /* request irq */ err = devm_request_irq(&pdev->dev, pcdev->irq, sh_mobile_ceu_irq, - IRQF_DISABLED, dev_name(&pdev->dev), pcdev); + 0, dev_name(&pdev->dev), pcdev); if (err) { dev_err(&pdev->dev, "Unable to register CEU interrupt.\n"); goto exit_release_mem; diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c index 387a232d95a4..4b8c024fc487 100644 --- a/drivers/media/platform/soc_camera/soc_camera.c +++ b/drivers/media/platform/soc_camera/soc_camera.c @@ -71,13 +71,23 @@ static int video_dev_create(struct soc_camera_device *icd); int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd, struct v4l2_clk *clk) { - int ret = clk ? v4l2_clk_enable(clk) : 0; - if (ret < 0) { - dev_err(dev, "Cannot enable clock: %d\n", ret); - return ret; + int ret; + bool clock_toggle; + + if (clk && (!ssdd->unbalanced_power || + !test_and_set_bit(0, &ssdd->clock_state))) { + ret = v4l2_clk_enable(clk); + if (ret < 0) { + dev_err(dev, "Cannot enable clock: %d\n", ret); + return ret; + } + clock_toggle = true; + } else { + clock_toggle = false; } - ret = regulator_bulk_enable(ssdd->num_regulators, - ssdd->regulators); + + ret = regulator_bulk_enable(ssdd->sd_pdata.num_regulators, + ssdd->sd_pdata.regulators); if (ret < 0) { dev_err(dev, "Cannot enable regulators\n"); goto eregenable; @@ -95,10 +105,10 @@ int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd, return 0; epwron: - regulator_bulk_disable(ssdd->num_regulators, - ssdd->regulators); + regulator_bulk_disable(ssdd->sd_pdata.num_regulators, + ssdd->sd_pdata.regulators); eregenable: - if (clk) + if (clock_toggle) v4l2_clk_disable(clk); return ret; @@ -120,14 +130,14 @@ int soc_camera_power_off(struct device *dev, struct soc_camera_subdev_desc *ssdd } } - err = regulator_bulk_disable(ssdd->num_regulators, - ssdd->regulators); + err = regulator_bulk_disable(ssdd->sd_pdata.num_regulators, + ssdd->sd_pdata.regulators); if (err < 0) { dev_err(dev, "Cannot disable regulators\n"); ret = ret ? 
: err; } - if (clk) + if (clk && (!ssdd->unbalanced_power || test_and_clear_bit(0, &ssdd->clock_state))) v4l2_clk_disable(clk); return ret; @@ -137,8 +147,8 @@ EXPORT_SYMBOL(soc_camera_power_off); int soc_camera_power_init(struct device *dev, struct soc_camera_subdev_desc *ssdd) { /* Should not have any effect in synchronous case */ - return devm_regulator_bulk_get(dev, ssdd->num_regulators, - ssdd->regulators); + return devm_regulator_bulk_get(dev, ssdd->sd_pdata.num_regulators, + ssdd->sd_pdata.regulators); } EXPORT_SYMBOL(soc_camera_power_init); @@ -1346,8 +1356,8 @@ static int soc_camera_i2c_init(struct soc_camera_device *icd, * soc_camera_pdrv_probe(), make sure the subdevice driver doesn't try * to allocate them again. */ - ssdd->num_regulators = 0; - ssdd->regulators = NULL; + ssdd->sd_pdata.num_regulators = 0; + ssdd->sd_pdata.regulators = NULL; shd->board_info->platform_data = ssdd; snprintf(clk_name, sizeof(clk_name), "%d-%04x", @@ -2020,8 +2030,8 @@ static int soc_camera_pdrv_probe(struct platform_device *pdev) * that case regulators are attached to the I2C device and not to the * camera platform device. */ - ret = devm_regulator_bulk_get(&pdev->dev, ssdd->num_regulators, - ssdd->regulators); + ret = devm_regulator_bulk_get(&pdev->dev, ssdd->sd_pdata.num_regulators, + ssdd->sd_pdata.regulators); if (ret < 0) return ret; diff --git a/drivers/media/platform/ti-vpe/Makefile b/drivers/media/platform/ti-vpe/Makefile new file mode 100644 index 000000000000..cbf0a806ba1d --- /dev/null +++ b/drivers/media/platform/ti-vpe/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o + +ti-vpe-y := vpe.o vpdma.o + +ccflags-$(CONFIG_VIDEO_TI_VPE_DEBUG) += -DDEBUG diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c new file mode 100644 index 000000000000..af0a5ffcaa98 --- /dev/null +++ b/drivers/media/platform/ti-vpe/vpdma.c @@ -0,0 +1,846 @@ +/* + * VPDMA helper library + * + * Copyright (c) 2013 Texas Instruments Inc. + * + * David Griego, <dagriego@biglakesoftware.com> + * Dale Farnsworth, <dale@farnsworth.org> + * Archit Taneja, <archit@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/firmware.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/videodev2.h> + +#include "vpdma.h" +#include "vpdma_priv.h" + +#define VPDMA_FIRMWARE "vpdma-1b8.bin" + +const struct vpdma_data_format vpdma_yuv_fmts[] = { + [VPDMA_DATA_FMT_Y444] = { + .data_type = DATA_TYPE_Y444, + .depth = 8, + }, + [VPDMA_DATA_FMT_Y422] = { + .data_type = DATA_TYPE_Y422, + .depth = 8, + }, + [VPDMA_DATA_FMT_Y420] = { + .data_type = DATA_TYPE_Y420, + .depth = 8, + }, + [VPDMA_DATA_FMT_C444] = { + .data_type = DATA_TYPE_C444, + .depth = 8, + }, + [VPDMA_DATA_FMT_C422] = { + .data_type = DATA_TYPE_C422, + .depth = 8, + }, + [VPDMA_DATA_FMT_C420] = { + .data_type = DATA_TYPE_C420, + .depth = 4, + }, + [VPDMA_DATA_FMT_YC422] = { + .data_type = DATA_TYPE_YC422, + .depth = 16, + }, + [VPDMA_DATA_FMT_YC444] = { + .data_type = DATA_TYPE_YC444, + .depth = 24, + }, + [VPDMA_DATA_FMT_CY422] = { + .data_type = DATA_TYPE_CY422, + .depth = 16, + }, +}; + +const struct vpdma_data_format vpdma_rgb_fmts[] = { + [VPDMA_DATA_FMT_RGB565] = { + .data_type = DATA_TYPE_RGB16_565, + .depth = 16, + }, + [VPDMA_DATA_FMT_ARGB16_1555] = { + .data_type = DATA_TYPE_ARGB_1555, + .depth = 16, + }, + [VPDMA_DATA_FMT_ARGB16] = { + .data_type = DATA_TYPE_ARGB_4444, + .depth = 16, + }, + [VPDMA_DATA_FMT_RGBA16_5551] = { + .data_type = DATA_TYPE_RGBA_5551, + .depth = 16, + }, + [VPDMA_DATA_FMT_RGBA16] = { + .data_type = DATA_TYPE_RGBA_4444, + .depth = 16, + }, + [VPDMA_DATA_FMT_ARGB24] = { + .data_type = DATA_TYPE_ARGB24_6666, + .depth = 24, + }, + [VPDMA_DATA_FMT_RGB24] = { + .data_type = DATA_TYPE_RGB24_888, + .depth = 24, + }, + [VPDMA_DATA_FMT_ARGB32] = { + .data_type = DATA_TYPE_ARGB32_8888, + .depth = 32, + }, + [VPDMA_DATA_FMT_RGBA24] = { + .data_type = DATA_TYPE_RGBA24_6666, + .depth = 24, + }, + [VPDMA_DATA_FMT_RGBA32] = { + .data_type = DATA_TYPE_RGBA32_8888, + .depth = 32, + }, + [VPDMA_DATA_FMT_BGR565] = { + .data_type = DATA_TYPE_BGR16_565, + .depth = 16, + }, + [VPDMA_DATA_FMT_ABGR16_1555] = { + .data_type = DATA_TYPE_ABGR_1555, + .depth = 16, + }, + [VPDMA_DATA_FMT_ABGR16] = { + .data_type = DATA_TYPE_ABGR_4444, + .depth = 16, + }, + [VPDMA_DATA_FMT_BGRA16_5551] = { + .data_type = DATA_TYPE_BGRA_5551, + .depth = 16, + }, + [VPDMA_DATA_FMT_BGRA16] = { + .data_type = DATA_TYPE_BGRA_4444, + .depth = 16, + }, + [VPDMA_DATA_FMT_ABGR24] = { + .data_type = DATA_TYPE_ABGR24_6666, + .depth = 24, + }, + [VPDMA_DATA_FMT_BGR24] = { + .data_type = DATA_TYPE_BGR24_888, + .depth = 24, + }, + [VPDMA_DATA_FMT_ABGR32] = { + .data_type = DATA_TYPE_ABGR32_8888, + .depth = 32, + }, + [VPDMA_DATA_FMT_BGRA24] = { + .data_type = DATA_TYPE_BGRA24_6666, + .depth = 24, + }, + [VPDMA_DATA_FMT_BGRA32] = { + .data_type = DATA_TYPE_BGRA32_8888, + .depth = 32, + }, +}; + +const struct vpdma_data_format vpdma_misc_fmts[] = { + [VPDMA_DATA_FMT_MV] = { + .data_type = DATA_TYPE_MV, + .depth = 4, + }, +}; + +struct vpdma_channel_info { + int num; /* VPDMA channel number */ + int cstat_offset; /* client CSTAT register offset */ +}; + +static const struct vpdma_channel_info chan_info[] = { + [VPE_CHAN_LUMA1_IN] = { + .num = VPE_CHAN_NUM_LUMA1_IN, + .cstat_offset = VPDMA_DEI_LUMA1_CSTAT, + }, + [VPE_CHAN_CHROMA1_IN] = { + .num = VPE_CHAN_NUM_CHROMA1_IN, + .cstat_offset = VPDMA_DEI_CHROMA1_CSTAT, + }, + [VPE_CHAN_LUMA2_IN] = { + .num = VPE_CHAN_NUM_LUMA2_IN, + 
.cstat_offset = VPDMA_DEI_LUMA2_CSTAT, + }, + [VPE_CHAN_CHROMA2_IN] = { + .num = VPE_CHAN_NUM_CHROMA2_IN, + .cstat_offset = VPDMA_DEI_CHROMA2_CSTAT, + }, + [VPE_CHAN_LUMA3_IN] = { + .num = VPE_CHAN_NUM_LUMA3_IN, + .cstat_offset = VPDMA_DEI_LUMA3_CSTAT, + }, + [VPE_CHAN_CHROMA3_IN] = { + .num = VPE_CHAN_NUM_CHROMA3_IN, + .cstat_offset = VPDMA_DEI_CHROMA3_CSTAT, + }, + [VPE_CHAN_MV_IN] = { + .num = VPE_CHAN_NUM_MV_IN, + .cstat_offset = VPDMA_DEI_MV_IN_CSTAT, + }, + [VPE_CHAN_MV_OUT] = { + .num = VPE_CHAN_NUM_MV_OUT, + .cstat_offset = VPDMA_DEI_MV_OUT_CSTAT, + }, + [VPE_CHAN_LUMA_OUT] = { + .num = VPE_CHAN_NUM_LUMA_OUT, + .cstat_offset = VPDMA_VIP_UP_Y_CSTAT, + }, + [VPE_CHAN_CHROMA_OUT] = { + .num = VPE_CHAN_NUM_CHROMA_OUT, + .cstat_offset = VPDMA_VIP_UP_UV_CSTAT, + }, + [VPE_CHAN_RGB_OUT] = { + .num = VPE_CHAN_NUM_RGB_OUT, + .cstat_offset = VPDMA_VIP_UP_Y_CSTAT, + }, +}; + +static u32 read_reg(struct vpdma_data *vpdma, int offset) +{ + return ioread32(vpdma->base + offset); +} + +static void write_reg(struct vpdma_data *vpdma, int offset, u32 value) +{ + iowrite32(value, vpdma->base + offset); +} + +static int read_field_reg(struct vpdma_data *vpdma, int offset, + u32 mask, int shift) +{ + return (read_reg(vpdma, offset) & (mask << shift)) >> shift; +} + +static void write_field_reg(struct vpdma_data *vpdma, int offset, u32 field, + u32 mask, int shift) +{ + u32 val = read_reg(vpdma, offset); + + val &= ~(mask << shift); + val |= (field & mask) << shift; + + write_reg(vpdma, offset, val); +} + +void vpdma_dump_regs(struct vpdma_data *vpdma) +{ + struct device *dev = &vpdma->pdev->dev; + +#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(vpdma, VPDMA_##r)) + + dev_dbg(dev, "VPDMA Registers:\n"); + + DUMPREG(PID); + DUMPREG(LIST_ADDR); + DUMPREG(LIST_ATTR); + DUMPREG(LIST_STAT_SYNC); + DUMPREG(BG_RGB); + DUMPREG(BG_YUV); + DUMPREG(SETUP); + DUMPREG(MAX_SIZE1); + DUMPREG(MAX_SIZE2); + DUMPREG(MAX_SIZE3); + + /* + * dumping registers of only group0 and group3, because VPE channels + * lie within group0 and group3 registers + */ + DUMPREG(INT_CHAN_STAT(0)); + DUMPREG(INT_CHAN_MASK(0)); + DUMPREG(INT_CHAN_STAT(3)); + DUMPREG(INT_CHAN_MASK(3)); + DUMPREG(INT_CLIENT0_STAT); + DUMPREG(INT_CLIENT0_MASK); + DUMPREG(INT_CLIENT1_STAT); + DUMPREG(INT_CLIENT1_MASK); + DUMPREG(INT_LIST0_STAT); + DUMPREG(INT_LIST0_MASK); + + /* + * these are registers specific to VPE clients, we can make this + * function dump client registers specific to VPE or VIP based on + * who is using it + */ + DUMPREG(DEI_CHROMA1_CSTAT); + DUMPREG(DEI_LUMA1_CSTAT); + DUMPREG(DEI_CHROMA2_CSTAT); + DUMPREG(DEI_LUMA2_CSTAT); + DUMPREG(DEI_CHROMA3_CSTAT); + DUMPREG(DEI_LUMA3_CSTAT); + DUMPREG(DEI_MV_IN_CSTAT); + DUMPREG(DEI_MV_OUT_CSTAT); + DUMPREG(VIP_UP_Y_CSTAT); + DUMPREG(VIP_UP_UV_CSTAT); + DUMPREG(VPI_CTL_CSTAT); +} + +/* + * Allocate a DMA buffer + */ +int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size) +{ + buf->size = size; + buf->mapped = false; + buf->addr = kzalloc(size, GFP_KERNEL); + if (!buf->addr) + return -ENOMEM; + + WARN_ON((u32) buf->addr & VPDMA_DESC_ALIGN); + + return 0; +} + +void vpdma_free_desc_buf(struct vpdma_buf *buf) +{ + WARN_ON(buf->mapped); + kfree(buf->addr); + buf->addr = NULL; + buf->size = 0; +} + +/* + * map descriptor/payload DMA buffer, enabling DMA access + */ +int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf) +{ + struct device *dev = &vpdma->pdev->dev; + + WARN_ON(buf->mapped); + buf->dma_addr = dma_map_single(dev, buf->addr, buf->size, + DMA_TO_DEVICE); 
+ if (dma_mapping_error(dev, buf->dma_addr)) { + dev_err(dev, "failed to map buffer\n"); + return -EINVAL; + } + + buf->mapped = true; + + return 0; +} + +/* + * unmap descriptor/payload DMA buffer, disabling DMA access and + * allowing the main processor to access the data + */ +void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf) +{ + struct device *dev = &vpdma->pdev->dev; + + if (buf->mapped) + dma_unmap_single(dev, buf->dma_addr, buf->size, DMA_TO_DEVICE); + + buf->mapped = false; +} + +/* + * create a descriptor list, the user of this list will append configuration, + * control and data descriptors to this list, this list will be submitted to + * VPDMA. VPDMA's list parser will go through each descriptor and perform the + * required DMA operations + */ +int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type) +{ + int r; + + r = vpdma_alloc_desc_buf(&list->buf, size); + if (r) + return r; + + list->next = list->buf.addr; + + list->type = type; + + return 0; +} + +/* + * once a descriptor list is parsed by VPDMA, we reset the list by emptying it, + * to allow new descriptors to be added to the list. + */ +void vpdma_reset_desc_list(struct vpdma_desc_list *list) +{ + list->next = list->buf.addr; +} + +/* + * free the buffer allocated for the VPDMA descriptor list, this should be + * called when the user doesn't want to use VPDMA any more. + */ +void vpdma_free_desc_list(struct vpdma_desc_list *list) +{ + vpdma_free_desc_buf(&list->buf); + + list->next = NULL; +} + +static bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num) +{ + return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16); +} + +/* + * submit a list of DMA descriptors to the VPE VPDMA, do not wait for completion + */ +int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list) +{ + /* we always use the first list */ + int list_num = 0; + int list_size; + + if (vpdma_list_busy(vpdma, list_num)) + return -EBUSY; + + /* 16-byte granularity */ + list_size = (list->next - list->buf.addr) >> 4; + + write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr); + + write_reg(vpdma, VPDMA_LIST_ATTR, + (list_num << VPDMA_LIST_NUM_SHFT) | + (list->type << VPDMA_LIST_TYPE_SHFT) | + list_size); + + return 0; +} + +static void dump_cfd(struct vpdma_cfd *cfd) +{ + int class; + + class = cfd_get_class(cfd); + + pr_debug("config descriptor of payload class: %s\n", + class == CFD_CLS_BLOCK ? 
"simple block" : + "address data block"); + + if (class == CFD_CLS_BLOCK) + pr_debug("word0: dst_addr_offset = 0x%08x\n", + cfd->dest_addr_offset); + + if (class == CFD_CLS_BLOCK) + pr_debug("word1: num_data_wrds = %d\n", cfd->block_len); + + pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr); + + pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, " + "payload_len = %d\n", cfd_get_pkt_type(cfd), + cfd_get_direct(cfd), class, cfd_get_dest(cfd), + cfd_get_payload_len(cfd)); +} + +/* + * append a configuration descriptor to the given descriptor list, where the + * payload is in the form of a simple data block specified in the descriptor + * header, this is used to upload scaler coefficients to the scaler module + */ +void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client, + struct vpdma_buf *blk, u32 dest_offset) +{ + struct vpdma_cfd *cfd; + int len = blk->size; + + WARN_ON(blk->dma_addr & VPDMA_DESC_ALIGN); + + cfd = list->next; + WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size)); + + cfd->dest_addr_offset = dest_offset; + cfd->block_len = len; + cfd->payload_addr = (u32) blk->dma_addr; + cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_BLOCK, + client, len >> 4); + + list->next = cfd + 1; + + dump_cfd(cfd); +} + +/* + * append a configuration descriptor to the given descriptor list, where the + * payload is in the address data block format, this is used to a configure a + * discontiguous set of MMRs + */ +void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client, + struct vpdma_buf *adb) +{ + struct vpdma_cfd *cfd; + unsigned int len = adb->size; + + WARN_ON(len & VPDMA_ADB_SIZE_ALIGN); + WARN_ON(adb->dma_addr & VPDMA_DESC_ALIGN); + + cfd = list->next; + BUG_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size)); + + cfd->w0 = 0; + cfd->w1 = 0; + cfd->payload_addr = (u32) adb->dma_addr; + cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_ADB, + client, len >> 4); + + list->next = cfd + 1; + + dump_cfd(cfd); +}; + +/* + * control descriptor format change based on what type of control descriptor it + * is, we only use 'sync on channel' control descriptors for now, so assume it's + * that + */ +static void dump_ctd(struct vpdma_ctd *ctd) +{ + pr_debug("control descriptor\n"); + + pr_debug("word3: pkt_type = %d, source = %d, ctl_type = %d\n", + ctd_get_pkt_type(ctd), ctd_get_source(ctd), ctd_get_ctl(ctd)); +} + +/* + * append a 'sync on channel' type control descriptor to the given descriptor + * list, this descriptor stalls the VPDMA list till the time DMA is completed + * on the specified channel + */ +void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list, + enum vpdma_channel chan) +{ + struct vpdma_ctd *ctd; + + ctd = list->next; + WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size)); + + ctd->w0 = 0; + ctd->w1 = 0; + ctd->w2 = 0; + ctd->type_source_ctl = ctd_type_source_ctl(chan_info[chan].num, + CTD_TYPE_SYNC_ON_CHANNEL); + + list->next = ctd + 1; + + dump_ctd(ctd); +} + +static void dump_dtd(struct vpdma_dtd *dtd) +{ + int dir, chan; + + dir = dtd_get_dir(dtd); + chan = dtd_get_chan(dtd); + + pr_debug("%s data transfer descriptor for channel %d\n", + dir == DTD_DIR_OUT ? 
"outbound" : "inbound", chan); + + pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, " + "even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n", + dtd_get_data_type(dtd), dtd_get_notify(dtd), dtd_get_field(dtd), + dtd_get_1d(dtd), dtd_get_even_line_skip(dtd), + dtd_get_odd_line_skip(dtd), dtd_get_line_stride(dtd)); + + if (dir == DTD_DIR_IN) + pr_debug("word1: line_length = %d, xfer_height = %d\n", + dtd_get_line_length(dtd), dtd_get_xfer_height(dtd)); + + pr_debug("word2: start_addr = 0x%08x\n", dtd->start_addr); + + pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, " + "pri = %d, next_chan = %d\n", dtd_get_pkt_type(dtd), + dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd), + dtd_get_next_chan(dtd)); + + if (dir == DTD_DIR_IN) + pr_debug("word4: frame_width = %d, frame_height = %d\n", + dtd_get_frame_width(dtd), dtd_get_frame_height(dtd)); + else + pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, " + "drp_data = %d, use_desc_reg = %d\n", + dtd_get_desc_write_addr(dtd), dtd_get_write_desc(dtd), + dtd_get_drop_data(dtd), dtd_get_use_desc(dtd)); + + if (dir == DTD_DIR_IN) + pr_debug("word5: hor_start = %d, ver_start = %d\n", + dtd_get_h_start(dtd), dtd_get_v_start(dtd)); + else + pr_debug("word5: max_width %d, max_height %d\n", + dtd_get_max_width(dtd), dtd_get_max_height(dtd)); + + pr_debug("word6: client specfic attr0 = 0x%08x\n", dtd->client_attr0); + pr_debug("word7: client specfic attr1 = 0x%08x\n", dtd->client_attr1); +} + +/* + * append an outbound data transfer descriptor to the given descriptor list, + * this sets up a 'client to memory' VPDMA transfer for the given VPDMA channel + */ +void vpdma_add_out_dtd(struct vpdma_desc_list *list, struct v4l2_rect *c_rect, + const struct vpdma_data_format *fmt, dma_addr_t dma_addr, + enum vpdma_channel chan, u32 flags) +{ + int priority = 0; + int field = 0; + int notify = 1; + int channel, next_chan; + int depth = fmt->depth; + int stride; + struct vpdma_dtd *dtd; + + channel = next_chan = chan_info[chan].num; + + if (fmt->data_type == DATA_TYPE_C420) + depth = 8; + + stride = (depth * c_rect->width) >> 3; + dma_addr += (c_rect->left * depth) >> 3; + + dtd = list->next; + WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size)); + + dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type, + notify, + field, + !!(flags & VPDMA_DATA_FRAME_1D), + !!(flags & VPDMA_DATA_EVEN_LINE_SKIP), + !!(flags & VPDMA_DATA_ODD_LINE_SKIP), + stride); + dtd->w1 = 0; + dtd->start_addr = (u32) dma_addr; + dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED), + DTD_DIR_OUT, channel, priority, next_chan); + dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0); + dtd->max_width_height = dtd_max_width_height(MAX_OUT_WIDTH_1920, + MAX_OUT_HEIGHT_1080); + dtd->client_attr0 = 0; + dtd->client_attr1 = 0; + + list->next = dtd + 1; + + dump_dtd(dtd); +} + +/* + * append an inbound data transfer descriptor to the given descriptor list, + * this sets up a 'memory to client' VPDMA transfer for the given VPDMA channel + */ +void vpdma_add_in_dtd(struct vpdma_desc_list *list, int frame_width, + int frame_height, struct v4l2_rect *c_rect, + const struct vpdma_data_format *fmt, dma_addr_t dma_addr, + enum vpdma_channel chan, int field, u32 flags) +{ + int priority = 0; + int notify = 1; + int depth = fmt->depth; + int channel, next_chan; + int stride; + int height = c_rect->height; + struct vpdma_dtd *dtd; + + channel = next_chan = chan_info[chan].num; + + if (fmt->data_type == DATA_TYPE_C420) { + height >>= 1; + 
frame_height >>= 1; + depth = 8; + } + + stride = (depth * c_rect->width) >> 3; + dma_addr += (c_rect->left * depth) >> 3; + + dtd = list->next; + WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size)); + + dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type, + notify, + field, + !!(flags & VPDMA_DATA_FRAME_1D), + !!(flags & VPDMA_DATA_EVEN_LINE_SKIP), + !!(flags & VPDMA_DATA_ODD_LINE_SKIP), + stride); + + dtd->xfer_length_height = dtd_xfer_length_height(c_rect->width, height); + dtd->start_addr = (u32) dma_addr; + dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED), + DTD_DIR_IN, channel, priority, next_chan); + dtd->frame_width_height = dtd_frame_width_height(frame_width, + frame_height); + dtd->start_h_v = dtd_start_h_v(c_rect->left, c_rect->top); + dtd->client_attr0 = 0; + dtd->client_attr1 = 0; + + list->next = dtd + 1; + + dump_dtd(dtd); +} + +/* set or clear the mask for list complete interrupt */ +void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num, + bool enable) +{ + u32 val; + + val = read_reg(vpdma, VPDMA_INT_LIST0_MASK); + if (enable) + val |= (1 << (list_num * 2)); + else + val &= ~(1 << (list_num * 2)); + write_reg(vpdma, VPDMA_INT_LIST0_MASK, val); +} + +/* clear previously occurred list interrupts in the LIST_STAT register */ +void vpdma_clear_list_stat(struct vpdma_data *vpdma) +{ + write_reg(vpdma, VPDMA_INT_LIST0_STAT, + read_reg(vpdma, VPDMA_INT_LIST0_STAT)); +} + +/* + * configures the output mode of the line buffer for the given client, the + * line buffer content can either be mirrored (each line repeated twice) or + * passed to the client as is + */ +void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode, + enum vpdma_channel chan) +{ + int client_cstat = chan_info[chan].cstat_offset; + + write_field_reg(vpdma, client_cstat, line_mode, + VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT); +} + +/* + * configures the event which should trigger VPDMA transfer for the given + * client + */ +void vpdma_set_frame_start_event(struct vpdma_data *vpdma, + enum vpdma_frame_start_event fs_event, + enum vpdma_channel chan) +{ + int client_cstat = chan_info[chan].cstat_offset; + + write_field_reg(vpdma, client_cstat, fs_event, + VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT); +} + +static void vpdma_firmware_cb(const struct firmware *f, void *context) +{ + struct vpdma_data *vpdma = context; + struct vpdma_buf fw_dma_buf; + int i, r; + + dev_dbg(&vpdma->pdev->dev, "firmware callback\n"); + + if (!f || !f->data) { + dev_err(&vpdma->pdev->dev, "couldn't get firmware\n"); + return; + } + + /* already initialized */ + if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK, + VPDMA_LIST_RDY_SHFT)) { + vpdma->ready = true; + return; + } + + r = vpdma_alloc_desc_buf(&fw_dma_buf, f->size); + if (r) { + dev_err(&vpdma->pdev->dev, + "failed to allocate dma buffer for firmware\n"); + goto rel_fw; + } + + memcpy(fw_dma_buf.addr, f->data, f->size); + + vpdma_map_desc_buf(vpdma, &fw_dma_buf); + + write_reg(vpdma, VPDMA_LIST_ADDR, (u32) fw_dma_buf.dma_addr); + + for (i = 0; i < 100; i++) { /* max 1 second */ + msleep_interruptible(10); + + if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK, + VPDMA_LIST_RDY_SHFT)) + break; + } + + if (i == 100) { + dev_err(&vpdma->pdev->dev, "firmware upload failed\n"); + goto free_buf; + } + + vpdma->ready = true; + +free_buf: + vpdma_unmap_desc_buf(vpdma, &fw_dma_buf); + + vpdma_free_desc_buf(&fw_dma_buf); +rel_fw: + release_firmware(f); +} + +static int 
vpdma_load_firmware(struct vpdma_data *vpdma) +{ + int r; + struct device *dev = &vpdma->pdev->dev; + + r = request_firmware_nowait(THIS_MODULE, 1, + (const char *) VPDMA_FIRMWARE, dev, GFP_KERNEL, vpdma, + vpdma_firmware_cb); + if (r) { + dev_err(dev, "firmware not available %s\n", VPDMA_FIRMWARE); + return r; + } else { + dev_info(dev, "loading firmware %s\n", VPDMA_FIRMWARE); + } + + return 0; +} + +struct vpdma_data *vpdma_create(struct platform_device *pdev) +{ + struct resource *res; + struct vpdma_data *vpdma; + int r; + + dev_dbg(&pdev->dev, "vpdma_create\n"); + + vpdma = devm_kzalloc(&pdev->dev, sizeof(*vpdma), GFP_KERNEL); + if (!vpdma) { + dev_err(&pdev->dev, "couldn't alloc vpdma_dev\n"); + return ERR_PTR(-ENOMEM); + } + + vpdma->pdev = pdev; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma"); + if (res == NULL) { + dev_err(&pdev->dev, "missing platform resources data\n"); + return ERR_PTR(-ENODEV); + } + + vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!vpdma->base) { + dev_err(&pdev->dev, "failed to ioremap\n"); + return ERR_PTR(-ENOMEM); + } + + r = vpdma_load_firmware(vpdma); + if (r) { + pr_err("failed to load firmware %s\n", VPDMA_FIRMWARE); + return ERR_PTR(r); + } + + return vpdma; +} +MODULE_FIRMWARE(VPDMA_FIRMWARE); diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h new file mode 100644 index 000000000000..eaa2a71a5db9 --- /dev/null +++ b/drivers/media/platform/ti-vpe/vpdma.h @@ -0,0 +1,203 @@ +/* + * Copyright (c) 2013 Texas Instruments Inc. + * + * David Griego, <dagriego@biglakesoftware.com> + * Dale Farnsworth, <dale@farnsworth.org> + * Archit Taneja, <archit@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#ifndef __TI_VPDMA_H_ +#define __TI_VPDMA_H_ + +/* + * A vpdma_buf tracks the size, DMA address and mapping status of each + * driver DMA area. 
+ */ +struct vpdma_buf { + void *addr; + dma_addr_t dma_addr; + size_t size; + bool mapped; +}; + +struct vpdma_desc_list { + struct vpdma_buf buf; + void *next; + int type; +}; + +struct vpdma_data { + void __iomem *base; + + struct platform_device *pdev; + + /* tells whether vpdma firmware is loaded or not */ + bool ready; +}; + +struct vpdma_data_format { + int data_type; + u8 depth; +}; + +#define VPDMA_DESC_ALIGN 16 /* 16-byte descriptor alignment */ + +#define VPDMA_DTD_DESC_SIZE 32 /* 8 words */ +#define VPDMA_CFD_CTD_DESC_SIZE 16 /* 4 words */ + +#define VPDMA_LIST_TYPE_NORMAL 0 +#define VPDMA_LIST_TYPE_SELF_MODIFYING 1 +#define VPDMA_LIST_TYPE_DOORBELL 2 + +enum vpdma_yuv_formats { + VPDMA_DATA_FMT_Y444 = 0, + VPDMA_DATA_FMT_Y422, + VPDMA_DATA_FMT_Y420, + VPDMA_DATA_FMT_C444, + VPDMA_DATA_FMT_C422, + VPDMA_DATA_FMT_C420, + VPDMA_DATA_FMT_YC422, + VPDMA_DATA_FMT_YC444, + VPDMA_DATA_FMT_CY422, +}; + +enum vpdma_rgb_formats { + VPDMA_DATA_FMT_RGB565 = 0, + VPDMA_DATA_FMT_ARGB16_1555, + VPDMA_DATA_FMT_ARGB16, + VPDMA_DATA_FMT_RGBA16_5551, + VPDMA_DATA_FMT_RGBA16, + VPDMA_DATA_FMT_ARGB24, + VPDMA_DATA_FMT_RGB24, + VPDMA_DATA_FMT_ARGB32, + VPDMA_DATA_FMT_RGBA24, + VPDMA_DATA_FMT_RGBA32, + VPDMA_DATA_FMT_BGR565, + VPDMA_DATA_FMT_ABGR16_1555, + VPDMA_DATA_FMT_ABGR16, + VPDMA_DATA_FMT_BGRA16_5551, + VPDMA_DATA_FMT_BGRA16, + VPDMA_DATA_FMT_ABGR24, + VPDMA_DATA_FMT_BGR24, + VPDMA_DATA_FMT_ABGR32, + VPDMA_DATA_FMT_BGRA24, + VPDMA_DATA_FMT_BGRA32, +}; + +enum vpdma_misc_formats { + VPDMA_DATA_FMT_MV = 0, +}; + +extern const struct vpdma_data_format vpdma_yuv_fmts[]; +extern const struct vpdma_data_format vpdma_rgb_fmts[]; +extern const struct vpdma_data_format vpdma_misc_fmts[]; + +enum vpdma_frame_start_event { + VPDMA_FSEVENT_HDMI_FID = 0, + VPDMA_FSEVENT_DVO2_FID, + VPDMA_FSEVENT_HDCOMP_FID, + VPDMA_FSEVENT_SD_FID, + VPDMA_FSEVENT_LM_FID0, + VPDMA_FSEVENT_LM_FID1, + VPDMA_FSEVENT_LM_FID2, + VPDMA_FSEVENT_CHANNEL_ACTIVE, +}; + +/* + * VPDMA channel numbers + */ +enum vpdma_channel { + VPE_CHAN_LUMA1_IN, + VPE_CHAN_CHROMA1_IN, + VPE_CHAN_LUMA2_IN, + VPE_CHAN_CHROMA2_IN, + VPE_CHAN_LUMA3_IN, + VPE_CHAN_CHROMA3_IN, + VPE_CHAN_MV_IN, + VPE_CHAN_MV_OUT, + VPE_CHAN_LUMA_OUT, + VPE_CHAN_CHROMA_OUT, + VPE_CHAN_RGB_OUT, +}; + +/* flags for VPDMA data descriptors */ +#define VPDMA_DATA_ODD_LINE_SKIP (1 << 0) +#define VPDMA_DATA_EVEN_LINE_SKIP (1 << 1) +#define VPDMA_DATA_FRAME_1D (1 << 2) +#define VPDMA_DATA_MODE_TILED (1 << 3) + +/* + * client identifiers used for configuration descriptors + */ +#define CFD_MMR_CLIENT 0 +#define CFD_SC_CLIENT 4 + +/* Address data block header format */ +struct vpdma_adb_hdr { + u32 offset; + u32 nwords; + u32 reserved0; + u32 reserved1; +}; + +/* helpers for creating ADB headers for config descriptors MMRs as client */ +#define ADB_ADDR(dma_buf, str, fld) ((dma_buf)->addr + offsetof(str, fld)) +#define MMR_ADB_ADDR(buf, str, fld) ADB_ADDR(&(buf), struct str, fld) + +#define VPDMA_SET_MMR_ADB_HDR(buf, str, hdr, regs, offset_a) \ + do { \ + struct vpdma_adb_hdr *h; \ + struct str *adb = NULL; \ + h = MMR_ADB_ADDR(buf, str, hdr); \ + h->offset = (offset_a); \ + h->nwords = sizeof(adb->regs) >> 2; \ + } while (0) + +/* vpdma descriptor buffer allocation and management */ +int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size); +void vpdma_free_desc_buf(struct vpdma_buf *buf); +int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf); +void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf); + +/* vpdma descriptor list funcs */ +int 
vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type); +void vpdma_reset_desc_list(struct vpdma_desc_list *list); +void vpdma_free_desc_list(struct vpdma_desc_list *list); +int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list); + +/* helpers for creating vpdma descriptors */ +void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client, + struct vpdma_buf *blk, u32 dest_offset); +void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client, + struct vpdma_buf *adb); +void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list, + enum vpdma_channel chan); +void vpdma_add_out_dtd(struct vpdma_desc_list *list, struct v4l2_rect *c_rect, + const struct vpdma_data_format *fmt, dma_addr_t dma_addr, + enum vpdma_channel chan, u32 flags); +void vpdma_add_in_dtd(struct vpdma_desc_list *list, int frame_width, + int frame_height, struct v4l2_rect *c_rect, + const struct vpdma_data_format *fmt, dma_addr_t dma_addr, + enum vpdma_channel chan, int field, u32 flags); + +/* vpdma list interrupt management */ +void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num, + bool enable); +void vpdma_clear_list_stat(struct vpdma_data *vpdma); + +/* vpdma client configuration */ +void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode, + enum vpdma_channel chan); +void vpdma_set_frame_start_event(struct vpdma_data *vpdma, + enum vpdma_frame_start_event fs_event, enum vpdma_channel chan); + +void vpdma_dump_regs(struct vpdma_data *vpdma); + +/* initialize vpdma, passed with VPE's platform device pointer */ +struct vpdma_data *vpdma_create(struct platform_device *pdev); + +#endif diff --git a/drivers/media/platform/ti-vpe/vpdma_priv.h b/drivers/media/platform/ti-vpe/vpdma_priv.h new file mode 100644 index 000000000000..f0e9a8038c1b --- /dev/null +++ b/drivers/media/platform/ti-vpe/vpdma_priv.h @@ -0,0 +1,641 @@ +/* + * Copyright (c) 2013 Texas Instruments Inc. + * + * David Griego, <dagriego@biglakesoftware.com> + * Dale Farnsworth, <dale@farnsworth.org> + * Archit Taneja, <archit@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#ifndef _TI_VPDMA_PRIV_H_ +#define _TI_VPDMA_PRIV_H_ + +/* + * VPDMA Register offsets + */ + +/* Top level */ +#define VPDMA_PID 0x00 +#define VPDMA_LIST_ADDR 0x04 +#define VPDMA_LIST_ATTR 0x08 +#define VPDMA_LIST_STAT_SYNC 0x0c +#define VPDMA_BG_RGB 0x18 +#define VPDMA_BG_YUV 0x1c +#define VPDMA_SETUP 0x30 +#define VPDMA_MAX_SIZE1 0x34 +#define VPDMA_MAX_SIZE2 0x38 +#define VPDMA_MAX_SIZE3 0x3c + +/* Interrupts */ +#define VPDMA_INT_CHAN_STAT(grp) (0x40 + grp * 8) +#define VPDMA_INT_CHAN_MASK(grp) (VPDMA_INT_CHAN_STAT(grp) + 4) +#define VPDMA_INT_CLIENT0_STAT 0x78 +#define VPDMA_INT_CLIENT0_MASK 0x7c +#define VPDMA_INT_CLIENT1_STAT 0x80 +#define VPDMA_INT_CLIENT1_MASK 0x84 +#define VPDMA_INT_LIST0_STAT 0x88 +#define VPDMA_INT_LIST0_MASK 0x8c + +#define VPDMA_PERFMON(i) (0x200 + i * 4) + +/* VPE specific client registers */ +#define VPDMA_DEI_CHROMA1_CSTAT 0x0300 +#define VPDMA_DEI_LUMA1_CSTAT 0x0304 +#define VPDMA_DEI_LUMA2_CSTAT 0x0308 +#define VPDMA_DEI_CHROMA2_CSTAT 0x030c +#define VPDMA_DEI_LUMA3_CSTAT 0x0310 +#define VPDMA_DEI_CHROMA3_CSTAT 0x0314 +#define VPDMA_DEI_MV_IN_CSTAT 0x0330 +#define VPDMA_DEI_MV_OUT_CSTAT 0x033c +#define VPDMA_VIP_UP_Y_CSTAT 0x0390 +#define VPDMA_VIP_UP_UV_CSTAT 0x0394 +#define VPDMA_VPI_CTL_CSTAT 0x03d0 + +/* Reg field info for VPDMA_CLIENT_CSTAT registers */ +#define VPDMA_CSTAT_LINE_MODE_MASK 0x03 +#define VPDMA_CSTAT_LINE_MODE_SHIFT 8 +#define VPDMA_CSTAT_FRAME_START_MASK 0xf +#define VPDMA_CSTAT_FRAME_START_SHIFT 10 + +#define VPDMA_LIST_NUM_MASK 0x07 +#define VPDMA_LIST_NUM_SHFT 24 +#define VPDMA_LIST_STOP_SHFT 20 +#define VPDMA_LIST_RDY_MASK 0x01 +#define VPDMA_LIST_RDY_SHFT 19 +#define VPDMA_LIST_TYPE_MASK 0x03 +#define VPDMA_LIST_TYPE_SHFT 16 +#define VPDMA_LIST_SIZE_MASK 0xffff + +/* VPDMA data type values for data formats */ +#define DATA_TYPE_Y444 0x0 +#define DATA_TYPE_Y422 0x1 +#define DATA_TYPE_Y420 0x2 +#define DATA_TYPE_C444 0x4 +#define DATA_TYPE_C422 0x5 +#define DATA_TYPE_C420 0x6 +#define DATA_TYPE_YC422 0x7 +#define DATA_TYPE_YC444 0x8 +#define DATA_TYPE_CY422 0x23 + +#define DATA_TYPE_RGB16_565 0x0 +#define DATA_TYPE_ARGB_1555 0x1 +#define DATA_TYPE_ARGB_4444 0x2 +#define DATA_TYPE_RGBA_5551 0x3 +#define DATA_TYPE_RGBA_4444 0x4 +#define DATA_TYPE_ARGB24_6666 0x5 +#define DATA_TYPE_RGB24_888 0x6 +#define DATA_TYPE_ARGB32_8888 0x7 +#define DATA_TYPE_RGBA24_6666 0x8 +#define DATA_TYPE_RGBA32_8888 0x9 +#define DATA_TYPE_BGR16_565 0x10 +#define DATA_TYPE_ABGR_1555 0x11 +#define DATA_TYPE_ABGR_4444 0x12 +#define DATA_TYPE_BGRA_5551 0x13 +#define DATA_TYPE_BGRA_4444 0x14 +#define DATA_TYPE_ABGR24_6666 0x15 +#define DATA_TYPE_BGR24_888 0x16 +#define DATA_TYPE_ABGR32_8888 0x17 +#define DATA_TYPE_BGRA24_6666 0x18 +#define DATA_TYPE_BGRA32_8888 0x19 + +#define DATA_TYPE_MV 0x3 + +/* VPDMA channel numbers(only VPE channels for now) */ +#define VPE_CHAN_NUM_LUMA1_IN 0 +#define VPE_CHAN_NUM_CHROMA1_IN 1 +#define VPE_CHAN_NUM_LUMA2_IN 2 +#define VPE_CHAN_NUM_CHROMA2_IN 3 +#define VPE_CHAN_NUM_LUMA3_IN 4 +#define VPE_CHAN_NUM_CHROMA3_IN 5 +#define VPE_CHAN_NUM_MV_IN 12 +#define VPE_CHAN_NUM_MV_OUT 15 +#define VPE_CHAN_NUM_LUMA_OUT 102 +#define VPE_CHAN_NUM_CHROMA_OUT 103 +#define VPE_CHAN_NUM_RGB_OUT 106 + +/* + * a VPDMA address data block payload for a configuration descriptor needs to + * have each sub block length as a multiple of 16 bytes. Therefore, the overall + * size of the payload also needs to be a multiple of 16 bytes. The sub block + * lengths should be ensured to be aligned by the VPDMA user. 
+ */ +#define VPDMA_ADB_SIZE_ALIGN 0x0f + +/* + * data transfer descriptor + */ +struct vpdma_dtd { + u32 type_ctl_stride; + union { + u32 xfer_length_height; + u32 w1; + }; + dma_addr_t start_addr; + u32 pkt_ctl; + union { + u32 frame_width_height; /* inbound */ + dma_addr_t desc_write_addr; /* outbound */ + }; + union { + u32 start_h_v; /* inbound */ + u32 max_width_height; /* outbound */ + }; + u32 client_attr0; + u32 client_attr1; +}; + +/* Data Transfer Descriptor specifics */ +#define DTD_NO_NOTIFY 0 +#define DTD_NOTIFY 1 + +#define DTD_PKT_TYPE 0xa +#define DTD_DIR_IN 0 +#define DTD_DIR_OUT 1 + +/* type_ctl_stride */ +#define DTD_DATA_TYPE_MASK 0x3f +#define DTD_DATA_TYPE_SHFT 26 +#define DTD_NOTIFY_MASK 0x01 +#define DTD_NOTIFY_SHFT 25 +#define DTD_FIELD_MASK 0x01 +#define DTD_FIELD_SHFT 24 +#define DTD_1D_MASK 0x01 +#define DTD_1D_SHFT 23 +#define DTD_EVEN_LINE_SKIP_MASK 0x01 +#define DTD_EVEN_LINE_SKIP_SHFT 20 +#define DTD_ODD_LINE_SKIP_MASK 0x01 +#define DTD_ODD_LINE_SKIP_SHFT 16 +#define DTD_LINE_STRIDE_MASK 0xffff +#define DTD_LINE_STRIDE_SHFT 0 + +/* xfer_length_height */ +#define DTD_LINE_LENGTH_MASK 0xffff +#define DTD_LINE_LENGTH_SHFT 16 +#define DTD_XFER_HEIGHT_MASK 0xffff +#define DTD_XFER_HEIGHT_SHFT 0 + +/* pkt_ctl */ +#define DTD_PKT_TYPE_MASK 0x1f +#define DTD_PKT_TYPE_SHFT 27 +#define DTD_MODE_MASK 0x01 +#define DTD_MODE_SHFT 26 +#define DTD_DIR_MASK 0x01 +#define DTD_DIR_SHFT 25 +#define DTD_CHAN_MASK 0x01ff +#define DTD_CHAN_SHFT 16 +#define DTD_PRI_MASK 0x0f +#define DTD_PRI_SHFT 9 +#define DTD_NEXT_CHAN_MASK 0x01ff +#define DTD_NEXT_CHAN_SHFT 0 + +/* frame_width_height */ +#define DTD_FRAME_WIDTH_MASK 0xffff +#define DTD_FRAME_WIDTH_SHFT 16 +#define DTD_FRAME_HEIGHT_MASK 0xffff +#define DTD_FRAME_HEIGHT_SHFT 0 + +/* start_h_v */ +#define DTD_H_START_MASK 0xffff +#define DTD_H_START_SHFT 16 +#define DTD_V_START_MASK 0xffff +#define DTD_V_START_SHFT 0 + +#define DTD_DESC_START_SHIFT 5 +#define DTD_WRITE_DESC_MASK 0x01 +#define DTD_WRITE_DESC_SHIFT 2 +#define DTD_DROP_DATA_MASK 0x01 +#define DTD_DROP_DATA_SHIFT 1 +#define DTD_USE_DESC_MASK 0x01 +#define DTD_USE_DESC_SHIFT 0 + +/* max_width_height */ +#define DTD_MAX_WIDTH_MASK 0x07 +#define DTD_MAX_WIDTH_SHFT 4 +#define DTD_MAX_HEIGHT_MASK 0x07 +#define DTD_MAX_HEIGHT_SHFT 0 + +/* max width configurations */ + /* unlimited width */ +#define MAX_OUT_WIDTH_UNLIMITED 0 +/* as specified in max_size1 reg */ +#define MAX_OUT_WIDTH_REG1 1 +/* as specified in max_size2 reg */ +#define MAX_OUT_WIDTH_REG2 2 +/* as specified in max_size3 reg */ +#define MAX_OUT_WIDTH_REG3 3 +/* maximum of 352 pixels as width */ +#define MAX_OUT_WIDTH_352 4 +/* maximum of 768 pixels as width */ +#define MAX_OUT_WIDTH_768 5 +/* maximum of 1280 pixels width */ +#define MAX_OUT_WIDTH_1280 6 +/* maximum of 1920 pixels as width */ +#define MAX_OUT_WIDTH_1920 7 + +/* max height configurations */ + /* unlimited height */ +#define MAX_OUT_HEIGHT_UNLIMITED 0 +/* as specified in max_size1 reg */ +#define MAX_OUT_HEIGHT_REG1 1 +/* as specified in max_size2 reg */ +#define MAX_OUT_HEIGHT_REG2 2 +/* as specified in max_size3 reg */ +#define MAX_OUT_HEIGHT_REG3 3 +/* maximum of 288 lines as height */ +#define MAX_OUT_HEIGHT_288 4 +/* maximum of 576 lines as height */ +#define MAX_OUT_HEIGHT_576 5 +/* maximum of 720 lines as height */ +#define MAX_OUT_HEIGHT_720 6 +/* maximum of 1080 lines as height */ +#define MAX_OUT_HEIGHT_1080 7 + +static inline u32 dtd_type_ctl_stride(int type, bool notify, int field, + bool one_d, bool even_line_skip, bool 
odd_line_skip, + int line_stride) +{ + return (type << DTD_DATA_TYPE_SHFT) | (notify << DTD_NOTIFY_SHFT) | + (field << DTD_FIELD_SHFT) | (one_d << DTD_1D_SHFT) | + (even_line_skip << DTD_EVEN_LINE_SKIP_SHFT) | + (odd_line_skip << DTD_ODD_LINE_SKIP_SHFT) | + line_stride; +} + +static inline u32 dtd_xfer_length_height(int line_length, int xfer_height) +{ + return (line_length << DTD_LINE_LENGTH_SHFT) | xfer_height; +} + +static inline u32 dtd_pkt_ctl(bool mode, bool dir, int chan, int pri, + int next_chan) +{ + return (DTD_PKT_TYPE << DTD_PKT_TYPE_SHFT) | (mode << DTD_MODE_SHFT) | + (dir << DTD_DIR_SHFT) | (chan << DTD_CHAN_SHFT) | + (pri << DTD_PRI_SHFT) | next_chan; +} + +static inline u32 dtd_frame_width_height(int width, int height) +{ + return (width << DTD_FRAME_WIDTH_SHFT) | height; +} + +static inline u32 dtd_desc_write_addr(unsigned int addr, bool write_desc, + bool drop_data, bool use_desc) +{ + return (addr << DTD_DESC_START_SHIFT) | + (write_desc << DTD_WRITE_DESC_SHIFT) | + (drop_data << DTD_DROP_DATA_SHIFT) | + use_desc; +} + +static inline u32 dtd_start_h_v(int h_start, int v_start) +{ + return (h_start << DTD_H_START_SHFT) | v_start; +} + +static inline u32 dtd_max_width_height(int max_width, int max_height) +{ + return (max_width << DTD_MAX_WIDTH_SHFT) | max_height; +} + +static inline int dtd_get_data_type(struct vpdma_dtd *dtd) +{ + return dtd->type_ctl_stride >> DTD_DATA_TYPE_SHFT; +} + +static inline bool dtd_get_notify(struct vpdma_dtd *dtd) +{ + return (dtd->type_ctl_stride >> DTD_NOTIFY_SHFT) & DTD_NOTIFY_MASK; +} + +static inline int dtd_get_field(struct vpdma_dtd *dtd) +{ + return (dtd->type_ctl_stride >> DTD_FIELD_SHFT) & DTD_FIELD_MASK; +} + +static inline bool dtd_get_1d(struct vpdma_dtd *dtd) +{ + return (dtd->type_ctl_stride >> DTD_1D_SHFT) & DTD_1D_MASK; +} + +static inline bool dtd_get_even_line_skip(struct vpdma_dtd *dtd) +{ + return (dtd->type_ctl_stride >> DTD_EVEN_LINE_SKIP_SHFT) + & DTD_EVEN_LINE_SKIP_MASK; +} + +static inline bool dtd_get_odd_line_skip(struct vpdma_dtd *dtd) +{ + return (dtd->type_ctl_stride >> DTD_ODD_LINE_SKIP_SHFT) + & DTD_ODD_LINE_SKIP_MASK; +} + +static inline int dtd_get_line_stride(struct vpdma_dtd *dtd) +{ + return dtd->type_ctl_stride & DTD_LINE_STRIDE_MASK; +} + +static inline int dtd_get_line_length(struct vpdma_dtd *dtd) +{ + return dtd->xfer_length_height >> DTD_LINE_LENGTH_SHFT; +} + +static inline int dtd_get_xfer_height(struct vpdma_dtd *dtd) +{ + return dtd->xfer_length_height & DTD_XFER_HEIGHT_MASK; +} + +static inline int dtd_get_pkt_type(struct vpdma_dtd *dtd) +{ + return dtd->pkt_ctl >> DTD_PKT_TYPE_SHFT; +} + +static inline bool dtd_get_mode(struct vpdma_dtd *dtd) +{ + return (dtd->pkt_ctl >> DTD_MODE_SHFT) & DTD_MODE_MASK; +} + +static inline bool dtd_get_dir(struct vpdma_dtd *dtd) +{ + return (dtd->pkt_ctl >> DTD_DIR_SHFT) & DTD_DIR_MASK; +} + +static inline int dtd_get_chan(struct vpdma_dtd *dtd) +{ + return (dtd->pkt_ctl >> DTD_CHAN_SHFT) & DTD_CHAN_MASK; +} + +static inline int dtd_get_priority(struct vpdma_dtd *dtd) +{ + return (dtd->pkt_ctl >> DTD_PRI_SHFT) & DTD_PRI_MASK; +} + +static inline int dtd_get_next_chan(struct vpdma_dtd *dtd) +{ + return (dtd->pkt_ctl >> DTD_NEXT_CHAN_SHFT) & DTD_NEXT_CHAN_MASK; +} + +static inline int dtd_get_frame_width(struct vpdma_dtd *dtd) +{ + return dtd->frame_width_height >> DTD_FRAME_WIDTH_SHFT; +} + +static inline int dtd_get_frame_height(struct vpdma_dtd *dtd) +{ + return dtd->frame_width_height & DTD_FRAME_HEIGHT_MASK; +} + +static inline int 
dtd_get_desc_write_addr(struct vpdma_dtd *dtd) +{ + return dtd->desc_write_addr >> DTD_DESC_START_SHIFT; +} + +static inline bool dtd_get_write_desc(struct vpdma_dtd *dtd) +{ + return (dtd->desc_write_addr >> DTD_WRITE_DESC_SHIFT) & + DTD_WRITE_DESC_MASK; +} + +static inline bool dtd_get_drop_data(struct vpdma_dtd *dtd) +{ + return (dtd->desc_write_addr >> DTD_DROP_DATA_SHIFT) & + DTD_DROP_DATA_MASK; +} + +static inline bool dtd_get_use_desc(struct vpdma_dtd *dtd) +{ + return dtd->desc_write_addr & DTD_USE_DESC_MASK; +} + +static inline int dtd_get_h_start(struct vpdma_dtd *dtd) +{ + return dtd->start_h_v >> DTD_H_START_SHFT; +} + +static inline int dtd_get_v_start(struct vpdma_dtd *dtd) +{ + return dtd->start_h_v & DTD_V_START_MASK; +} + +static inline int dtd_get_max_width(struct vpdma_dtd *dtd) +{ + return (dtd->max_width_height >> DTD_MAX_WIDTH_SHFT) & + DTD_MAX_WIDTH_MASK; +} + +static inline int dtd_get_max_height(struct vpdma_dtd *dtd) +{ + return (dtd->max_width_height >> DTD_MAX_HEIGHT_SHFT) & + DTD_MAX_HEIGHT_MASK; +} + +/* + * configuration descriptor + */ +struct vpdma_cfd { + union { + u32 dest_addr_offset; + u32 w0; + }; + union { + u32 block_len; /* in words */ + u32 w1; + }; + u32 payload_addr; + u32 ctl_payload_len; /* in words */ +}; + +/* Configuration descriptor specifics */ + +#define CFD_PKT_TYPE 0xb + +#define CFD_DIRECT 1 +#define CFD_INDIRECT 0 +#define CFD_CLS_ADB 0 +#define CFD_CLS_BLOCK 1 + +/* block_len */ +#define CFD__BLOCK_LEN_MASK 0xffff +#define CFD__BLOCK_LEN_SHFT 0 + +/* ctl_payload_len */ +#define CFD_PKT_TYPE_MASK 0x1f +#define CFD_PKT_TYPE_SHFT 27 +#define CFD_DIRECT_MASK 0x01 +#define CFD_DIRECT_SHFT 26 +#define CFD_CLASS_MASK 0x03 +#define CFD_CLASS_SHFT 24 +#define CFD_DEST_MASK 0xff +#define CFD_DEST_SHFT 16 +#define CFD_PAYLOAD_LEN_MASK 0xffff +#define CFD_PAYLOAD_LEN_SHFT 0 + +static inline u32 cfd_pkt_payload_len(bool direct, int cls, int dest, + int payload_len) +{ + return (CFD_PKT_TYPE << CFD_PKT_TYPE_SHFT) | + (direct << CFD_DIRECT_SHFT) | + (cls << CFD_CLASS_SHFT) | + (dest << CFD_DEST_SHFT) | + payload_len; +} + +static inline int cfd_get_pkt_type(struct vpdma_cfd *cfd) +{ + return cfd->ctl_payload_len >> CFD_PKT_TYPE_SHFT; +} + +static inline bool cfd_get_direct(struct vpdma_cfd *cfd) +{ + return (cfd->ctl_payload_len >> CFD_DIRECT_SHFT) & CFD_DIRECT_MASK; +} + +static inline bool cfd_get_class(struct vpdma_cfd *cfd) +{ + return (cfd->ctl_payload_len >> CFD_CLASS_SHFT) & CFD_CLASS_MASK; +} + +static inline int cfd_get_dest(struct vpdma_cfd *cfd) +{ + return (cfd->ctl_payload_len >> CFD_DEST_SHFT) & CFD_DEST_MASK; +} + +static inline int cfd_get_payload_len(struct vpdma_cfd *cfd) +{ + return cfd->ctl_payload_len & CFD_PAYLOAD_LEN_MASK; +} + +/* + * control descriptor + */ +struct vpdma_ctd { + union { + u32 timer_value; + u32 list_addr; + u32 w0; + }; + union { + u32 pixel_line_count; + u32 list_size; + u32 w1; + }; + union { + u32 event; + u32 fid_ctl; + u32 w2; + }; + u32 type_source_ctl; +}; + +/* control descriptor types */ +#define CTD_TYPE_SYNC_ON_CLIENT 0 +#define CTD_TYPE_SYNC_ON_LIST 1 +#define CTD_TYPE_SYNC_ON_EXT 2 +#define CTD_TYPE_SYNC_ON_LM_TIMER 3 +#define CTD_TYPE_SYNC_ON_CHANNEL 4 +#define CTD_TYPE_CHNG_CLIENT_IRQ 5 +#define CTD_TYPE_SEND_IRQ 6 +#define CTD_TYPE_RELOAD_LIST 7 +#define CTD_TYPE_ABORT_CHANNEL 8 + +#define CTD_PKT_TYPE 0xc + +/* timer_value */ +#define CTD_TIMER_VALUE_MASK 0xffff +#define CTD_TIMER_VALUE_SHFT 0 + +/* pixel_line_count */ +#define CTD_PIXEL_COUNT_MASK 0xffff +#define CTD_PIXEL_COUNT_SHFT 16 
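
(Editorial aside, not part of the patch: every descriptor word in this header is built and parsed with shift/mask pairs like the ones defined above. The stand-alone sketch below shows the round trip for the configuration descriptor's ctl_payload_len word; the constants are copied from the CFD defines above, the packing mirrors cfd_pkt_payload_len()/cfd_get_dest(), and the function and variable names are hypothetical.)

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* values copied from the configuration descriptor defines in this header */
#define CFD_PKT_TYPE		0xb
#define CFD_PKT_TYPE_SHFT	27
#define CFD_DIRECT_SHFT		26
#define CFD_CLASS_SHFT		24
#define CFD_DEST_SHFT		16
#define CFD_DEST_MASK		0xff
#define CFD_PAYLOAD_LEN_MASK	0xffff

/* same packing scheme as cfd_pkt_payload_len() above */
static uint32_t pack_cfd_word(int direct, int cls, int dest, int payload_len)
{
	return ((uint32_t)CFD_PKT_TYPE << CFD_PKT_TYPE_SHFT) |
	       ((uint32_t)direct << CFD_DIRECT_SHFT) |
	       ((uint32_t)cls << CFD_CLASS_SHFT) |
	       ((uint32_t)dest << CFD_DEST_SHFT) |
	       (uint32_t)payload_len;
}

int main(void)
{
	/* indirect ADB payload of 20 words for client 4 (CFD_SC_CLIENT) */
	uint32_t w = pack_cfd_word(0, 0, 4, 20);

	/* decoding mirrors cfd_get_dest()/cfd_get_payload_len(): shift, then mask */
	assert(((w >> CFD_DEST_SHFT) & CFD_DEST_MASK) == 4);
	assert((w & CFD_PAYLOAD_LEN_MASK) == 20);

	printf("ctl_payload_len word = 0x%08x\n", w);
	return 0;
}
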
+#define CTD_LINE_COUNT_MASK 0xffff +#define CTD_LINE_COUNT_SHFT 0 + +/* list_size */ +#define CTD_LIST_SIZE_MASK 0xffff +#define CTD_LIST_SIZE_SHFT 0 + +/* event */ +#define CTD_EVENT_MASK 0x0f +#define CTD_EVENT_SHFT 0 + +/* fid_ctl */ +#define CTD_FID2_MASK 0x03 +#define CTD_FID2_SHFT 4 +#define CTD_FID1_MASK 0x03 +#define CTD_FID1_SHFT 2 +#define CTD_FID0_MASK 0x03 +#define CTD_FID0_SHFT 0 + +/* type_source_ctl */ +#define CTD_PKT_TYPE_MASK 0x1f +#define CTD_PKT_TYPE_SHFT 27 +#define CTD_SOURCE_MASK 0xff +#define CTD_SOURCE_SHFT 16 +#define CTD_CONTROL_MASK 0x0f +#define CTD_CONTROL_SHFT 0 + +static inline u32 ctd_pixel_line_count(int pixel_count, int line_count) +{ + return (pixel_count << CTD_PIXEL_COUNT_SHFT) | line_count; +} + +static inline u32 ctd_set_fid_ctl(int fid0, int fid1, int fid2) +{ + return (fid2 << CTD_FID2_SHFT) | (fid1 << CTD_FID1_SHFT) | fid0; +} + +static inline u32 ctd_type_source_ctl(int source, int control) +{ + return (CTD_PKT_TYPE << CTD_PKT_TYPE_SHFT) | + (source << CTD_SOURCE_SHFT) | control; +} + +static inline u32 ctd_get_pixel_count(struct vpdma_ctd *ctd) +{ + return ctd->pixel_line_count >> CTD_PIXEL_COUNT_SHFT; +} + +static inline int ctd_get_line_count(struct vpdma_ctd *ctd) +{ + return ctd->pixel_line_count & CTD_LINE_COUNT_MASK; +} + +static inline int ctd_get_event(struct vpdma_ctd *ctd) +{ + return ctd->event & CTD_EVENT_MASK; +} + +static inline int ctd_get_fid2_ctl(struct vpdma_ctd *ctd) +{ + return (ctd->fid_ctl >> CTD_FID2_SHFT) & CTD_FID2_MASK; +} + +static inline int ctd_get_fid1_ctl(struct vpdma_ctd *ctd) +{ + return (ctd->fid_ctl >> CTD_FID1_SHFT) & CTD_FID1_MASK; +} + +static inline int ctd_get_fid0_ctl(struct vpdma_ctd *ctd) +{ + return ctd->fid_ctl & CTD_FID2_MASK; +} + +static inline int ctd_get_pkt_type(struct vpdma_ctd *ctd) +{ + return ctd->type_source_ctl >> CTD_PKT_TYPE_SHFT; +} + +static inline int ctd_get_source(struct vpdma_ctd *ctd) +{ + return (ctd->type_source_ctl >> CTD_SOURCE_SHFT) & CTD_SOURCE_MASK; +} + +static inline int ctd_get_ctl(struct vpdma_ctd *ctd) +{ + return ctd->type_source_ctl & CTD_CONTROL_MASK; +} + +#endif diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c new file mode 100644 index 000000000000..4e58069e24ff --- /dev/null +++ b/drivers/media/platform/ti-vpe/vpe.c @@ -0,0 +1,2099 @@ +/* + * TI VPE mem2mem driver, based on the virtual v4l2-mem2mem example driver + * + * Copyright (c) 2013 Texas Instruments Inc. + * David Griego, <dagriego@biglakesoftware.com> + * Dale Farnsworth, <dale@farnsworth.org> + * Archit Taneja, <archit@ti.com> + * + * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. 
+ * Pawel Osciak, <pawel@osciak.com> + * Marek Szyprowski, <m.szyprowski@samsung.com> + * + * Based on the virtual v4l2-mem2mem example device + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation + */ + +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/fs.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/ioctl.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/videodev2.h> + +#include <media/v4l2-common.h> +#include <media/v4l2-ctrls.h> +#include <media/v4l2-device.h> +#include <media/v4l2-event.h> +#include <media/v4l2-ioctl.h> +#include <media/v4l2-mem2mem.h> +#include <media/videobuf2-core.h> +#include <media/videobuf2-dma-contig.h> + +#include "vpdma.h" +#include "vpe_regs.h" + +#define VPE_MODULE_NAME "vpe" + +/* minimum and maximum frame sizes */ +#define MIN_W 128 +#define MIN_H 128 +#define MAX_W 1920 +#define MAX_H 1080 + +/* required alignments */ +#define S_ALIGN 0 /* multiple of 1 */ +#define H_ALIGN 1 /* multiple of 2 */ +#define W_ALIGN 1 /* multiple of 2 */ + +/* multiple of 128 bits, line stride, 16 bytes */ +#define L_ALIGN 4 + +/* flags that indicate a format can be used for capture/output */ +#define VPE_FMT_TYPE_CAPTURE (1 << 0) +#define VPE_FMT_TYPE_OUTPUT (1 << 1) + +/* used as plane indices */ +#define VPE_MAX_PLANES 2 +#define VPE_LUMA 0 +#define VPE_CHROMA 1 + +/* per m2m context info */ +#define VPE_MAX_SRC_BUFS 3 /* need 3 src fields to de-interlace */ + +#define VPE_DEF_BUFS_PER_JOB 1 /* default one buffer per batch job */ + +/* + * each VPE context can need up to 3 config desciptors, 7 input descriptors, + * 3 output descriptors, and 10 control descriptors + */ +#define VPE_DESC_LIST_SIZE (10 * VPDMA_DTD_DESC_SIZE + \ + 13 * VPDMA_CFD_CTD_DESC_SIZE) + +#define vpe_dbg(vpedev, fmt, arg...) \ + dev_dbg((vpedev)->v4l2_dev.dev, fmt, ##arg) +#define vpe_err(vpedev, fmt, arg...) \ + dev_err((vpedev)->v4l2_dev.dev, fmt, ##arg) + +struct vpe_us_coeffs { + unsigned short anchor_fid0_c0; + unsigned short anchor_fid0_c1; + unsigned short anchor_fid0_c2; + unsigned short anchor_fid0_c3; + unsigned short interp_fid0_c0; + unsigned short interp_fid0_c1; + unsigned short interp_fid0_c2; + unsigned short interp_fid0_c3; + unsigned short anchor_fid1_c0; + unsigned short anchor_fid1_c1; + unsigned short anchor_fid1_c2; + unsigned short anchor_fid1_c3; + unsigned short interp_fid1_c0; + unsigned short interp_fid1_c1; + unsigned short interp_fid1_c2; + unsigned short interp_fid1_c3; +}; + +/* + * Default upsampler coefficients + */ +static const struct vpe_us_coeffs us_coeffs[] = { + { + /* Coefficients for progressive input */ + 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8, + 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8, + }, + { + /* Coefficients for Top Field Interlaced input */ + 0x0051, 0x03D5, 0x3FE3, 0x3FF7, 0x3FB5, 0x02E9, 0x018F, 0x3FD3, + /* Coefficients for Bottom Field Interlaced input */ + 0x016B, 0x0247, 0x00B1, 0x3F9D, 0x3FCF, 0x03DB, 0x005D, 0x3FF9, + }, +}; + +/* + * the following registers are for configuring some of the parameters of the + * motion and edge detection blocks inside DEI, these generally remain the same, + * these could be passed later via userspace if some one needs to tweak these. 
+ */ +struct vpe_dei_regs { + unsigned long mdt_spacial_freq_thr_reg; /* VPE_DEI_REG2 */ + unsigned long edi_config_reg; /* VPE_DEI_REG3 */ + unsigned long edi_lut_reg0; /* VPE_DEI_REG4 */ + unsigned long edi_lut_reg1; /* VPE_DEI_REG5 */ + unsigned long edi_lut_reg2; /* VPE_DEI_REG6 */ + unsigned long edi_lut_reg3; /* VPE_DEI_REG7 */ +}; + +/* + * default expert DEI register values, unlikely to be modified. + */ +static const struct vpe_dei_regs dei_regs = { + 0x020C0804u, + 0x0118100Fu, + 0x08040200u, + 0x1010100Cu, + 0x10101010u, + 0x10101010u, +}; + +/* + * The port_data structure contains per-port data. + */ +struct vpe_port_data { + enum vpdma_channel channel; /* VPDMA channel */ + u8 vb_index; /* input frame f, f-1, f-2 index */ + u8 vb_part; /* plane index for co-panar formats */ +}; + +/* + * Define indices into the port_data tables + */ +#define VPE_PORT_LUMA1_IN 0 +#define VPE_PORT_CHROMA1_IN 1 +#define VPE_PORT_LUMA2_IN 2 +#define VPE_PORT_CHROMA2_IN 3 +#define VPE_PORT_LUMA3_IN 4 +#define VPE_PORT_CHROMA3_IN 5 +#define VPE_PORT_MV_IN 6 +#define VPE_PORT_MV_OUT 7 +#define VPE_PORT_LUMA_OUT 8 +#define VPE_PORT_CHROMA_OUT 9 +#define VPE_PORT_RGB_OUT 10 + +static const struct vpe_port_data port_data[11] = { + [VPE_PORT_LUMA1_IN] = { + .channel = VPE_CHAN_LUMA1_IN, + .vb_index = 0, + .vb_part = VPE_LUMA, + }, + [VPE_PORT_CHROMA1_IN] = { + .channel = VPE_CHAN_CHROMA1_IN, + .vb_index = 0, + .vb_part = VPE_CHROMA, + }, + [VPE_PORT_LUMA2_IN] = { + .channel = VPE_CHAN_LUMA2_IN, + .vb_index = 1, + .vb_part = VPE_LUMA, + }, + [VPE_PORT_CHROMA2_IN] = { + .channel = VPE_CHAN_CHROMA2_IN, + .vb_index = 1, + .vb_part = VPE_CHROMA, + }, + [VPE_PORT_LUMA3_IN] = { + .channel = VPE_CHAN_LUMA3_IN, + .vb_index = 2, + .vb_part = VPE_LUMA, + }, + [VPE_PORT_CHROMA3_IN] = { + .channel = VPE_CHAN_CHROMA3_IN, + .vb_index = 2, + .vb_part = VPE_CHROMA, + }, + [VPE_PORT_MV_IN] = { + .channel = VPE_CHAN_MV_IN, + }, + [VPE_PORT_MV_OUT] = { + .channel = VPE_CHAN_MV_OUT, + }, + [VPE_PORT_LUMA_OUT] = { + .channel = VPE_CHAN_LUMA_OUT, + .vb_part = VPE_LUMA, + }, + [VPE_PORT_CHROMA_OUT] = { + .channel = VPE_CHAN_CHROMA_OUT, + .vb_part = VPE_CHROMA, + }, + [VPE_PORT_RGB_OUT] = { + .channel = VPE_CHAN_RGB_OUT, + .vb_part = VPE_LUMA, + }, +}; + + +/* driver info for each of the supported video formats */ +struct vpe_fmt { + char *name; /* human-readable name */ + u32 fourcc; /* standard format identifier */ + u8 types; /* CAPTURE and/or OUTPUT */ + u8 coplanar; /* set for unpacked Luma and Chroma */ + /* vpdma format info for each plane */ + struct vpdma_data_format const *vpdma_fmt[VPE_MAX_PLANES]; +}; + +static struct vpe_fmt vpe_formats[] = { + { + .name = "YUV 422 co-planar", + .fourcc = V4L2_PIX_FMT_NV16, + .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, + .coplanar = 1, + .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y444], + &vpdma_yuv_fmts[VPDMA_DATA_FMT_C444], + }, + }, + { + .name = "YUV 420 co-planar", + .fourcc = V4L2_PIX_FMT_NV12, + .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, + .coplanar = 1, + .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420], + &vpdma_yuv_fmts[VPDMA_DATA_FMT_C420], + }, + }, + { + .name = "YUYV 422 packed", + .fourcc = V4L2_PIX_FMT_YUYV, + .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, + .coplanar = 0, + .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YC422], + }, + }, + { + .name = "UYVY 422 packed", + .fourcc = V4L2_PIX_FMT_UYVY, + .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, + .coplanar = 0, + .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CY422], + }, + }, 
+}; + +/* + * per-queue, driver-specific private data. + * there is one source queue and one destination queue for each m2m context. + */ +struct vpe_q_data { + unsigned int width; /* frame width */ + unsigned int height; /* frame height */ + unsigned int bytesperline[VPE_MAX_PLANES]; /* bytes per line in memory */ + enum v4l2_colorspace colorspace; + enum v4l2_field field; /* supported field value */ + unsigned int flags; + unsigned int sizeimage[VPE_MAX_PLANES]; /* image size in memory */ + struct v4l2_rect c_rect; /* crop/compose rectangle */ + struct vpe_fmt *fmt; /* format info */ +}; + +/* vpe_q_data flag bits */ +#define Q_DATA_FRAME_1D (1 << 0) +#define Q_DATA_MODE_TILED (1 << 1) +#define Q_DATA_INTERLACED (1 << 2) + +enum { + Q_DATA_SRC = 0, + Q_DATA_DST = 1, +}; + +/* find our format description corresponding to the passed v4l2_format */ +static struct vpe_fmt *find_format(struct v4l2_format *f) +{ + struct vpe_fmt *fmt; + unsigned int k; + + for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) { + fmt = &vpe_formats[k]; + if (fmt->fourcc == f->fmt.pix.pixelformat) + return fmt; + } + + return NULL; +} + +/* + * there is one vpe_dev structure in the driver, it is shared by + * all instances. + */ +struct vpe_dev { + struct v4l2_device v4l2_dev; + struct video_device vfd; + struct v4l2_m2m_dev *m2m_dev; + + atomic_t num_instances; /* count of driver instances */ + dma_addr_t loaded_mmrs; /* shadow mmrs in device */ + struct mutex dev_mutex; + spinlock_t lock; + + int irq; + void __iomem *base; + + struct vb2_alloc_ctx *alloc_ctx; + struct vpdma_data *vpdma; /* vpdma data handle */ +}; + +/* + * There is one vpe_ctx structure for each m2m context. + */ +struct vpe_ctx { + struct v4l2_fh fh; + struct vpe_dev *dev; + struct v4l2_m2m_ctx *m2m_ctx; + struct v4l2_ctrl_handler hdl; + + unsigned int field; /* current field */ + unsigned int sequence; /* current frame/field seq */ + unsigned int aborting; /* abort after next irq */ + + unsigned int bufs_per_job; /* input buffers per batch */ + unsigned int bufs_completed; /* bufs done in this batch */ + + struct vpe_q_data q_data[2]; /* src & dst queue data */ + struct vb2_buffer *src_vbs[VPE_MAX_SRC_BUFS]; + struct vb2_buffer *dst_vb; + + dma_addr_t mv_buf_dma[2]; /* dma addrs of motion vector in/out bufs */ + void *mv_buf[2]; /* virtual addrs of motion vector bufs */ + size_t mv_buf_size; /* current motion vector buffer size */ + struct vpdma_buf mmr_adb; /* shadow reg addr/data block */ + struct vpdma_desc_list desc_list; /* DMA descriptor list */ + + bool deinterlacing; /* using de-interlacer */ + bool load_mmrs; /* have new shadow reg values */ + + unsigned int src_mv_buf_selector; +}; + + +/* + * M2M devices get 2 queues. + * Return the queue given the type. 
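+ * The OUTPUT (source) queue holds the frames supplied by userspace and the + * CAPTURE (destination) queue holds the frames produced by VPE.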
+ */ +static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx, + enum v4l2_buf_type type) +{ + switch (type) { + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + return &ctx->q_data[Q_DATA_SRC]; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + return &ctx->q_data[Q_DATA_DST]; + default: + BUG(); + } + return NULL; +} + +static u32 read_reg(struct vpe_dev *dev, int offset) +{ + return ioread32(dev->base + offset); +} + +static void write_reg(struct vpe_dev *dev, int offset, u32 value) +{ + iowrite32(value, dev->base + offset); +} + +/* register field read/write helpers */ +static int get_field(u32 value, u32 mask, int shift) +{ + return (value & (mask << shift)) >> shift; +} + +static int read_field_reg(struct vpe_dev *dev, int offset, u32 mask, int shift) +{ + return get_field(read_reg(dev, offset), mask, shift); +} + +static void write_field(u32 *valp, u32 field, u32 mask, int shift) +{ + u32 val = *valp; + + val &= ~(mask << shift); + val |= (field & mask) << shift; + *valp = val; +} + +static void write_field_reg(struct vpe_dev *dev, int offset, u32 field, + u32 mask, int shift) +{ + u32 val = read_reg(dev, offset); + + write_field(&val, field, mask, shift); + + write_reg(dev, offset, val); +} + +/* + * DMA address/data block for the shadow registers + */ +struct vpe_mmr_adb { + struct vpdma_adb_hdr out_fmt_hdr; + u32 out_fmt_reg[1]; + u32 out_fmt_pad[3]; + struct vpdma_adb_hdr us1_hdr; + u32 us1_regs[8]; + struct vpdma_adb_hdr us2_hdr; + u32 us2_regs[8]; + struct vpdma_adb_hdr us3_hdr; + u32 us3_regs[8]; + struct vpdma_adb_hdr dei_hdr; + u32 dei_regs[8]; + struct vpdma_adb_hdr sc_hdr; + u32 sc_regs[1]; + u32 sc_pad[3]; + struct vpdma_adb_hdr csc_hdr; + u32 csc_regs[6]; + u32 csc_pad[2]; +}; + +#define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a) \ + VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a) +/* + * Set the headers for all of the address/data block structures. + */ +static void init_adb_hdrs(struct vpe_ctx *ctx) +{ + VPE_SET_MMR_ADB_HDR(ctx, out_fmt_hdr, out_fmt_reg, VPE_CLK_FORMAT_SELECT); + VPE_SET_MMR_ADB_HDR(ctx, us1_hdr, us1_regs, VPE_US1_R0); + VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0); + VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0); + VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE); + VPE_SET_MMR_ADB_HDR(ctx, sc_hdr, sc_regs, VPE_SC_MP_SC0); + VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs, VPE_CSC_CSC00); +}; + +/* + * Allocate or re-allocate the motion vector DMA buffers + * There are two buffers, one for input and one for output. + * However, the roles are reversed after each field is processed. + * In other words, after each field is processed, the previous + * output (dst) MV buffer becomes the new input (src) MV buffer. 
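+ * The swap is done by toggling ctx->src_mv_buf_selector in the interrupt + * handler once a field has been processed; both buffers share the size + * computed from the VPDMA motion vector format in set_srcdst_params().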
+ */ +static int realloc_mv_buffers(struct vpe_ctx *ctx, size_t size) +{ + struct device *dev = ctx->dev->v4l2_dev.dev; + + if (ctx->mv_buf_size == size) + return 0; + + if (ctx->mv_buf[0]) + dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[0], + ctx->mv_buf_dma[0]); + + if (ctx->mv_buf[1]) + dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[1], + ctx->mv_buf_dma[1]); + + if (size == 0) + return 0; + + ctx->mv_buf[0] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[0], + GFP_KERNEL); + if (!ctx->mv_buf[0]) { + vpe_err(ctx->dev, "failed to allocate motion vector buffer\n"); + return -ENOMEM; + } + + ctx->mv_buf[1] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[1], + GFP_KERNEL); + if (!ctx->mv_buf[1]) { + vpe_err(ctx->dev, "failed to allocate motion vector buffer\n"); + dma_free_coherent(dev, size, ctx->mv_buf[0], + ctx->mv_buf_dma[0]); + + return -ENOMEM; + } + + ctx->mv_buf_size = size; + ctx->src_mv_buf_selector = 0; + + return 0; +} + +static void free_mv_buffers(struct vpe_ctx *ctx) +{ + realloc_mv_buffers(ctx, 0); +} + +/* + * While de-interlacing, we keep the two most recent input buffers + * around. This function frees those two buffers when we have + * finished processing the current stream. + */ +static void free_vbs(struct vpe_ctx *ctx) +{ + struct vpe_dev *dev = ctx->dev; + unsigned long flags; + + if (ctx->src_vbs[2] == NULL) + return; + + spin_lock_irqsave(&dev->lock, flags); + if (ctx->src_vbs[2]) { + v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE); + v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE); + } + spin_unlock_irqrestore(&dev->lock, flags); +} + +/* + * Enable or disable the VPE clocks + */ +static void vpe_set_clock_enable(struct vpe_dev *dev, bool on) +{ + u32 val = 0; + + if (on) + val = VPE_DATA_PATH_CLK_ENABLE | VPE_VPEDMA_CLK_ENABLE; + write_reg(dev, VPE_CLK_ENABLE, val); +} + +static void vpe_top_reset(struct vpe_dev *dev) +{ + + write_field_reg(dev, VPE_CLK_RESET, 1, VPE_DATA_PATH_CLK_RESET_MASK, + VPE_DATA_PATH_CLK_RESET_SHIFT); + + usleep_range(100, 150); + + write_field_reg(dev, VPE_CLK_RESET, 0, VPE_DATA_PATH_CLK_RESET_MASK, + VPE_DATA_PATH_CLK_RESET_SHIFT); +} + +static void vpe_top_vpdma_reset(struct vpe_dev *dev) +{ + write_field_reg(dev, VPE_CLK_RESET, 1, VPE_VPDMA_CLK_RESET_MASK, + VPE_VPDMA_CLK_RESET_SHIFT); + + usleep_range(100, 150); + + write_field_reg(dev, VPE_CLK_RESET, 0, VPE_VPDMA_CLK_RESET_MASK, + VPE_VPDMA_CLK_RESET_SHIFT); +} + +/* + * Load the correct of upsampler coefficients into the shadow MMRs + */ +static void set_us_coefficients(struct vpe_ctx *ctx) +{ + struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; + struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC]; + u32 *us1_reg = &mmr_adb->us1_regs[0]; + u32 *us2_reg = &mmr_adb->us2_regs[0]; + u32 *us3_reg = &mmr_adb->us3_regs[0]; + const unsigned short *cp, *end_cp; + + cp = &us_coeffs[0].anchor_fid0_c0; + + if (s_q_data->flags & Q_DATA_INTERLACED) /* interlaced */ + cp += sizeof(us_coeffs[0]) / sizeof(*cp); + + end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp); + + while (cp < end_cp) { + write_field(us1_reg, *cp++, VPE_US_C0_MASK, VPE_US_C0_SHIFT); + write_field(us1_reg, *cp++, VPE_US_C1_MASK, VPE_US_C1_SHIFT); + *us2_reg++ = *us1_reg; + *us3_reg++ = *us1_reg++; + } + ctx->load_mmrs = true; +} + +/* + * Set the upsampler config mode and the VPDMA line mode in the shadow MMRs. 
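+ * For NV12 (420) sources the chroma clients are put in line mode 0, so that + * VPDMA doubles the chroma lines fed to the upsampler; 422 sources leave the + * upsampler disabled and use line mode 1.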
+ */ +static void set_cfg_and_line_modes(struct vpe_ctx *ctx) +{ + struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt; + struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; + u32 *us1_reg0 = &mmr_adb->us1_regs[0]; + u32 *us2_reg0 = &mmr_adb->us2_regs[0]; + u32 *us3_reg0 = &mmr_adb->us3_regs[0]; + int line_mode = 1; + int cfg_mode = 1; + + /* + * Cfg Mode 0: YUV420 source, enable upsampler, DEI is de-interlacing. + * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing. + */ + + if (fmt->fourcc == V4L2_PIX_FMT_NV12) { + cfg_mode = 0; + line_mode = 0; /* double lines to line buffer */ + } + + write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT); + write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT); + write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT); + + /* regs for now */ + vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN); + vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN); + vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA3_IN); + + /* frame start for input luma */ + vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, + VPE_CHAN_LUMA1_IN); + vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, + VPE_CHAN_LUMA2_IN); + vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, + VPE_CHAN_LUMA3_IN); + + /* frame start for input chroma */ + vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, + VPE_CHAN_CHROMA1_IN); + vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, + VPE_CHAN_CHROMA2_IN); + vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, + VPE_CHAN_CHROMA3_IN); + + /* frame start for MV in client */ + vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, + VPE_CHAN_MV_IN); + + ctx->load_mmrs = true; +} + +/* + * Set the shadow registers that are modified when the source + * format changes. + */ +static void set_src_registers(struct vpe_ctx *ctx) +{ + set_us_coefficients(ctx); +} + +/* + * Set the shadow registers that are modified when the destination + * format changes. + */ +static void set_dst_registers(struct vpe_ctx *ctx) +{ + struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; + struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt; + u32 val = 0; + + /* select RGB path when color space conversion is supported in future */ + if (fmt->fourcc == V4L2_PIX_FMT_RGB24) + val |= VPE_RGB_OUT_SELECT | VPE_CSC_SRC_DEI_SCALER; + else if (fmt->fourcc == V4L2_PIX_FMT_NV16) + val |= VPE_COLOR_SEPARATE_422; + + /* The source of CHR_DS is always the scaler, whether it's used or not */ + val |= VPE_DS_SRC_DEI_SCALER; + + if (fmt->fourcc != V4L2_PIX_FMT_NV12) + val |= VPE_DS_BYPASS; + + mmr_adb->out_fmt_reg[0] = val; + + ctx->load_mmrs = true; +} + +/* + * Set the de-interlacer shadow register values + */ +static void set_dei_regs(struct vpe_ctx *ctx) +{ + struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; + struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC]; + unsigned int src_h = s_q_data->c_rect.height; + unsigned int src_w = s_q_data->c_rect.width; + u32 *dei_mmr0 = &mmr_adb->dei_regs[0]; + bool deinterlace = true; + u32 val = 0; + + /* + * according to TRM, we should set DEI in progressive bypass mode when + * the input content is progressive, however, DEI is bypassed correctly + * for both progressive and interlace content in interlace bypass mode. + * It has been recommended not to use progressive bypass mode. 
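+ * Interlace bypass is therefore used whenever this context is not actively + * de-interlacing; the DEI frame height below is doubled only when + * de-interlacing is enabled, since each input field then yields a full frame.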
+ */ + if ((!ctx->deinterlacing && (s_q_data->flags & Q_DATA_INTERLACED)) || + !(s_q_data->flags & Q_DATA_INTERLACED)) { + deinterlace = false; + val = VPE_DEI_INTERLACE_BYPASS; + } + + src_h = deinterlace ? src_h * 2 : src_h; + + val |= (src_h << VPE_DEI_HEIGHT_SHIFT) | + (src_w << VPE_DEI_WIDTH_SHIFT) | + VPE_DEI_FIELD_FLUSH; + + *dei_mmr0 = val; + + ctx->load_mmrs = true; +} + +static void set_dei_shadow_registers(struct vpe_ctx *ctx) +{ + struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; + u32 *dei_mmr = &mmr_adb->dei_regs[0]; + const struct vpe_dei_regs *cur = &dei_regs; + + dei_mmr[2] = cur->mdt_spacial_freq_thr_reg; + dei_mmr[3] = cur->edi_config_reg; + dei_mmr[4] = cur->edi_lut_reg0; + dei_mmr[5] = cur->edi_lut_reg1; + dei_mmr[6] = cur->edi_lut_reg2; + dei_mmr[7] = cur->edi_lut_reg3; + + ctx->load_mmrs = true; +} + +static void set_csc_coeff_bypass(struct vpe_ctx *ctx) +{ + struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; + u32 *shadow_csc_reg5 = &mmr_adb->csc_regs[5]; + + *shadow_csc_reg5 |= VPE_CSC_BYPASS; + + ctx->load_mmrs = true; +} + +static void set_sc_regs_bypass(struct vpe_ctx *ctx) +{ + struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; + u32 *sc_reg0 = &mmr_adb->sc_regs[0]; + u32 val = 0; + + val |= VPE_SC_BYPASS; + *sc_reg0 = val; + + ctx->load_mmrs = true; +} + +/* + * Set the shadow registers whose values are modified when either the + * source or destination format is changed. + */ +static int set_srcdst_params(struct vpe_ctx *ctx) +{ + struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC]; + struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST]; + size_t mv_buf_size; + int ret; + + ctx->sequence = 0; + ctx->field = V4L2_FIELD_TOP; + + if ((s_q_data->flags & Q_DATA_INTERLACED) && + !(d_q_data->flags & Q_DATA_INTERLACED)) { + const struct vpdma_data_format *mv = + &vpdma_misc_fmts[VPDMA_DATA_FMT_MV]; + + ctx->deinterlacing = 1; + mv_buf_size = + (s_q_data->width * s_q_data->height * mv->depth) >> 3; + } else { + ctx->deinterlacing = 0; + mv_buf_size = 0; + } + + free_vbs(ctx); + + ret = realloc_mv_buffers(ctx, mv_buf_size); + if (ret) + return ret; + + set_cfg_and_line_modes(ctx); + set_dei_regs(ctx); + set_csc_coeff_bypass(ctx); + set_sc_regs_bypass(ctx); + + return 0; +} + +/* + * Return the vpe_ctx structure for a given struct file + */ +static struct vpe_ctx *file2ctx(struct file *file) +{ + return container_of(file->private_data, struct vpe_ctx, fh); +} + +/* + * mem2mem callbacks + */ + +/** + * job_ready() - check whether an instance is ready to be scheduled to run + */ +static int job_ready(void *priv) +{ + struct vpe_ctx *ctx = priv; + int needed = ctx->bufs_per_job; + + if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) + needed += 2; /* need additional two most recent fields */ + + if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < needed) + return 0; + + return 1; +} + +static void job_abort(void *priv) +{ + struct vpe_ctx *ctx = priv; + + /* Will cancel the transaction in the next interrupt handler */ + ctx->aborting = 1; +} + +/* + * Lock access to the device + */ +static void vpe_lock(void *priv) +{ + struct vpe_ctx *ctx = priv; + struct vpe_dev *dev = ctx->dev; + mutex_lock(&dev->dev_mutex); +} + +static void vpe_unlock(void *priv) +{ + struct vpe_ctx *ctx = priv; + struct vpe_dev *dev = ctx->dev; + mutex_unlock(&dev->dev_mutex); +} + +static void vpe_dump_regs(struct vpe_dev *dev) +{ +#define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r)) + + vpe_dbg(dev, "VPE Registers:\n"); + + DUMPREG(PID); + DUMPREG(SYSCONFIG); + 
DUMPREG(INT0_STATUS0_RAW); + DUMPREG(INT0_STATUS0); + DUMPREG(INT0_ENABLE0); + DUMPREG(INT0_STATUS1_RAW); + DUMPREG(INT0_STATUS1); + DUMPREG(INT0_ENABLE1); + DUMPREG(CLK_ENABLE); + DUMPREG(CLK_RESET); + DUMPREG(CLK_FORMAT_SELECT); + DUMPREG(CLK_RANGE_MAP); + DUMPREG(US1_R0); + DUMPREG(US1_R1); + DUMPREG(US1_R2); + DUMPREG(US1_R3); + DUMPREG(US1_R4); + DUMPREG(US1_R5); + DUMPREG(US1_R6); + DUMPREG(US1_R7); + DUMPREG(US2_R0); + DUMPREG(US2_R1); + DUMPREG(US2_R2); + DUMPREG(US2_R3); + DUMPREG(US2_R4); + DUMPREG(US2_R5); + DUMPREG(US2_R6); + DUMPREG(US2_R7); + DUMPREG(US3_R0); + DUMPREG(US3_R1); + DUMPREG(US3_R2); + DUMPREG(US3_R3); + DUMPREG(US3_R4); + DUMPREG(US3_R5); + DUMPREG(US3_R6); + DUMPREG(US3_R7); + DUMPREG(DEI_FRAME_SIZE); + DUMPREG(MDT_BYPASS); + DUMPREG(MDT_SF_THRESHOLD); + DUMPREG(EDI_CONFIG); + DUMPREG(DEI_EDI_LUT_R0); + DUMPREG(DEI_EDI_LUT_R1); + DUMPREG(DEI_EDI_LUT_R2); + DUMPREG(DEI_EDI_LUT_R3); + DUMPREG(DEI_FMD_WINDOW_R0); + DUMPREG(DEI_FMD_WINDOW_R1); + DUMPREG(DEI_FMD_CONTROL_R0); + DUMPREG(DEI_FMD_CONTROL_R1); + DUMPREG(DEI_FMD_STATUS_R0); + DUMPREG(DEI_FMD_STATUS_R1); + DUMPREG(DEI_FMD_STATUS_R2); + DUMPREG(SC_MP_SC0); + DUMPREG(SC_MP_SC1); + DUMPREG(SC_MP_SC2); + DUMPREG(SC_MP_SC3); + DUMPREG(SC_MP_SC4); + DUMPREG(SC_MP_SC5); + DUMPREG(SC_MP_SC6); + DUMPREG(SC_MP_SC8); + DUMPREG(SC_MP_SC9); + DUMPREG(SC_MP_SC10); + DUMPREG(SC_MP_SC11); + DUMPREG(SC_MP_SC12); + DUMPREG(SC_MP_SC13); + DUMPREG(SC_MP_SC17); + DUMPREG(SC_MP_SC18); + DUMPREG(SC_MP_SC19); + DUMPREG(SC_MP_SC20); + DUMPREG(SC_MP_SC21); + DUMPREG(SC_MP_SC22); + DUMPREG(SC_MP_SC23); + DUMPREG(SC_MP_SC24); + DUMPREG(SC_MP_SC25); + DUMPREG(CSC_CSC00); + DUMPREG(CSC_CSC01); + DUMPREG(CSC_CSC02); + DUMPREG(CSC_CSC03); + DUMPREG(CSC_CSC04); + DUMPREG(CSC_CSC05); +#undef DUMPREG +} + +static void add_out_dtd(struct vpe_ctx *ctx, int port) +{ + struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST]; + const struct vpe_port_data *p_data = &port_data[port]; + struct vb2_buffer *vb = ctx->dst_vb; + struct v4l2_rect *c_rect = &q_data->c_rect; + struct vpe_fmt *fmt = q_data->fmt; + const struct vpdma_data_format *vpdma_fmt; + int mv_buf_selector = !ctx->src_mv_buf_selector; + dma_addr_t dma_addr; + u32 flags = 0; + + if (port == VPE_PORT_MV_OUT) { + vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV]; + dma_addr = ctx->mv_buf_dma[mv_buf_selector]; + } else { + /* to incorporate interleaved formats */ + int plane = fmt->coplanar ? 
p_data->vb_part : 0; + + vpdma_fmt = fmt->vpdma_fmt[plane]; + dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane); + if (!dma_addr) { + vpe_err(ctx->dev, + "acquiring output buffer(%d) dma_addr failed\n", + port); + return; + } + } + + if (q_data->flags & Q_DATA_FRAME_1D) + flags |= VPDMA_DATA_FRAME_1D; + if (q_data->flags & Q_DATA_MODE_TILED) + flags |= VPDMA_DATA_MODE_TILED; + + vpdma_add_out_dtd(&ctx->desc_list, c_rect, vpdma_fmt, dma_addr, + p_data->channel, flags); +} + +static void add_in_dtd(struct vpe_ctx *ctx, int port) +{ + struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC]; + const struct vpe_port_data *p_data = &port_data[port]; + struct vb2_buffer *vb = ctx->src_vbs[p_data->vb_index]; + struct v4l2_rect *c_rect = &q_data->c_rect; + struct vpe_fmt *fmt = q_data->fmt; + const struct vpdma_data_format *vpdma_fmt; + int mv_buf_selector = ctx->src_mv_buf_selector; + int field = vb->v4l2_buf.field == V4L2_FIELD_BOTTOM; + dma_addr_t dma_addr; + u32 flags = 0; + + if (port == VPE_PORT_MV_IN) { + vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV]; + dma_addr = ctx->mv_buf_dma[mv_buf_selector]; + } else { + /* to incorporate interleaved formats */ + int plane = fmt->coplanar ? p_data->vb_part : 0; + + vpdma_fmt = fmt->vpdma_fmt[plane]; + + dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane); + if (!dma_addr) { + vpe_err(ctx->dev, + "acquiring input buffer(%d) dma_addr failed\n", + port); + return; + } + } + + if (q_data->flags & Q_DATA_FRAME_1D) + flags |= VPDMA_DATA_FRAME_1D; + if (q_data->flags & Q_DATA_MODE_TILED) + flags |= VPDMA_DATA_MODE_TILED; + + vpdma_add_in_dtd(&ctx->desc_list, q_data->width, q_data->height, + c_rect, vpdma_fmt, dma_addr, p_data->channel, field, flags); +} + +/* + * Enable the expected IRQ sources + */ +static void enable_irqs(struct vpe_ctx *ctx) +{ + write_reg(ctx->dev, VPE_INT0_ENABLE0_SET, VPE_INT0_LIST0_COMPLETE); + write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT | + VPE_DS1_UV_ERROR_INT); + + vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, true); +} + +static void disable_irqs(struct vpe_ctx *ctx) +{ + write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff); + write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff); + + vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, false); +} + +/* device_run() - prepares and starts the device + * + * This function is only called when both the source and destination + * buffers are in place. 
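+ * When de-interlacing, job_ready() has also ensured that the two previous + * input fields are available, so src_vbs[1] and src_vbs[2] can be taken from + * the ready queue here as well.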
+ */ +static void device_run(void *priv) +{ + struct vpe_ctx *ctx = priv; + struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST]; + + if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) { + ctx->src_vbs[2] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx); + WARN_ON(ctx->src_vbs[2] == NULL); + ctx->src_vbs[1] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx); + WARN_ON(ctx->src_vbs[1] == NULL); + } + + ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx); + WARN_ON(ctx->src_vbs[0] == NULL); + ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx); + WARN_ON(ctx->dst_vb == NULL); + + /* config descriptors */ + if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) { + vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb); + vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb); + ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr; + ctx->load_mmrs = false; + } + + /* output data descriptors */ + if (ctx->deinterlacing) + add_out_dtd(ctx, VPE_PORT_MV_OUT); + + add_out_dtd(ctx, VPE_PORT_LUMA_OUT); + if (d_q_data->fmt->coplanar) + add_out_dtd(ctx, VPE_PORT_CHROMA_OUT); + + /* input data descriptors */ + if (ctx->deinterlacing) { + add_in_dtd(ctx, VPE_PORT_LUMA3_IN); + add_in_dtd(ctx, VPE_PORT_CHROMA3_IN); + + add_in_dtd(ctx, VPE_PORT_LUMA2_IN); + add_in_dtd(ctx, VPE_PORT_CHROMA2_IN); + } + + add_in_dtd(ctx, VPE_PORT_LUMA1_IN); + add_in_dtd(ctx, VPE_PORT_CHROMA1_IN); + + if (ctx->deinterlacing) + add_in_dtd(ctx, VPE_PORT_MV_IN); + + /* sync on channel control descriptors for input ports */ + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA1_IN); + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA1_IN); + + if (ctx->deinterlacing) { + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, + VPE_CHAN_LUMA2_IN); + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, + VPE_CHAN_CHROMA2_IN); + + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, + VPE_CHAN_LUMA3_IN); + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, + VPE_CHAN_CHROMA3_IN); + + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_IN); + } + + /* sync on channel control descriptors for output ports */ + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA_OUT); + if (d_q_data->fmt->coplanar) + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA_OUT); + + if (ctx->deinterlacing) + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT); + + enable_irqs(ctx); + + vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf); + vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list); +} + +static void dei_error(struct vpe_ctx *ctx) +{ + dev_warn(ctx->dev->v4l2_dev.dev, + "received DEI error interrupt\n"); +} + +static void ds1_uv_error(struct vpe_ctx *ctx) +{ + dev_warn(ctx->dev->v4l2_dev.dev, + "received downsampler error interrupt\n"); +} + +static irqreturn_t vpe_irq(int irq_vpe, void *data) +{ + struct vpe_dev *dev = (struct vpe_dev *)data; + struct vpe_ctx *ctx; + struct vpe_q_data *d_q_data; + struct vb2_buffer *s_vb, *d_vb; + struct v4l2_buffer *s_buf, *d_buf; + unsigned long flags; + u32 irqst0, irqst1; + + irqst0 = read_reg(dev, VPE_INT0_STATUS0); + if (irqst0) { + write_reg(dev, VPE_INT0_STATUS0_CLR, irqst0); + vpe_dbg(dev, "INT0_STATUS0 = 0x%08x\n", irqst0); + } + + irqst1 = read_reg(dev, VPE_INT0_STATUS1); + if (irqst1) { + write_reg(dev, VPE_INT0_STATUS1_CLR, irqst1); + vpe_dbg(dev, "INT0_STATUS1 = 0x%08x\n", irqst1); + } + + ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev); + if (!ctx) { + vpe_err(dev, "instance released before end of transaction\n"); + goto handled; + } + + if (irqst1) { + if 
(irqst1 & VPE_DEI_ERROR_INT) { + irqst1 &= ~VPE_DEI_ERROR_INT; + dei_error(ctx); + } + if (irqst1 & VPE_DS1_UV_ERROR_INT) { + irqst1 &= ~VPE_DS1_UV_ERROR_INT; + ds1_uv_error(ctx); + } + } + + if (irqst0) { + if (irqst0 & VPE_INT0_LIST0_COMPLETE) + vpdma_clear_list_stat(ctx->dev->vpdma); + + irqst0 &= ~(VPE_INT0_LIST0_COMPLETE); + } + + if (irqst0 | irqst1) { + dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: " + "INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n", + irqst0, irqst1); + } + + disable_irqs(ctx); + + vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf); + vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb); + + vpdma_reset_desc_list(&ctx->desc_list); + + /* the previous dst mv buffer becomes the next src mv buffer */ + ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector; + + if (ctx->aborting) + goto finished; + + s_vb = ctx->src_vbs[0]; + d_vb = ctx->dst_vb; + s_buf = &s_vb->v4l2_buf; + d_buf = &d_vb->v4l2_buf; + + d_buf->timestamp = s_buf->timestamp; + if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE) { + d_buf->flags |= V4L2_BUF_FLAG_TIMECODE; + d_buf->timecode = s_buf->timecode; + } + d_buf->sequence = ctx->sequence; + d_buf->field = ctx->field; + + d_q_data = &ctx->q_data[Q_DATA_DST]; + if (d_q_data->flags & Q_DATA_INTERLACED) { + if (ctx->field == V4L2_FIELD_BOTTOM) { + ctx->sequence++; + ctx->field = V4L2_FIELD_TOP; + } else { + WARN_ON(ctx->field != V4L2_FIELD_TOP); + ctx->field = V4L2_FIELD_BOTTOM; + } + } else { + ctx->sequence++; + } + + if (ctx->deinterlacing) + s_vb = ctx->src_vbs[2]; + + spin_lock_irqsave(&dev->lock, flags); + v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE); + v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE); + spin_unlock_irqrestore(&dev->lock, flags); + + if (ctx->deinterlacing) { + ctx->src_vbs[2] = ctx->src_vbs[1]; + ctx->src_vbs[1] = ctx->src_vbs[0]; + } + + ctx->bufs_completed++; + if (ctx->bufs_completed < ctx->bufs_per_job) { + device_run(ctx); + goto handled; + } + +finished: + vpe_dbg(ctx->dev, "finishing transaction\n"); + ctx->bufs_completed = 0; + v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx); +handled: + return IRQ_HANDLED; +} + +/* + * video ioctls + */ +static int vpe_querycap(struct file *file, void *priv, + struct v4l2_capability *cap) +{ + strncpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver) - 1); + strncpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card) - 1); + strlcpy(cap->bus_info, VPE_MODULE_NAME, sizeof(cap->bus_info)); + cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; + return 0; +} + +static int __enum_fmt(struct v4l2_fmtdesc *f, u32 type) +{ + int i, index; + struct vpe_fmt *fmt = NULL; + + index = 0; + for (i = 0; i < ARRAY_SIZE(vpe_formats); ++i) { + if (vpe_formats[i].types & type) { + if (index == f->index) { + fmt = &vpe_formats[i]; + break; + } + index++; + } + } + + if (!fmt) + return -EINVAL; + + strncpy(f->description, fmt->name, sizeof(f->description) - 1); + f->pixelformat = fmt->fourcc; + return 0; +} + +static int vpe_enum_fmt(struct file *file, void *priv, + struct v4l2_fmtdesc *f) +{ + if (V4L2_TYPE_IS_OUTPUT(f->type)) + return __enum_fmt(f, VPE_FMT_TYPE_OUTPUT); + + return __enum_fmt(f, VPE_FMT_TYPE_CAPTURE); +} + +static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f) +{ + struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp; + struct vpe_ctx *ctx = file2ctx(file); + struct vb2_queue *vq; + struct vpe_q_data *q_data; + int i; + + vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); + if (!vq) + return -EINVAL; + + q_data = 
get_q_data(ctx, f->type); + + pix->width = q_data->width; + pix->height = q_data->height; + pix->pixelformat = q_data->fmt->fourcc; + pix->field = q_data->field; + + if (V4L2_TYPE_IS_OUTPUT(f->type)) { + pix->colorspace = q_data->colorspace; + } else { + struct vpe_q_data *s_q_data; + + /* get colorspace from the source queue */ + s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); + + pix->colorspace = s_q_data->colorspace; + } + + pix->num_planes = q_data->fmt->coplanar ? 2 : 1; + + for (i = 0; i < pix->num_planes; i++) { + pix->plane_fmt[i].bytesperline = q_data->bytesperline[i]; + pix->plane_fmt[i].sizeimage = q_data->sizeimage[i]; + } + + return 0; +} + +static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f, + struct vpe_fmt *fmt, int type) +{ + struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp; + struct v4l2_plane_pix_format *plane_fmt; + int i; + + if (!fmt || !(fmt->types & type)) { + vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n", + pix->pixelformat); + return -EINVAL; + } + + if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE) + pix->field = V4L2_FIELD_NONE; + + v4l_bound_align_image(&pix->width, MIN_W, MAX_W, W_ALIGN, + &pix->height, MIN_H, MAX_H, H_ALIGN, + S_ALIGN); + + pix->num_planes = fmt->coplanar ? 2 : 1; + pix->pixelformat = fmt->fourcc; + + if (type == VPE_FMT_TYPE_CAPTURE) { + struct vpe_q_data *s_q_data; + + /* get colorspace from the source queue */ + s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); + + pix->colorspace = s_q_data->colorspace; + } else { + if (!pix->colorspace) + pix->colorspace = V4L2_COLORSPACE_SMPTE240M; + } + + for (i = 0; i < pix->num_planes; i++) { + int depth; + + plane_fmt = &pix->plane_fmt[i]; + depth = fmt->vpdma_fmt[i]->depth; + + if (i == VPE_LUMA) + plane_fmt->bytesperline = + round_up((pix->width * depth) >> 3, + 1 << L_ALIGN); + else + plane_fmt->bytesperline = pix->width; + + plane_fmt->sizeimage = + (pix->height * pix->width * depth) >> 3; + } + + return 0; +} + +static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f) +{ + struct vpe_ctx *ctx = file2ctx(file); + struct vpe_fmt *fmt = find_format(f); + + if (V4L2_TYPE_IS_OUTPUT(f->type)) + return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_OUTPUT); + else + return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_CAPTURE); +} + +static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f) +{ + struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp; + struct v4l2_plane_pix_format *plane_fmt; + struct vpe_q_data *q_data; + struct vb2_queue *vq; + int i; + + vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); + if (!vq) + return -EINVAL; + + if (vb2_is_busy(vq)) { + vpe_err(ctx->dev, "queue busy\n"); + return -EBUSY; + } + + q_data = get_q_data(ctx, f->type); + if (!q_data) + return -EINVAL; + + q_data->fmt = find_format(f); + q_data->width = pix->width; + q_data->height = pix->height; + q_data->colorspace = pix->colorspace; + q_data->field = pix->field; + + for (i = 0; i < pix->num_planes; i++) { + plane_fmt = &pix->plane_fmt[i]; + + q_data->bytesperline[i] = plane_fmt->bytesperline; + q_data->sizeimage[i] = plane_fmt->sizeimage; + } + + q_data->c_rect.left = 0; + q_data->c_rect.top = 0; + q_data->c_rect.width = q_data->width; + q_data->c_rect.height = q_data->height; + + if (q_data->field == V4L2_FIELD_ALTERNATE) + q_data->flags |= Q_DATA_INTERLACED; + else + q_data->flags &= ~Q_DATA_INTERLACED; + + vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d", + f->type, q_data->width, 
q_data->height, q_data->fmt->fourcc, + q_data->bytesperline[VPE_LUMA]); + if (q_data->fmt->coplanar) + vpe_dbg(ctx->dev, " bpl_uv %d\n", + q_data->bytesperline[VPE_CHROMA]); + + return 0; +} + +static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f) +{ + int ret; + struct vpe_ctx *ctx = file2ctx(file); + + ret = vpe_try_fmt(file, priv, f); + if (ret) + return ret; + + ret = __vpe_s_fmt(ctx, f); + if (ret) + return ret; + + if (V4L2_TYPE_IS_OUTPUT(f->type)) + set_src_registers(ctx); + else + set_dst_registers(ctx); + + return set_srcdst_params(ctx); +} + +static int vpe_reqbufs(struct file *file, void *priv, + struct v4l2_requestbuffers *reqbufs) +{ + struct vpe_ctx *ctx = file2ctx(file); + + return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs); +} + +static int vpe_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf) +{ + struct vpe_ctx *ctx = file2ctx(file); + + return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf); +} + +static int vpe_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) +{ + struct vpe_ctx *ctx = file2ctx(file); + + return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf); +} + +static int vpe_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) +{ + struct vpe_ctx *ctx = file2ctx(file); + + return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf); +} + +static int vpe_streamon(struct file *file, void *priv, enum v4l2_buf_type type) +{ + struct vpe_ctx *ctx = file2ctx(file); + + return v4l2_m2m_streamon(file, ctx->m2m_ctx, type); +} + +static int vpe_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) +{ + struct vpe_ctx *ctx = file2ctx(file); + + vpe_dump_regs(ctx->dev); + vpdma_dump_regs(ctx->dev->vpdma); + + return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type); +} + +/* + * defines number of buffers/frames a context can process with VPE before + * switching to a different context. default value is 1 buffer per context + */ +#define V4L2_CID_VPE_BUFS_PER_JOB (V4L2_CID_USER_TI_VPE_BASE + 0) + +static int vpe_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct vpe_ctx *ctx = + container_of(ctrl->handler, struct vpe_ctx, hdl); + + switch (ctrl->id) { + case V4L2_CID_VPE_BUFS_PER_JOB: + ctx->bufs_per_job = ctrl->val; + break; + + default: + vpe_err(ctx->dev, "Invalid control\n"); + return -EINVAL; + } + + return 0; +} + +static const struct v4l2_ctrl_ops vpe_ctrl_ops = { + .s_ctrl = vpe_s_ctrl, +}; + +static const struct v4l2_ioctl_ops vpe_ioctl_ops = { + .vidioc_querycap = vpe_querycap, + + .vidioc_enum_fmt_vid_cap_mplane = vpe_enum_fmt, + .vidioc_g_fmt_vid_cap_mplane = vpe_g_fmt, + .vidioc_try_fmt_vid_cap_mplane = vpe_try_fmt, + .vidioc_s_fmt_vid_cap_mplane = vpe_s_fmt, + + .vidioc_enum_fmt_vid_out_mplane = vpe_enum_fmt, + .vidioc_g_fmt_vid_out_mplane = vpe_g_fmt, + .vidioc_try_fmt_vid_out_mplane = vpe_try_fmt, + .vidioc_s_fmt_vid_out_mplane = vpe_s_fmt, + + .vidioc_reqbufs = vpe_reqbufs, + .vidioc_querybuf = vpe_querybuf, + + .vidioc_qbuf = vpe_qbuf, + .vidioc_dqbuf = vpe_dqbuf, + + .vidioc_streamon = vpe_streamon, + .vidioc_streamoff = vpe_streamoff, + .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, +}; + +/* + * Queue operations + */ +static int vpe_queue_setup(struct vb2_queue *vq, + const struct v4l2_format *fmt, + unsigned int *nbuffers, unsigned int *nplanes, + unsigned int sizes[], void *alloc_ctxs[]) +{ + int i; + struct vpe_ctx *ctx = vb2_get_drv_priv(vq); + struct vpe_q_data *q_data; + + q_data = get_q_data(ctx, vq->type); + + *nplanes = q_data->fmt->coplanar ? 
2 : 1; + + for (i = 0; i < *nplanes; i++) { + sizes[i] = q_data->sizeimage[i]; + alloc_ctxs[i] = ctx->dev->alloc_ctx; + } + + vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers, + sizes[VPE_LUMA]); + if (q_data->fmt->coplanar) + vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]); + + return 0; +} + +static int vpe_buf_prepare(struct vb2_buffer *vb) +{ + struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + struct vpe_q_data *q_data; + int i, num_planes; + + vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type); + + q_data = get_q_data(ctx, vb->vb2_queue->type); + num_planes = q_data->fmt->coplanar ? 2 : 1; + + for (i = 0; i < num_planes; i++) { + if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) { + vpe_err(ctx->dev, + "data will not fit into plane (%lu < %lu)\n", + vb2_plane_size(vb, i), + (long) q_data->sizeimage[i]); + return -EINVAL; + } + } + + for (i = 0; i < num_planes; i++) + vb2_set_plane_payload(vb, i, q_data->sizeimage[i]); + + return 0; +} + +static void vpe_buf_queue(struct vb2_buffer *vb) +{ + struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + v4l2_m2m_buf_queue(ctx->m2m_ctx, vb); +} + +static void vpe_wait_prepare(struct vb2_queue *q) +{ + struct vpe_ctx *ctx = vb2_get_drv_priv(q); + vpe_unlock(ctx); +} + +static void vpe_wait_finish(struct vb2_queue *q) +{ + struct vpe_ctx *ctx = vb2_get_drv_priv(q); + vpe_lock(ctx); +} + +static struct vb2_ops vpe_qops = { + .queue_setup = vpe_queue_setup, + .buf_prepare = vpe_buf_prepare, + .buf_queue = vpe_buf_queue, + .wait_prepare = vpe_wait_prepare, + .wait_finish = vpe_wait_finish, +}; + +static int queue_init(void *priv, struct vb2_queue *src_vq, + struct vb2_queue *dst_vq) +{ + struct vpe_ctx *ctx = priv; + int ret; + + memset(src_vq, 0, sizeof(*src_vq)); + src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + src_vq->io_modes = VB2_MMAP; + src_vq->drv_priv = ctx; + src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); + src_vq->ops = &vpe_qops; + src_vq->mem_ops = &vb2_dma_contig_memops; + src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY; + + ret = vb2_queue_init(src_vq); + if (ret) + return ret; + + memset(dst_vq, 0, sizeof(*dst_vq)); + dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + dst_vq->io_modes = VB2_MMAP; + dst_vq->drv_priv = ctx; + dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); + dst_vq->ops = &vpe_qops; + dst_vq->mem_ops = &vb2_dma_contig_memops; + dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY; + + return vb2_queue_init(dst_vq); +} + +static const struct v4l2_ctrl_config vpe_bufs_per_job = { + .ops = &vpe_ctrl_ops, + .id = V4L2_CID_VPE_BUFS_PER_JOB, + .name = "Buffers Per Transaction", + .type = V4L2_CTRL_TYPE_INTEGER, + .def = VPE_DEF_BUFS_PER_JOB, + .min = 1, + .max = VIDEO_MAX_FRAME, + .step = 1, +}; + +/* + * File operations + */ +static int vpe_open(struct file *file) +{ + struct vpe_dev *dev = video_drvdata(file); + struct vpe_ctx *ctx = NULL; + struct vpe_q_data *s_q_data; + struct v4l2_ctrl_handler *hdl; + int ret; + + vpe_dbg(dev, "vpe_open\n"); + + if (!dev->vpdma->ready) { + vpe_err(dev, "vpdma firmware not loaded\n"); + return -ENODEV; + } + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->dev = dev; + + if (mutex_lock_interruptible(&dev->dev_mutex)) { + ret = -ERESTARTSYS; + goto free_ctx; + } + + ret = vpdma_create_desc_list(&ctx->desc_list, VPE_DESC_LIST_SIZE, + VPDMA_LIST_TYPE_NORMAL); + if (ret != 0) + goto unlock; + + ret = vpdma_alloc_desc_buf(&ctx->mmr_adb, sizeof(struct vpe_mmr_adb)); + if (ret != 0) + goto free_desc_list; 
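+ + /* + * mmr_adb is a shadow copy of the VPE register settings, laid out as VPDMA + * address/data blocks; device_run() submits it as a configuration descriptor + * whenever ctx->load_mmrs is set or another context's shadow registers are + * currently loaded in the hardware. + */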
+ + init_adb_hdrs(ctx); + + v4l2_fh_init(&ctx->fh, video_devdata(file)); + file->private_data = &ctx->fh; + + hdl = &ctx->hdl; + v4l2_ctrl_handler_init(hdl, 1); + v4l2_ctrl_new_custom(hdl, &vpe_bufs_per_job, NULL); + if (hdl->error) { + ret = hdl->error; + goto exit_fh; + } + ctx->fh.ctrl_handler = hdl; + v4l2_ctrl_handler_setup(hdl); + + s_q_data = &ctx->q_data[Q_DATA_SRC]; + s_q_data->fmt = &vpe_formats[2]; + s_q_data->width = 1920; + s_q_data->height = 1080; + s_q_data->sizeimage[VPE_LUMA] = (s_q_data->width * s_q_data->height * + s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3; + s_q_data->colorspace = V4L2_COLORSPACE_SMPTE240M; + s_q_data->field = V4L2_FIELD_NONE; + s_q_data->c_rect.left = 0; + s_q_data->c_rect.top = 0; + s_q_data->c_rect.width = s_q_data->width; + s_q_data->c_rect.height = s_q_data->height; + s_q_data->flags = 0; + + ctx->q_data[Q_DATA_DST] = *s_q_data; + + set_dei_shadow_registers(ctx); + set_src_registers(ctx); + set_dst_registers(ctx); + ret = set_srcdst_params(ctx); + if (ret) + goto exit_fh; + + ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init); + + if (IS_ERR(ctx->m2m_ctx)) { + ret = PTR_ERR(ctx->m2m_ctx); + goto exit_fh; + } + + v4l2_fh_add(&ctx->fh); + + /* + * for now, just report the creation of the first instance, we can later + * optimize the driver to enable or disable clocks when the first + * instance is created or the last instance released + */ + if (atomic_inc_return(&dev->num_instances) == 1) + vpe_dbg(dev, "first instance created\n"); + + ctx->bufs_per_job = VPE_DEF_BUFS_PER_JOB; + + ctx->load_mmrs = true; + + vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n", + ctx, ctx->m2m_ctx); + + mutex_unlock(&dev->dev_mutex); + + return 0; +exit_fh: + v4l2_ctrl_handler_free(hdl); + v4l2_fh_exit(&ctx->fh); + vpdma_free_desc_buf(&ctx->mmr_adb); +free_desc_list: + vpdma_free_desc_list(&ctx->desc_list); +unlock: + mutex_unlock(&dev->dev_mutex); +free_ctx: + kfree(ctx); + return ret; +} + +static int vpe_release(struct file *file) +{ + struct vpe_dev *dev = video_drvdata(file); + struct vpe_ctx *ctx = file2ctx(file); + + vpe_dbg(dev, "releasing instance %p\n", ctx); + + mutex_lock(&dev->dev_mutex); + free_vbs(ctx); + free_mv_buffers(ctx); + vpdma_free_desc_list(&ctx->desc_list); + vpdma_free_desc_buf(&ctx->mmr_adb); + + v4l2_fh_del(&ctx->fh); + v4l2_fh_exit(&ctx->fh); + v4l2_ctrl_handler_free(&ctx->hdl); + v4l2_m2m_ctx_release(ctx->m2m_ctx); + + kfree(ctx); + + /* + * for now, just report the release of the last instance, we can later + * optimize the driver to enable or disable clocks when the first + * instance is created or the last instance released + */ + if (atomic_dec_return(&dev->num_instances) == 0) + vpe_dbg(dev, "last instance released\n"); + + mutex_unlock(&dev->dev_mutex); + + return 0; +} + +static unsigned int vpe_poll(struct file *file, + struct poll_table_struct *wait) +{ + struct vpe_ctx *ctx = file2ctx(file); + struct vpe_dev *dev = ctx->dev; + int ret; + + mutex_lock(&dev->dev_mutex); + ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait); + mutex_unlock(&dev->dev_mutex); + return ret; +} + +static int vpe_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct vpe_ctx *ctx = file2ctx(file); + struct vpe_dev *dev = ctx->dev; + int ret; + + if (mutex_lock_interruptible(&dev->dev_mutex)) + return -ERESTARTSYS; + ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma); + mutex_unlock(&dev->dev_mutex); + return ret; +} + +static const struct v4l2_file_operations vpe_fops = { + .owner = THIS_MODULE, + .open = vpe_open, + .release = 
vpe_release, + .poll = vpe_poll, + .unlocked_ioctl = video_ioctl2, + .mmap = vpe_mmap, +}; + +static struct video_device vpe_videodev = { + .name = VPE_MODULE_NAME, + .fops = &vpe_fops, + .ioctl_ops = &vpe_ioctl_ops, + .minor = -1, + .release = video_device_release, + .vfl_dir = VFL_DIR_M2M, +}; + +static struct v4l2_m2m_ops m2m_ops = { + .device_run = device_run, + .job_ready = job_ready, + .job_abort = job_abort, + .lock = vpe_lock, + .unlock = vpe_unlock, +}; + +static int vpe_runtime_get(struct platform_device *pdev) +{ + int r; + + dev_dbg(&pdev->dev, "vpe_runtime_get\n"); + + r = pm_runtime_get_sync(&pdev->dev); + WARN_ON(r < 0); + return r < 0 ? r : 0; +} + +static void vpe_runtime_put(struct platform_device *pdev) +{ + + int r; + + dev_dbg(&pdev->dev, "vpe_runtime_put\n"); + + r = pm_runtime_put_sync(&pdev->dev); + WARN_ON(r < 0 && r != -ENOSYS); +} + +static int vpe_probe(struct platform_device *pdev) +{ + struct vpe_dev *dev; + struct video_device *vfd; + struct resource *res; + int ret, irq, func; + + dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); + if (IS_ERR(dev)) + return PTR_ERR(dev); + + spin_lock_init(&dev->lock); + + ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); + if (ret) + return ret; + + atomic_set(&dev->num_instances, 0); + mutex_init(&dev->dev_mutex); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpe_top"); + /* + * HACK: we get resource info from device tree in the form of a list of + * VPE sub blocks, the driver currently uses only the base of vpe_top + * for register access, the driver should be changed later to access + * registers based on the sub block base addresses + */ + dev->base = devm_ioremap(&pdev->dev, res->start, SZ_32K); + if (IS_ERR(dev->base)) { + ret = PTR_ERR(dev->base); + goto v4l2_dev_unreg; + } + + irq = platform_get_irq(pdev, 0); + ret = devm_request_irq(&pdev->dev, irq, vpe_irq, 0, VPE_MODULE_NAME, + dev); + if (ret) + goto v4l2_dev_unreg; + + platform_set_drvdata(pdev, dev); + + dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev); + if (IS_ERR(dev->alloc_ctx)) { + vpe_err(dev, "Failed to alloc vb2 context\n"); + ret = PTR_ERR(dev->alloc_ctx); + goto v4l2_dev_unreg; + } + + dev->m2m_dev = v4l2_m2m_init(&m2m_ops); + if (IS_ERR(dev->m2m_dev)) { + vpe_err(dev, "Failed to init mem2mem device\n"); + ret = PTR_ERR(dev->m2m_dev); + goto rel_ctx; + } + + pm_runtime_enable(&pdev->dev); + + ret = vpe_runtime_get(pdev); + if (ret) + goto rel_m2m; + + /* Perform clk enable followed by reset */ + vpe_set_clock_enable(dev, 1); + + vpe_top_reset(dev); + + func = read_field_reg(dev, VPE_PID, VPE_PID_FUNC_MASK, + VPE_PID_FUNC_SHIFT); + vpe_dbg(dev, "VPE PID function %x\n", func); + + vpe_top_vpdma_reset(dev); + + dev->vpdma = vpdma_create(pdev); + if (IS_ERR(dev->vpdma)) + goto runtime_put; + + vfd = &dev->vfd; + *vfd = vpe_videodev; + vfd->lock = &dev->dev_mutex; + vfd->v4l2_dev = &dev->v4l2_dev; + + ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); + if (ret) { + vpe_err(dev, "Failed to register video device\n"); + goto runtime_put; + } + + video_set_drvdata(vfd, dev); + snprintf(vfd->name, sizeof(vfd->name), "%s", vpe_videodev.name); + dev_info(dev->v4l2_dev.dev, "Device registered as /dev/video%d\n", + vfd->num); + + return 0; + +runtime_put: + vpe_runtime_put(pdev); +rel_m2m: + pm_runtime_disable(&pdev->dev); + v4l2_m2m_release(dev->m2m_dev); +rel_ctx: + vb2_dma_contig_cleanup_ctx(dev->alloc_ctx); +v4l2_dev_unreg: + v4l2_device_unregister(&dev->v4l2_dev); + + return ret; +} + +static int vpe_remove(struct 
platform_device *pdev) +{ + struct vpe_dev *dev = + (struct vpe_dev *) platform_get_drvdata(pdev); + + v4l2_info(&dev->v4l2_dev, "Removing " VPE_MODULE_NAME); + + v4l2_m2m_release(dev->m2m_dev); + video_unregister_device(&dev->vfd); + v4l2_device_unregister(&dev->v4l2_dev); + vb2_dma_contig_cleanup_ctx(dev->alloc_ctx); + + vpe_set_clock_enable(dev, 0); + vpe_runtime_put(pdev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +#if defined(CONFIG_OF) +static const struct of_device_id vpe_of_match[] = { + { + .compatible = "ti,vpe", + }, + {}, +}; +#else +#define vpe_of_match NULL +#endif + +static struct platform_driver vpe_pdrv = { + .probe = vpe_probe, + .remove = vpe_remove, + .driver = { + .name = VPE_MODULE_NAME, + .owner = THIS_MODULE, + .of_match_table = vpe_of_match, + }, +}; + +static void __exit vpe_exit(void) +{ + platform_driver_unregister(&vpe_pdrv); +} + +static int __init vpe_init(void) +{ + return platform_driver_register(&vpe_pdrv); +} + +module_init(vpe_init); +module_exit(vpe_exit); + +MODULE_DESCRIPTION("TI VPE driver"); +MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/platform/ti-vpe/vpe_regs.h b/drivers/media/platform/ti-vpe/vpe_regs.h new file mode 100644 index 000000000000..ed214e828398 --- /dev/null +++ b/drivers/media/platform/ti-vpe/vpe_regs.h @@ -0,0 +1,496 @@ +/* + * Copyright (c) 2013 Texas Instruments Inc. + * + * David Griego, <dagriego@biglakesoftware.com> + * Dale Farnsworth, <dale@farnsworth.org> + * Archit Taneja, <archit@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#ifndef __TI_VPE_REGS_H +#define __TI_VPE_REGS_H + +/* VPE register offsets and field selectors */ + +/* VPE top level regs */ +#define VPE_PID 0x0000 +#define VPE_PID_MINOR_MASK 0x3f +#define VPE_PID_MINOR_SHIFT 0 +#define VPE_PID_CUSTOM_MASK 0x03 +#define VPE_PID_CUSTOM_SHIFT 6 +#define VPE_PID_MAJOR_MASK 0x07 +#define VPE_PID_MAJOR_SHIFT 8 +#define VPE_PID_RTL_MASK 0x1f +#define VPE_PID_RTL_SHIFT 11 +#define VPE_PID_FUNC_MASK 0xfff +#define VPE_PID_FUNC_SHIFT 16 +#define VPE_PID_SCHEME_MASK 0x03 +#define VPE_PID_SCHEME_SHIFT 30 + +#define VPE_SYSCONFIG 0x0010 +#define VPE_SYSCONFIG_IDLE_MASK 0x03 +#define VPE_SYSCONFIG_IDLE_SHIFT 2 +#define VPE_SYSCONFIG_STANDBY_MASK 0x03 +#define VPE_SYSCONFIG_STANDBY_SHIFT 4 +#define VPE_FORCE_IDLE_MODE 0 +#define VPE_NO_IDLE_MODE 1 +#define VPE_SMART_IDLE_MODE 2 +#define VPE_SMART_IDLE_WAKEUP_MODE 3 +#define VPE_FORCE_STANDBY_MODE 0 +#define VPE_NO_STANDBY_MODE 1 +#define VPE_SMART_STANDBY_MODE 2 +#define VPE_SMART_STANDBY_WAKEUP_MODE 3 + +#define VPE_INT0_STATUS0_RAW_SET 0x0020 +#define VPE_INT0_STATUS0_RAW VPE_INT0_STATUS0_RAW_SET +#define VPE_INT0_STATUS0_CLR 0x0028 +#define VPE_INT0_STATUS0 VPE_INT0_STATUS0_CLR +#define VPE_INT0_ENABLE0_SET 0x0030 +#define VPE_INT0_ENABLE0 VPE_INT0_ENABLE0_SET +#define VPE_INT0_ENABLE0_CLR 0x0038 +#define VPE_INT0_LIST0_COMPLETE (1 << 0) +#define VPE_INT0_LIST0_NOTIFY (1 << 1) +#define VPE_INT0_LIST1_COMPLETE (1 << 2) +#define VPE_INT0_LIST1_NOTIFY (1 << 3) +#define VPE_INT0_LIST2_COMPLETE (1 << 4) +#define VPE_INT0_LIST2_NOTIFY (1 << 5) +#define VPE_INT0_LIST3_COMPLETE (1 << 6) +#define VPE_INT0_LIST3_NOTIFY (1 << 7) +#define VPE_INT0_LIST4_COMPLETE (1 << 8) +#define VPE_INT0_LIST4_NOTIFY (1 << 9) +#define VPE_INT0_LIST5_COMPLETE (1 << 10) +#define VPE_INT0_LIST5_NOTIFY (1 << 11) +#define 
VPE_INT0_LIST6_COMPLETE (1 << 12) +#define VPE_INT0_LIST6_NOTIFY (1 << 13) +#define VPE_INT0_LIST7_COMPLETE (1 << 14) +#define VPE_INT0_LIST7_NOTIFY (1 << 15) +#define VPE_INT0_DESCRIPTOR (1 << 16) +#define VPE_DEI_FMD_INT (1 << 18) + +#define VPE_INT0_STATUS1_RAW_SET 0x0024 +#define VPE_INT0_STATUS1_RAW VPE_INT0_STATUS1_RAW_SET +#define VPE_INT0_STATUS1_CLR 0x002c +#define VPE_INT0_STATUS1 VPE_INT0_STATUS1_CLR +#define VPE_INT0_ENABLE1_SET 0x0034 +#define VPE_INT0_ENABLE1 VPE_INT0_ENABLE1_SET +#define VPE_INT0_ENABLE1_CLR 0x003c +#define VPE_INT0_CHANNEL_GROUP0 (1 << 0) +#define VPE_INT0_CHANNEL_GROUP1 (1 << 1) +#define VPE_INT0_CHANNEL_GROUP2 (1 << 2) +#define VPE_INT0_CHANNEL_GROUP3 (1 << 3) +#define VPE_INT0_CHANNEL_GROUP4 (1 << 4) +#define VPE_INT0_CHANNEL_GROUP5 (1 << 5) +#define VPE_INT0_CLIENT (1 << 7) +#define VPE_DEI_ERROR_INT (1 << 16) +#define VPE_DS1_UV_ERROR_INT (1 << 22) + +#define VPE_INTC_EOI 0x00a0 + +#define VPE_CLK_ENABLE 0x0100 +#define VPE_VPEDMA_CLK_ENABLE (1 << 0) +#define VPE_DATA_PATH_CLK_ENABLE (1 << 1) + +#define VPE_CLK_RESET 0x0104 +#define VPE_VPDMA_CLK_RESET_MASK 0x1 +#define VPE_VPDMA_CLK_RESET_SHIFT 0 +#define VPE_DATA_PATH_CLK_RESET_MASK 0x1 +#define VPE_DATA_PATH_CLK_RESET_SHIFT 1 +#define VPE_MAIN_RESET_MASK 0x1 +#define VPE_MAIN_RESET_SHIFT 31 + +#define VPE_CLK_FORMAT_SELECT 0x010c +#define VPE_CSC_SRC_SELECT_MASK 0x03 +#define VPE_CSC_SRC_SELECT_SHIFT 0 +#define VPE_RGB_OUT_SELECT (1 << 8) +#define VPE_DS_SRC_SELECT_MASK 0x07 +#define VPE_DS_SRC_SELECT_SHIFT 9 +#define VPE_DS_BYPASS (1 << 16) +#define VPE_COLOR_SEPARATE_422 (1 << 18) + +#define VPE_DS_SRC_DEI_SCALER (5 << VPE_DS_SRC_SELECT_SHIFT) +#define VPE_CSC_SRC_DEI_SCALER (3 << VPE_CSC_SRC_SELECT_SHIFT) + +#define VPE_CLK_RANGE_MAP 0x011c +#define VPE_RANGE_RANGE_MAP_Y_MASK 0x07 +#define VPE_RANGE_RANGE_MAP_Y_SHIFT 0 +#define VPE_RANGE_RANGE_MAP_UV_MASK 0x07 +#define VPE_RANGE_RANGE_MAP_UV_SHIFT 3 +#define VPE_RANGE_MAP_ON (1 << 6) +#define VPE_RANGE_REDUCTION_ON (1 << 28) + +/* VPE chrominance upsampler regs */ +#define VPE_US1_R0 0x0304 +#define VPE_US2_R0 0x0404 +#define VPE_US3_R0 0x0504 +#define VPE_US_C1_MASK 0x3fff +#define VPE_US_C1_SHIFT 2 +#define VPE_US_C0_MASK 0x3fff +#define VPE_US_C0_SHIFT 18 +#define VPE_US_MODE_MASK 0x03 +#define VPE_US_MODE_SHIFT 16 +#define VPE_ANCHOR_FID0_C1_MASK 0x3fff +#define VPE_ANCHOR_FID0_C1_SHIFT 2 +#define VPE_ANCHOR_FID0_C0_MASK 0x3fff +#define VPE_ANCHOR_FID0_C0_SHIFT 18 + +#define VPE_US1_R1 0x0308 +#define VPE_US2_R1 0x0408 +#define VPE_US3_R1 0x0508 +#define VPE_ANCHOR_FID0_C3_MASK 0x3fff +#define VPE_ANCHOR_FID0_C3_SHIFT 2 +#define VPE_ANCHOR_FID0_C2_MASK 0x3fff +#define VPE_ANCHOR_FID0_C2_SHIFT 18 + +#define VPE_US1_R2 0x030c +#define VPE_US2_R2 0x040c +#define VPE_US3_R2 0x050c +#define VPE_INTERP_FID0_C1_MASK 0x3fff +#define VPE_INTERP_FID0_C1_SHIFT 2 +#define VPE_INTERP_FID0_C0_MASK 0x3fff +#define VPE_INTERP_FID0_C0_SHIFT 18 + +#define VPE_US1_R3 0x0310 +#define VPE_US2_R3 0x0410 +#define VPE_US3_R3 0x0510 +#define VPE_INTERP_FID0_C3_MASK 0x3fff +#define VPE_INTERP_FID0_C3_SHIFT 2 +#define VPE_INTERP_FID0_C2_MASK 0x3fff +#define VPE_INTERP_FID0_C2_SHIFT 18 + +#define VPE_US1_R4 0x0314 +#define VPE_US2_R4 0x0414 +#define VPE_US3_R4 0x0514 +#define VPE_ANCHOR_FID1_C1_MASK 0x3fff +#define VPE_ANCHOR_FID1_C1_SHIFT 2 +#define VPE_ANCHOR_FID1_C0_MASK 0x3fff +#define VPE_ANCHOR_FID1_C0_SHIFT 18 + +#define VPE_US1_R5 0x0318 +#define VPE_US2_R5 0x0418 +#define VPE_US3_R5 0x0518 +#define VPE_ANCHOR_FID1_C3_MASK 0x3fff +#define 
VPE_ANCHOR_FID1_C3_SHIFT 2 +#define VPE_ANCHOR_FID1_C2_MASK 0x3fff +#define VPE_ANCHOR_FID1_C2_SHIFT 18 + +#define VPE_US1_R6 0x031c +#define VPE_US2_R6 0x041c +#define VPE_US3_R6 0x051c +#define VPE_INTERP_FID1_C1_MASK 0x3fff +#define VPE_INTERP_FID1_C1_SHIFT 2 +#define VPE_INTERP_FID1_C0_MASK 0x3fff +#define VPE_INTERP_FID1_C0_SHIFT 18 + +#define VPE_US1_R7 0x0320 +#define VPE_US2_R7 0x0420 +#define VPE_US3_R7 0x0520 +#define VPE_INTERP_FID0_C3_MASK 0x3fff +#define VPE_INTERP_FID0_C3_SHIFT 2 +#define VPE_INTERP_FID0_C2_MASK 0x3fff +#define VPE_INTERP_FID0_C2_SHIFT 18 + +/* VPE de-interlacer regs */ +#define VPE_DEI_FRAME_SIZE 0x0600 +#define VPE_DEI_WIDTH_MASK 0x07ff +#define VPE_DEI_WIDTH_SHIFT 0 +#define VPE_DEI_HEIGHT_MASK 0x07ff +#define VPE_DEI_HEIGHT_SHIFT 16 +#define VPE_DEI_INTERLACE_BYPASS (1 << 29) +#define VPE_DEI_FIELD_FLUSH (1 << 30) +#define VPE_DEI_PROGRESSIVE (1 << 31) + +#define VPE_MDT_BYPASS 0x0604 +#define VPE_MDT_TEMPMAX_BYPASS (1 << 0) +#define VPE_MDT_SPATMAX_BYPASS (1 << 1) + +#define VPE_MDT_SF_THRESHOLD 0x0608 +#define VPE_MDT_SF_SC_THR1_MASK 0xff +#define VPE_MDT_SF_SC_THR1_SHIFT 0 +#define VPE_MDT_SF_SC_THR2_MASK 0xff +#define VPE_MDT_SF_SC_THR2_SHIFT 0 +#define VPE_MDT_SF_SC_THR3_MASK 0xff +#define VPE_MDT_SF_SC_THR3_SHIFT 0 + +#define VPE_EDI_CONFIG 0x060c +#define VPE_EDI_INP_MODE_MASK 0x03 +#define VPE_EDI_INP_MODE_SHIFT 0 +#define VPE_EDI_ENABLE_3D (1 << 2) +#define VPE_EDI_ENABLE_CHROMA_3D (1 << 3) +#define VPE_EDI_CHROMA3D_COR_THR_MASK 0xff +#define VPE_EDI_CHROMA3D_COR_THR_SHIFT 8 +#define VPE_EDI_DIR_COR_LOWER_THR_MASK 0xff +#define VPE_EDI_DIR_COR_LOWER_THR_SHIFT 16 +#define VPE_EDI_COR_SCALE_FACTOR_MASK 0xff +#define VPE_EDI_COR_SCALE_FACTOR_SHIFT 23 + +#define VPE_DEI_EDI_LUT_R0 0x0610 +#define VPE_EDI_LUT0_MASK 0x1f +#define VPE_EDI_LUT0_SHIFT 0 +#define VPE_EDI_LUT1_MASK 0x1f +#define VPE_EDI_LUT1_SHIFT 8 +#define VPE_EDI_LUT2_MASK 0x1f +#define VPE_EDI_LUT2_SHIFT 16 +#define VPE_EDI_LUT3_MASK 0x1f +#define VPE_EDI_LUT3_SHIFT 24 + +#define VPE_DEI_EDI_LUT_R1 0x0614 +#define VPE_EDI_LUT0_MASK 0x1f +#define VPE_EDI_LUT0_SHIFT 0 +#define VPE_EDI_LUT1_MASK 0x1f +#define VPE_EDI_LUT1_SHIFT 8 +#define VPE_EDI_LUT2_MASK 0x1f +#define VPE_EDI_LUT2_SHIFT 16 +#define VPE_EDI_LUT3_MASK 0x1f +#define VPE_EDI_LUT3_SHIFT 24 + +#define VPE_DEI_EDI_LUT_R2 0x0618 +#define VPE_EDI_LUT4_MASK 0x1f +#define VPE_EDI_LUT4_SHIFT 0 +#define VPE_EDI_LUT5_MASK 0x1f +#define VPE_EDI_LUT5_SHIFT 8 +#define VPE_EDI_LUT6_MASK 0x1f +#define VPE_EDI_LUT6_SHIFT 16 +#define VPE_EDI_LUT7_MASK 0x1f +#define VPE_EDI_LUT7_SHIFT 24 + +#define VPE_DEI_EDI_LUT_R3 0x061c +#define VPE_EDI_LUT8_MASK 0x1f +#define VPE_EDI_LUT8_SHIFT 0 +#define VPE_EDI_LUT9_MASK 0x1f +#define VPE_EDI_LUT9_SHIFT 8 +#define VPE_EDI_LUT10_MASK 0x1f +#define VPE_EDI_LUT10_SHIFT 16 +#define VPE_EDI_LUT11_MASK 0x1f +#define VPE_EDI_LUT11_SHIFT 24 + +#define VPE_DEI_FMD_WINDOW_R0 0x0620 +#define VPE_FMD_WINDOW_MINX_MASK 0x07ff +#define VPE_FMD_WINDOW_MINX_SHIFT 0 +#define VPE_FMD_WINDOW_MAXX_MASK 0x07ff +#define VPE_FMD_WINDOW_MAXX_SHIFT 16 +#define VPE_FMD_WINDOW_ENABLE (1 << 31) + +#define VPE_DEI_FMD_WINDOW_R1 0x0624 +#define VPE_FMD_WINDOW_MINY_MASK 0x07ff +#define VPE_FMD_WINDOW_MINY_SHIFT 0 +#define VPE_FMD_WINDOW_MAXY_MASK 0x07ff +#define VPE_FMD_WINDOW_MAXY_SHIFT 16 + +#define VPE_DEI_FMD_CONTROL_R0 0x0628 +#define VPE_FMD_ENABLE (1 << 0) +#define VPE_FMD_LOCK (1 << 1) +#define VPE_FMD_JAM_DIR (1 << 2) +#define VPE_FMD_BED_ENABLE (1 << 3) +#define VPE_FMD_CAF_FIELD_THR_MASK 0xff +#define 
VPE_FMD_CAF_FIELD_THR_SHIFT 16 +#define VPE_FMD_CAF_LINE_THR_MASK 0xff +#define VPE_FMD_CAF_LINE_THR_SHIFT 24 + +#define VPE_DEI_FMD_CONTROL_R1 0x062c +#define VPE_FMD_CAF_THR_MASK 0x000fffff +#define VPE_FMD_CAF_THR_SHIFT 0 + +#define VPE_DEI_FMD_STATUS_R0 0x0630 +#define VPE_FMD_CAF_MASK 0x000fffff +#define VPE_FMD_CAF_SHIFT 0 +#define VPE_FMD_RESET (1 << 24) + +#define VPE_DEI_FMD_STATUS_R1 0x0634 +#define VPE_FMD_FIELD_DIFF_MASK 0x0fffffff +#define VPE_FMD_FIELD_DIFF_SHIFT 0 + +#define VPE_DEI_FMD_STATUS_R2 0x0638 +#define VPE_FMD_FRAME_DIFF_MASK 0x000fffff +#define VPE_FMD_FRAME_DIFF_SHIFT 0 + +/* VPE scaler regs */ +#define VPE_SC_MP_SC0 0x0700 +#define VPE_INTERLACE_O (1 << 0) +#define VPE_LINEAR (1 << 1) +#define VPE_SC_BYPASS (1 << 2) +#define VPE_INVT_FID (1 << 3) +#define VPE_USE_RAV (1 << 4) +#define VPE_ENABLE_EV (1 << 5) +#define VPE_AUTO_HS (1 << 6) +#define VPE_DCM_2X (1 << 7) +#define VPE_DCM_4X (1 << 8) +#define VPE_HP_BYPASS (1 << 9) +#define VPE_INTERLACE_I (1 << 10) +#define VPE_ENABLE_SIN2_VER_INTP (1 << 11) +#define VPE_Y_PK_EN (1 << 14) +#define VPE_TRIM (1 << 15) +#define VPE_SELFGEN_FID (1 << 16) + +#define VPE_SC_MP_SC1 0x0704 +#define VPE_ROW_ACC_INC_MASK 0x07ffffff +#define VPE_ROW_ACC_INC_SHIFT 0 + +#define VPE_SC_MP_SC2 0x0708 +#define VPE_ROW_ACC_OFFSET_MASK 0x0fffffff +#define VPE_ROW_ACC_OFFSET_SHIFT 0 + +#define VPE_SC_MP_SC3 0x070c +#define VPE_ROW_ACC_OFFSET_B_MASK 0x0fffffff +#define VPE_ROW_ACC_OFFSET_B_SHIFT 0 + +#define VPE_SC_MP_SC4 0x0710 +#define VPE_TAR_H_MASK 0x07ff +#define VPE_TAR_H_SHIFT 0 +#define VPE_TAR_W_MASK 0x07ff +#define VPE_TAR_W_SHIFT 12 +#define VPE_LIN_ACC_INC_U_MASK 0x07 +#define VPE_LIN_ACC_INC_U_SHIFT 24 +#define VPE_NLIN_ACC_INIT_U_MASK 0x07 +#define VPE_NLIN_ACC_INIT_U_SHIFT 28 + +#define VPE_SC_MP_SC5 0x0714 +#define VPE_SRC_H_MASK 0x07ff +#define VPE_SRC_H_SHIFT 0 +#define VPE_SRC_W_MASK 0x07ff +#define VPE_SRC_W_SHIFT 12 +#define VPE_NLIN_ACC_INC_U_MASK 0x07 +#define VPE_NLIN_ACC_INC_U_SHIFT 24 + +#define VPE_SC_MP_SC6 0x0718 +#define VPE_ROW_ACC_INIT_RAV_MASK 0x03ff +#define VPE_ROW_ACC_INIT_RAV_SHIFT 0 +#define VPE_ROW_ACC_INIT_RAV_B_MASK 0x03ff +#define VPE_ROW_ACC_INIT_RAV_B_SHIFT 10 + +#define VPE_SC_MP_SC8 0x0720 +#define VPE_NLIN_LEFT_MASK 0x07ff +#define VPE_NLIN_LEFT_SHIFT 0 +#define VPE_NLIN_RIGHT_MASK 0x07ff +#define VPE_NLIN_RIGHT_SHIFT 12 + +#define VPE_SC_MP_SC9 0x0724 +#define VPE_LIN_ACC_INC VPE_SC_MP_SC9 + +#define VPE_SC_MP_SC10 0x0728 +#define VPE_NLIN_ACC_INIT VPE_SC_MP_SC10 + +#define VPE_SC_MP_SC11 0x072c +#define VPE_NLIN_ACC_INC VPE_SC_MP_SC11 + +#define VPE_SC_MP_SC12 0x0730 +#define VPE_COL_ACC_OFFSET_MASK 0x01ffffff +#define VPE_COL_ACC_OFFSET_SHIFT 0 + +#define VPE_SC_MP_SC13 0x0734 +#define VPE_SC_FACTOR_RAV_MASK 0x03ff +#define VPE_SC_FACTOR_RAV_SHIFT 0 +#define VPE_CHROMA_INTP_THR_MASK 0x03ff +#define VPE_CHROMA_INTP_THR_SHIFT 12 +#define VPE_DELTA_CHROMA_THR_MASK 0x0f +#define VPE_DELTA_CHROMA_THR_SHIFT 24 + +#define VPE_SC_MP_SC17 0x0744 +#define VPE_EV_THR_MASK 0x03ff +#define VPE_EV_THR_SHIFT 12 +#define VPE_DELTA_LUMA_THR_MASK 0x0f +#define VPE_DELTA_LUMA_THR_SHIFT 24 +#define VPE_DELTA_EV_THR_MASK 0x0f +#define VPE_DELTA_EV_THR_SHIFT 28 + +#define VPE_SC_MP_SC18 0x0748 +#define VPE_HS_FACTOR_MASK 0x03ff +#define VPE_HS_FACTOR_SHIFT 0 +#define VPE_CONF_DEFAULT_MASK 0x01ff +#define VPE_CONF_DEFAULT_SHIFT 16 + +#define VPE_SC_MP_SC19 0x074c +#define VPE_HPF_COEFF0_MASK 0xff +#define VPE_HPF_COEFF0_SHIFT 0 +#define VPE_HPF_COEFF1_MASK 0xff +#define VPE_HPF_COEFF1_SHIFT 8 +#define 
VPE_HPF_COEFF2_MASK 0xff +#define VPE_HPF_COEFF2_SHIFT 16 +#define VPE_HPF_COEFF3_MASK 0xff +#define VPE_HPF_COEFF3_SHIFT 23 + +#define VPE_SC_MP_SC20 0x0750 +#define VPE_HPF_COEFF4_MASK 0xff +#define VPE_HPF_COEFF4_SHIFT 0 +#define VPE_HPF_COEFF5_MASK 0xff +#define VPE_HPF_COEFF5_SHIFT 8 +#define VPE_HPF_NORM_SHIFT_MASK 0x07 +#define VPE_HPF_NORM_SHIFT_SHIFT 16 +#define VPE_NL_LIMIT_MASK 0x1ff +#define VPE_NL_LIMIT_SHIFT 20 + +#define VPE_SC_MP_SC21 0x0754 +#define VPE_NL_LO_THR_MASK 0x01ff +#define VPE_NL_LO_THR_SHIFT 0 +#define VPE_NL_LO_SLOPE_MASK 0xff +#define VPE_NL_LO_SLOPE_SHIFT 16 + +#define VPE_SC_MP_SC22 0x0758 +#define VPE_NL_HI_THR_MASK 0x01ff +#define VPE_NL_HI_THR_SHIFT 0 +#define VPE_NL_HI_SLOPE_SH_MASK 0x07 +#define VPE_NL_HI_SLOPE_SH_SHIFT 16 + +#define VPE_SC_MP_SC23 0x075c +#define VPE_GRADIENT_THR_MASK 0x07ff +#define VPE_GRADIENT_THR_SHIFT 0 +#define VPE_GRADIENT_THR_RANGE_MASK 0x0f +#define VPE_GRADIENT_THR_RANGE_SHIFT 12 +#define VPE_MIN_GY_THR_MASK 0xff +#define VPE_MIN_GY_THR_SHIFT 16 +#define VPE_MIN_GY_THR_RANGE_MASK 0x0f +#define VPE_MIN_GY_THR_RANGE_SHIFT 28 + +#define VPE_SC_MP_SC24 0x0760 +#define VPE_ORG_H_MASK 0x07ff +#define VPE_ORG_H_SHIFT 0 +#define VPE_ORG_W_MASK 0x07ff +#define VPE_ORG_W_SHIFT 16 + +#define VPE_SC_MP_SC25 0x0764 +#define VPE_OFF_H_MASK 0x07ff +#define VPE_OFF_H_SHIFT 0 +#define VPE_OFF_W_MASK 0x07ff +#define VPE_OFF_W_SHIFT 16 + +/* VPE color space converter regs */ +#define VPE_CSC_CSC00 0x5700 +#define VPE_CSC_A0_MASK 0x1fff +#define VPE_CSC_A0_SHIFT 0 +#define VPE_CSC_B0_MASK 0x1fff +#define VPE_CSC_B0_SHIFT 16 + +#define VPE_CSC_CSC01 0x5704 +#define VPE_CSC_C0_MASK 0x1fff +#define VPE_CSC_C0_SHIFT 0 +#define VPE_CSC_A1_MASK 0x1fff +#define VPE_CSC_A1_SHIFT 16 + +#define VPE_CSC_CSC02 0x5708 +#define VPE_CSC_B1_MASK 0x1fff +#define VPE_CSC_B1_SHIFT 0 +#define VPE_CSC_C1_MASK 0x1fff +#define VPE_CSC_C1_SHIFT 16 + +#define VPE_CSC_CSC03 0x570c +#define VPE_CSC_A2_MASK 0x1fff +#define VPE_CSC_A2_SHIFT 0 +#define VPE_CSC_B2_MASK 0x1fff +#define VPE_CSC_B2_SHIFT 16 + +#define VPE_CSC_CSC04 0x5710 +#define VPE_CSC_C2_MASK 0x1fff +#define VPE_CSC_C2_SHIFT 0 +#define VPE_CSC_D0_MASK 0x0fff +#define VPE_CSC_D0_SHIFT 16 + +#define VPE_CSC_CSC05 0x5714 +#define VPE_CSC_D1_MASK 0x0fff +#define VPE_CSC_D1_SHIFT 0 +#define VPE_CSC_D2_MASK 0x0fff +#define VPE_CSC_D2_SHIFT 16 +#define VPE_CSC_BYPASS (1 << 28) + +#endif diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c index b557caf5b1a4..ccdadd623a3a 100644 --- a/drivers/media/platform/timblogiw.c +++ b/drivers/media/platform/timblogiw.c @@ -403,7 +403,7 @@ static int timblogiw_s_input(struct file *file, void *priv, unsigned int input) return 0; } -static int timblogiw_streamon(struct file *file, void *priv, unsigned int type) +static int timblogiw_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct video_device *vdev = video_devdata(file); struct timblogiw_fh *fh = priv; @@ -420,7 +420,7 @@ static int timblogiw_streamon(struct file *file, void *priv, unsigned int type) } static int timblogiw_streamoff(struct file *file, void *priv, - unsigned int type) + enum v4l2_buf_type type) { struct video_device *vdev = video_devdata(file); struct timblogiw_fh *fh = priv; @@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) desc = dmaengine_prep_slave_sg(fh->chan, buf->sg, sg_elems, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); + DMA_PREP_INTERRUPT); if (!desc) { 
spin_lock_irq(&fh->queue_lock); list_del_init(&vb->queue); diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c index 21db23b196be..fa3964022b96 100644 --- a/drivers/media/radio/radio-keene.c +++ b/drivers/media/radio/radio-keene.c @@ -123,7 +123,7 @@ static int keene_cmd_set(struct keene_device *radio) /* If bit 0 is set, then transmit mono, otherwise stereo. If bit 2 is set, then enable 75 us preemphasis, otherwise it is 50 us. */ - radio->buffer[3] = (!radio->stereo) | (radio->preemph_75_us ? 4 : 0); + radio->buffer[3] = (radio->stereo ? 0 : 1) | (radio->preemph_75_us ? 4 : 0); radio->buffer[4] = 0x00; radio->buffer[5] = 0x00; radio->buffer[6] = 0x00; diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c index f1e3714b5f16..93d864eb8306 100644 --- a/drivers/media/radio/radio-sf16fmr2.c +++ b/drivers/media/radio/radio-sf16fmr2.c @@ -74,8 +74,8 @@ static u8 fmr2_tea575x_get_pins(struct snd_tea575x *tea) struct fmr2 *fmr2 = tea->private_data; u8 bits = inb(fmr2->io); - return (bits & STR_DATA) ? TEA575X_DATA : 0 | - (bits & STR_MOST) ? TEA575X_MOST : 0; + return ((bits & STR_DATA) ? TEA575X_DATA : 0) | + ((bits & STR_MOST) ? TEA575X_MOST : 0); } static void fmr2_tea575x_set_direction(struct snd_tea575x *tea, bool output) @@ -295,7 +295,6 @@ static void fmr2_remove(struct fmr2 *fmr2) static int fmr2_isa_remove(struct device *pdev, unsigned int ndev) { fmr2_remove(dev_get_drvdata(pdev)); - dev_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c index b91477212413..3db8a8cfe1a8 100644 --- a/drivers/media/radio/radio-shark.c +++ b/drivers/media/radio/radio-shark.c @@ -271,6 +271,7 @@ static void shark_unregister_leds(struct shark_device *shark) cancel_work_sync(&shark->led_work); } +#ifdef CONFIG_PM static void shark_resume_leds(struct shark_device *shark) { if (test_bit(BLUE_IS_PULSE, &shark->brightness_new)) @@ -280,6 +281,7 @@ static void shark_resume_leds(struct shark_device *shark) set_bit(RED_LED, &shark->brightness_new); schedule_work(&shark->led_work); } +#endif #else static int shark_register_leds(struct shark_device *shark, struct device *dev) { diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c index 9fb669721e66..d86d90dab8bf 100644 --- a/drivers/media/radio/radio-shark2.c +++ b/drivers/media/radio/radio-shark2.c @@ -237,6 +237,7 @@ static void shark_unregister_leds(struct shark_device *shark) cancel_work_sync(&shark->led_work); } +#ifdef CONFIG_PM static void shark_resume_leds(struct shark_device *shark) { int i; @@ -246,6 +247,7 @@ static void shark_resume_leds(struct shark_device *shark) schedule_work(&shark->led_work); } +#endif #else static int shark_register_leds(struct shark_device *shark, struct device *dev) { diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c index 0bd250068285..0e750aef656a 100644 --- a/drivers/media/radio/si470x/radio-si470x-common.c +++ b/drivers/media/radio/si470x/radio-si470x-common.c @@ -254,7 +254,7 @@ static unsigned int si470x_get_step(struct si470x_device *radio) /* 2: 50 kHz */ default: return 50 * 16; - }; + } } diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c index e5fc9acd0c4f..2a497c80c77f 100644 --- a/drivers/media/radio/si470x/radio-si470x-i2c.c +++ b/drivers/media/radio/si470x/radio-si470x-i2c.c @@ -463,7 +463,7 @@ static int si470x_i2c_remove(struct 
i2c_client *client) } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP /* * si470x_i2c_suspend - suspend the device */ @@ -509,7 +509,7 @@ static struct i2c_driver si470x_i2c_driver = { .driver = { .name = "si470x", .owner = THIS_MODULE, -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP .pm = &si470x_i2c_pm, #endif }, diff --git a/drivers/media/radio/si4713-i2c.c b/drivers/media/radio/si4713-i2c.c index fe160882ee10..9ec48ccbcf0b 100644 --- a/drivers/media/radio/si4713-i2c.c +++ b/drivers/media/radio/si4713-i2c.c @@ -1456,7 +1456,7 @@ static int si4713_probe(struct i2c_client *client, if (client->irq) { rval = request_irq(client->irq, - si4713_handler, IRQF_TRIGGER_FALLING | IRQF_DISABLED, + si4713_handler, IRQF_TRIGGER_FALLING, client->name, sdev); if (rval < 0) { v4l2_err(&sdev->sd, "Could not request IRQ\n"); diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c index 06ac69245ca1..69e3245a58a0 100644 --- a/drivers/media/radio/tef6862.c +++ b/drivers/media/radio/tef6862.c @@ -48,15 +48,15 @@ #define WM_SUB_TEST 0xF /* Different modes of the MSA register */ -#define MODE_BUFFER 0x0 -#define MODE_PRESET 0x1 -#define MODE_SEARCH 0x2 -#define MODE_AF_UPDATE 0x3 -#define MODE_JUMP 0x4 -#define MODE_CHECK 0x5 -#define MODE_LOAD 0x6 -#define MODE_END 0x7 -#define MODE_SHIFT 5 +#define MSA_MODE_BUFFER 0x0 +#define MSA_MODE_PRESET 0x1 +#define MSA_MODE_SEARCH 0x2 +#define MSA_MODE_AF_UPDATE 0x3 +#define MSA_MODE_JUMP 0x4 +#define MSA_MODE_CHECK 0x5 +#define MSA_MODE_LOAD 0x6 +#define MSA_MODE_END 0x7 +#define MSA_MODE_SHIFT 5 struct tef6862_state { struct v4l2_subdev sd; @@ -114,7 +114,7 @@ static int tef6862_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequen clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ); pll = 1964 + ((freq - TEF6862_LO_FREQ) * 20) / FREQ_MUL; - i2cmsg[0] = (MODE_PRESET << MODE_SHIFT) | WM_SUB_PLLM; + i2cmsg[0] = (MSA_MODE_PRESET << MSA_MODE_SHIFT) | WM_SUB_PLLM; i2cmsg[1] = (pll >> 8) & 0xff; i2cmsg[2] = pll & 0xff; diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c index 253f307f0b37..4b2e9e8298e1 100644 --- a/drivers/media/radio/wl128x/fmdrv_common.c +++ b/drivers/media/radio/wl128x/fmdrv_common.c @@ -175,7 +175,7 @@ static int_handler_prototype int_handler_table[] = { fm_irq_handle_intmsk_cmd_resp }; -long (*g_st_write) (struct sk_buff *skb); +static long (*g_st_write) (struct sk_buff *skb); static struct completion wait_for_fmdrv_reg_comp; static inline void fm_irq_call(struct fmdev *fmdev) diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig index 11e84bcc23a1..904f11367c29 100644 --- a/drivers/media/rc/Kconfig +++ b/drivers/media/rc/Kconfig @@ -322,4 +322,14 @@ config IR_GPIO_CIR To compile this driver as a module, choose M here: the module will be called gpio-ir-recv. +config RC_ST + tristate "ST remote control receiver" + depends on ARCH_STI && RC_CORE + help + Say Y here if you want support for ST remote control driver + which allows both IR and UHF RX. + The driver passes raw pulse and space information to the LIRC decoder. + + If you're not sure, select N here. 
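The "raw pulse and space information" mentioned in this help text reaches the LIRC decoder through rc-core's raw-event API, which the new st_rc.c driver added later in this series also uses. A minimal sketch of that reporting pattern, not taken from the patch itself: it assumes an rc_dev that has already been registered with rc_register_device(), and the durations are purely illustrative.

	/* Sketch only: report one mark/space pair to rc-core's raw decoders. */
	static void report_one_symbol(struct rc_dev *rdev)
	{
		DEFINE_IR_RAW_EVENT(ev);

		ev.pulse = true;			/* carrier present ("mark") */
		ev.duration = US_TO_NS(900);		/* illustrative duration */
		ir_raw_event_store(rdev, &ev);

		ev.pulse = false;			/* carrier absent ("space") */
		ev.duration = US_TO_NS(450);		/* illustrative duration */
		ir_raw_event_store(rdev, &ev);

		ir_raw_event_handle(rdev);		/* hand stored events to the decoders, incl. lirc */
	}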
+ endif #RC_DEVICES diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile index 56bacf07b361..f4eb32c0a455 100644 --- a/drivers/media/rc/Makefile +++ b/drivers/media/rc/Makefile @@ -30,3 +30,4 @@ obj-$(CONFIG_RC_LOOPBACK) += rc-loopback.o obj-$(CONFIG_IR_GPIO_CIR) += gpio-ir-recv.o obj-$(CONFIG_IR_IGUANA) += iguanair.o obj-$(CONFIG_IR_TTUSBIR) += ttusbir.o +obj-$(CONFIG_RC_ST) += st_rc.o diff --git a/drivers/media/rc/fintek-cir.h b/drivers/media/rc/fintek-cir.h index 82516a1d39b0..b698f3d2ced9 100644 --- a/drivers/media/rc/fintek-cir.h +++ b/drivers/media/rc/fintek-cir.h @@ -76,8 +76,8 @@ struct fintek_dev { } tx; /* Config register index/data port pair */ - u8 cr_ip; - u8 cr_dp; + u32 cr_ip; + u32 cr_dp; /* hardware I/O settings */ unsigned long cir_addr; diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c index 07aacfa5903d..80c611c2e8c2 100644 --- a/drivers/media/rc/gpio-ir-recv.c +++ b/drivers/media/rc/gpio-ir-recv.c @@ -16,6 +16,7 @@ #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/slab.h> +#include <linux/of.h> #include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/irq.h> diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c index b53626ba6f49..fdae05c4f377 100644 --- a/drivers/media/rc/iguanair.c +++ b/drivers/media/rc/iguanair.c @@ -308,22 +308,12 @@ static int iguanair_set_tx_carrier(struct rc_dev *dev, uint32_t carrier) cycles = DIV_ROUND_CLOSEST(24000000, carrier * 2) - ir->cycle_overhead; - /* make up the the remainer of 4-cycle blocks */ - switch (cycles & 3) { - case 0: - sevens = 0; - break; - case 1: - sevens = 3; - break; - case 2: - sevens = 2; - break; - case 3: - sevens = 1; - break; - } - + /* + * Calculate minimum number of 7 cycles needed so + * we are left with a multiple of 4; so we want to have + * (sevens * 7) & 3 == cycles & 3 + */ + sevens = (4 - cycles) & 3; fours = (cycles - sevens * 7) / 4; /* magic happens here */ diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c index 31b955bf7664..b1e19a26208d 100644 --- a/drivers/media/rc/ir-rx51.c +++ b/drivers/media/rc/ir-rx51.c @@ -201,8 +201,7 @@ static int lirc_rx51_init_port(struct lirc_rx51 *lirc_rx51) lirc_rx51->irq_num = omap_dm_timer_get_irq(lirc_rx51->pulse_timer); retval = request_irq(lirc_rx51->irq_num, lirc_rx51_interrupt_handler, - IRQF_DISABLED | IRQF_SHARED, - "lirc_pulse_timer", lirc_rx51); + IRQF_SHARED, "lirc_pulse_timer", lirc_rx51); if (retval) { dev_err(lirc_rx51->dev, ": Failed to request interrupt line\n"); goto err2; diff --git a/drivers/media/rc/keymaps/rc-dib0700-nec.c b/drivers/media/rc/keymaps/rc-dib0700-nec.c index 4d13a7f2e5c3..492a05ade7e1 100644 --- a/drivers/media/rc/keymaps/rc-dib0700-nec.c +++ b/drivers/media/rc/keymaps/rc-dib0700-nec.c @@ -5,7 +5,7 @@ * TODO: This table is a real mess, as it merges RC codes from several * devices into a big table. It also has both RC-5 and NEC codes inside. * It should be broken into small tables, and the protocols should properly - * be indentificated. + * be identificated. * * The table were imported from dib0700_devices.c. * diff --git a/drivers/media/rc/keymaps/rc-dib0700-rc5.c b/drivers/media/rc/keymaps/rc-dib0700-rc5.c index ba81d9697cfc..454ea596a7ee 100644 --- a/drivers/media/rc/keymaps/rc-dib0700-rc5.c +++ b/drivers/media/rc/keymaps/rc-dib0700-rc5.c @@ -5,7 +5,7 @@ * TODO: This table is a real mess, as it merges RC codes from several * devices into a big table. It also has both RC-5 and NEC codes inside. 
* It should be broken into small tables, and the protocols should properly - * be indentificated. + * be identificated. * * The table were imported from dib0700_devices.c. * diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h index 7c3674ff5ea2..07e83108df0f 100644 --- a/drivers/media/rc/nuvoton-cir.h +++ b/drivers/media/rc/nuvoton-cir.h @@ -84,8 +84,8 @@ struct nvt_dev { } tx; /* EFER Config register index/data pair */ - u8 cr_efir; - u8 cr_efdr; + u32 cr_efir; + u32 cr_efdr; /* hardware I/O settings */ unsigned long cir_addr; diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c new file mode 100644 index 000000000000..65120c2d47ad --- /dev/null +++ b/drivers/media/rc/st_rc.c @@ -0,0 +1,395 @@ +/* + * Copyright (C) 2013 STMicroelectronics Limited + * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#include <linux/kernel.h> +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <media/rc-core.h> +#include <linux/pinctrl/consumer.h> + +struct st_rc_device { + struct device *dev; + int irq; + int irq_wake; + struct clk *sys_clock; + void *base; /* Register base address */ + void *rx_base;/* RX Register base address */ + struct rc_dev *rdev; + bool overclocking; + int sample_mult; + int sample_div; + bool rxuhfmode; +}; + +/* Registers */ +#define IRB_SAMPLE_RATE_COMM 0x64 /* sample freq divisor*/ +#define IRB_CLOCK_SEL 0x70 /* clock select */ +#define IRB_CLOCK_SEL_STATUS 0x74 /* clock status */ +/* IRB IR/UHF receiver registers */ +#define IRB_RX_ON 0x40 /* pulse time capture */ +#define IRB_RX_SYS 0X44 /* sym period capture */ +#define IRB_RX_INT_EN 0x48 /* IRQ enable (R/W) */ +#define IRB_RX_INT_STATUS 0x4c /* IRQ status (R/W) */ +#define IRB_RX_EN 0x50 /* Receive enable */ +#define IRB_MAX_SYM_PERIOD 0x54 /* max sym value */ +#define IRB_RX_INT_CLEAR 0x58 /* overrun status */ +#define IRB_RX_STATUS 0x6c /* receive status */ +#define IRB_RX_NOISE_SUPPR 0x5c /* noise suppression */ +#define IRB_RX_POLARITY_INV 0x68 /* polarity inverter */ + +/** + * IRQ set: Enable full FIFO 1 -> bit 3; + * Enable overrun IRQ 1 -> bit 2; + * Enable last symbol IRQ 1 -> bit 1: + * Enable RX interrupt 1 -> bit 0; + */ +#define IRB_RX_INTS 0x0f +#define IRB_RX_OVERRUN_INT 0x04 + /* maximum symbol period (microsecs),timeout to detect end of symbol train */ +#define MAX_SYMB_TIME 0x5000 +#define IRB_SAMPLE_FREQ 10000000 +#define IRB_FIFO_NOT_EMPTY 0xff00 +#define IRB_OVERFLOW 0x4 +#define IRB_TIMEOUT 0xffff +#define IR_ST_NAME "st-rc" + +static void st_rc_send_lirc_timeout(struct rc_dev *rdev) +{ + DEFINE_IR_RAW_EVENT(ev); + ev.timeout = true; + ir_raw_event_store(rdev, &ev); +} + +/** + * RX graphical example to better understand the difference between ST IR block + * output and standard definition used by LIRC (and most of the world!) 
+ * + * mark mark + * |-IRB_RX_ON-| |-IRB_RX_ON-| + * ___ ___ ___ ___ ___ ___ _ + * | | | | | | | | | | | | | + * | | | | | | space 0 | | | | | | space 1 | + * _____| |__| |__| |____________________________| |__| |__| |_____________| + * + * |--------------- IRB_RX_SYS -------------|------ IRB_RX_SYS -------| + * + * |------------- encoding bit 0 -----------|---- encoding bit 1 -----| + * + * ST hardware returns mark (IRB_RX_ON) and total symbol time (IRB_RX_SYS), so + * convert to standard mark/space we have to calculate space=(IRB_RX_SYS-mark) + * The mark time represents the amount of time the carrier (usually 36-40kHz) + * is detected.The above examples shows Pulse Width Modulation encoding where + * bit 0 is represented by space>mark. + */ + +static irqreturn_t st_rc_rx_interrupt(int irq, void *data) +{ + unsigned int symbol, mark = 0; + struct st_rc_device *dev = data; + int last_symbol = 0; + u32 status; + DEFINE_IR_RAW_EVENT(ev); + + if (dev->irq_wake) + pm_wakeup_event(dev->dev, 0); + + status = readl(dev->rx_base + IRB_RX_STATUS); + + while (status & (IRB_FIFO_NOT_EMPTY | IRB_OVERFLOW)) { + u32 int_status = readl(dev->rx_base + IRB_RX_INT_STATUS); + if (unlikely(int_status & IRB_RX_OVERRUN_INT)) { + /* discard the entire collection in case of errors! */ + ir_raw_event_reset(dev->rdev); + dev_info(dev->dev, "IR RX overrun\n"); + writel(IRB_RX_OVERRUN_INT, + dev->rx_base + IRB_RX_INT_CLEAR); + continue; + } + + symbol = readl(dev->rx_base + IRB_RX_SYS); + mark = readl(dev->rx_base + IRB_RX_ON); + + if (symbol == IRB_TIMEOUT) + last_symbol = 1; + + /* Ignore any noise */ + if ((mark > 2) && (symbol > 1)) { + symbol -= mark; + if (dev->overclocking) { /* adjustments to timings */ + symbol *= dev->sample_mult; + symbol /= dev->sample_div; + mark *= dev->sample_mult; + mark /= dev->sample_div; + } + + ev.duration = US_TO_NS(mark); + ev.pulse = true; + ir_raw_event_store(dev->rdev, &ev); + + if (!last_symbol) { + ev.duration = US_TO_NS(symbol); + ev.pulse = false; + ir_raw_event_store(dev->rdev, &ev); + } else { + st_rc_send_lirc_timeout(dev->rdev); + } + + } + last_symbol = 0; + status = readl(dev->rx_base + IRB_RX_STATUS); + } + + writel(IRB_RX_INTS, dev->rx_base + IRB_RX_INT_CLEAR); + + /* Empty software fifo */ + ir_raw_event_handle(dev->rdev); + return IRQ_HANDLED; +} + +static void st_rc_hardware_init(struct st_rc_device *dev) +{ + int baseclock, freqdiff; + unsigned int rx_max_symbol_per = MAX_SYMB_TIME; + unsigned int rx_sampling_freq_div; + + clk_prepare_enable(dev->sys_clock); + baseclock = clk_get_rate(dev->sys_clock); + + /* IRB input pins are inverted internally from high to low. 
*/ + writel(1, dev->rx_base + IRB_RX_POLARITY_INV); + + rx_sampling_freq_div = baseclock / IRB_SAMPLE_FREQ; + writel(rx_sampling_freq_div, dev->base + IRB_SAMPLE_RATE_COMM); + + freqdiff = baseclock - (rx_sampling_freq_div * IRB_SAMPLE_FREQ); + if (freqdiff) { /* over clocking, workout the adjustment factors */ + dev->overclocking = true; + dev->sample_mult = 1000; + dev->sample_div = baseclock / (10000 * rx_sampling_freq_div); + rx_max_symbol_per = (rx_max_symbol_per * 1000)/dev->sample_div; + } + + writel(rx_max_symbol_per, dev->rx_base + IRB_MAX_SYM_PERIOD); +} + +static int st_rc_remove(struct platform_device *pdev) +{ + struct st_rc_device *rc_dev = platform_get_drvdata(pdev); + clk_disable_unprepare(rc_dev->sys_clock); + rc_unregister_device(rc_dev->rdev); + return 0; +} + +static int st_rc_open(struct rc_dev *rdev) +{ + struct st_rc_device *dev = rdev->priv; + unsigned long flags; + local_irq_save(flags); + /* enable interrupts and receiver */ + writel(IRB_RX_INTS, dev->rx_base + IRB_RX_INT_EN); + writel(0x01, dev->rx_base + IRB_RX_EN); + local_irq_restore(flags); + + return 0; +} + +static void st_rc_close(struct rc_dev *rdev) +{ + struct st_rc_device *dev = rdev->priv; + /* disable interrupts and receiver */ + writel(0x00, dev->rx_base + IRB_RX_EN); + writel(0x00, dev->rx_base + IRB_RX_INT_EN); +} + +static int st_rc_probe(struct platform_device *pdev) +{ + int ret = -EINVAL; + struct rc_dev *rdev; + struct device *dev = &pdev->dev; + struct resource *res; + struct st_rc_device *rc_dev; + struct device_node *np = pdev->dev.of_node; + const char *rx_mode; + + rc_dev = devm_kzalloc(dev, sizeof(struct st_rc_device), GFP_KERNEL); + + if (!rc_dev) + return -ENOMEM; + + rdev = rc_allocate_device(); + + if (!rdev) + return -ENOMEM; + + if (np && !of_property_read_string(np, "rx-mode", &rx_mode)) { + + if (!strcmp(rx_mode, "uhf")) { + rc_dev->rxuhfmode = true; + } else if (!strcmp(rx_mode, "infrared")) { + rc_dev->rxuhfmode = false; + } else { + dev_err(dev, "Unsupported rx mode [%s]\n", rx_mode); + goto err; + } + + } else { + goto err; + } + + rc_dev->sys_clock = devm_clk_get(dev, NULL); + if (IS_ERR(rc_dev->sys_clock)) { + dev_err(dev, "System clock not found\n"); + ret = PTR_ERR(rc_dev->sys_clock); + goto err; + } + + rc_dev->irq = platform_get_irq(pdev, 0); + if (rc_dev->irq < 0) { + ret = rc_dev->irq; + goto err; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + rc_dev->base = devm_ioremap_resource(dev, res); + if (IS_ERR(rc_dev->base)) { + ret = PTR_ERR(rc_dev->base); + goto err; + } + + if (rc_dev->rxuhfmode) + rc_dev->rx_base = rc_dev->base + 0x40; + else + rc_dev->rx_base = rc_dev->base; + + rc_dev->dev = dev; + platform_set_drvdata(pdev, rc_dev); + st_rc_hardware_init(rc_dev); + + rdev->driver_type = RC_DRIVER_IR_RAW; + rdev->allowed_protos = RC_BIT_ALL; + /* rx sampling rate is 10Mhz */ + rdev->rx_resolution = 100; + rdev->timeout = US_TO_NS(MAX_SYMB_TIME); + rdev->priv = rc_dev; + rdev->open = st_rc_open; + rdev->close = st_rc_close; + rdev->driver_name = IR_ST_NAME; + rdev->map_name = RC_MAP_LIRC; + rdev->input_name = "ST Remote Control Receiver"; + + /* enable wake via this device */ + device_set_wakeup_capable(dev, true); + device_set_wakeup_enable(dev, true); + + ret = rc_register_device(rdev); + if (ret < 0) + goto clkerr; + + rc_dev->rdev = rdev; + if (devm_request_irq(dev, rc_dev->irq, st_rc_rx_interrupt, + IRQF_NO_SUSPEND, IR_ST_NAME, rc_dev) < 0) { + dev_err(dev, "IRQ %d register failed\n", rc_dev->irq); + ret = -EINVAL; + goto rcerr; + } + + /** + * 
for LIRC_MODE_MODE2 or LIRC_MODE_PULSE or LIRC_MODE_RAW + * lircd expects a long space first before a signal train to sync. + */ + st_rc_send_lirc_timeout(rdev); + + dev_info(dev, "setup in %s mode\n", rc_dev->rxuhfmode ? "UHF" : "IR"); + + return ret; +rcerr: + rc_unregister_device(rdev); + rdev = NULL; +clkerr: + clk_disable_unprepare(rc_dev->sys_clock); +err: + rc_free_device(rdev); + dev_err(dev, "Unable to register device (%d)\n", ret); + return ret; +} + +#ifdef CONFIG_PM +static int st_rc_suspend(struct device *dev) +{ + struct st_rc_device *rc_dev = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) { + if (!enable_irq_wake(rc_dev->irq)) + rc_dev->irq_wake = 1; + else + return -EINVAL; + } else { + pinctrl_pm_select_sleep_state(dev); + writel(0x00, rc_dev->rx_base + IRB_RX_EN); + writel(0x00, rc_dev->rx_base + IRB_RX_INT_EN); + clk_disable_unprepare(rc_dev->sys_clock); + } + + return 0; +} + +static int st_rc_resume(struct device *dev) +{ + struct st_rc_device *rc_dev = dev_get_drvdata(dev); + struct rc_dev *rdev = rc_dev->rdev; + + if (rc_dev->irq_wake) { + disable_irq_wake(rc_dev->irq); + rc_dev->irq_wake = 0; + } else { + pinctrl_pm_select_default_state(dev); + st_rc_hardware_init(rc_dev); + if (rdev->users) { + writel(IRB_RX_INTS, rc_dev->rx_base + IRB_RX_INT_EN); + writel(0x01, rc_dev->rx_base + IRB_RX_EN); + } + } + + return 0; +} + +static SIMPLE_DEV_PM_OPS(st_rc_pm_ops, st_rc_suspend, st_rc_resume); +#endif + +#ifdef CONFIG_OF +static struct of_device_id st_rc_match[] = { + { .compatible = "st,comms-irb", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, st_rc_match); +#endif + +static struct platform_driver st_rc_driver = { + .driver = { + .name = IR_ST_NAME, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(st_rc_match), +#ifdef CONFIG_PM + .pm = &st_rc_pm_ops, +#endif + }, + .probe = st_rc_probe, + .remove = st_rc_remove, +}; + +module_platform_driver(st_rc_driver); + +MODULE_DESCRIPTION("RC Transceiver driver for STMicroelectronics platforms"); +MODULE_AUTHOR("STMicroelectronics (R&D) Ltd"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c index 98bd4960c75e..904baf4eec28 100644 --- a/drivers/media/rc/winbond-cir.c +++ b/drivers/media/rc/winbond-cir.c @@ -1110,7 +1110,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id) } err = request_irq(data->irq, wbcir_irq_handler, - IRQF_DISABLED, DRVNAME, device); + 0, DRVNAME, device); if (err) { dev_err(dev, "Failed to claim IRQ %u\n", data->irq); err = -EBUSY; diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c index 6c96e4898777..72971a8d3c37 100644 --- a/drivers/media/tuners/e4000.c +++ b/drivers/media/tuners/e4000.c @@ -21,20 +21,30 @@ #include "e4000_priv.h" #include <linux/math64.h> +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + /* write multiple registers */ static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len) { int ret; - u8 buf[1 + len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = priv->cfg->i2c_addr, .flags = 0, - .len = sizeof(buf), + .len = 1 + len, .buf = buf, } }; + if (1 + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + buf[0] = reg; memcpy(&buf[1], val, len); @@ -54,7 +64,7 @@ static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len) static int e4000_rd_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len) { int ret; - u8 
buf[len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[2] = { { .addr = priv->cfg->i2c_addr, @@ -64,11 +74,18 @@ static int e4000_rd_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len) }, { .addr = priv->cfg->i2c_addr, .flags = I2C_M_RD, - .len = sizeof(buf), + .len = len, .buf = buf, } }; + if (len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c rd reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + ret = i2c_transfer(priv->i2c, msg, 2); if (ret == 2) { memcpy(val, buf, len); diff --git a/drivers/media/tuners/fc0012.c b/drivers/media/tuners/fc0012.c index f4d0e797a6cc..d74e92056810 100644 --- a/drivers/media/tuners/fc0012.c +++ b/drivers/media/tuners/fc0012.c @@ -139,7 +139,7 @@ static int fc0012_set_params(struct dvb_frontend *fe) unsigned char reg[7], am, pm, multi, tmp; unsigned long f_vco; unsigned short xtal_freq_khz_2, xin, xdiv; - int vco_select = false; + bool vco_select = false; if (fe->callback) { ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER, diff --git a/drivers/media/tuners/fc0013.c b/drivers/media/tuners/fc0013.c index bd8f0f1e8f3b..b4162315773d 100644 --- a/drivers/media/tuners/fc0013.c +++ b/drivers/media/tuners/fc0013.c @@ -233,7 +233,7 @@ static int fc0013_set_params(struct dvb_frontend *fe) unsigned char reg[7], am, pm, multi, tmp; unsigned long f_vco; unsigned short xtal_freq_khz_2, xin, xdiv; - int vco_select = false; + bool vco_select = false; if (fe->callback) { ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER, diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c index 81f38aae9c66..3aecaf465094 100644 --- a/drivers/media/tuners/fc2580.c +++ b/drivers/media/tuners/fc2580.c @@ -20,6 +20,9 @@ #include "fc2580_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + /* * TODO: * I2C write and read works only for one single register. 
Multiple registers @@ -41,16 +44,23 @@ static int fc2580_wr_regs(struct fc2580_priv *priv, u8 reg, u8 *val, int len) { int ret; - u8 buf[1 + len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = priv->cfg->i2c_addr, .flags = 0, - .len = sizeof(buf), + .len = 1 + len, .buf = buf, } }; + if (1 + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + buf[0] = reg; memcpy(&buf[1], val, len); @@ -69,7 +79,7 @@ static int fc2580_wr_regs(struct fc2580_priv *priv, u8 reg, u8 *val, int len) static int fc2580_rd_regs(struct fc2580_priv *priv, u8 reg, u8 *val, int len) { int ret; - u8 buf[len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[2] = { { .addr = priv->cfg->i2c_addr, @@ -79,11 +89,18 @@ static int fc2580_rd_regs(struct fc2580_priv *priv, u8 reg, u8 *val, int len) }, { .addr = priv->cfg->i2c_addr, .flags = I2C_M_RD, - .len = sizeof(buf), + .len = len, .buf = buf, } }; + if (len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c rd reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + ret = i2c_transfer(priv->i2c, msg, 2); if (ret == 2) { memcpy(val, buf, len); diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c index 1c23666468cf..d9ee43fae62d 100644 --- a/drivers/media/tuners/r820t.c +++ b/drivers/media/tuners/r820t.c @@ -612,10 +612,19 @@ static int r820t_set_pll(struct r820t_priv *priv, enum v4l2_tuner_type type, vco_fine_tune = (data[4] & 0x30) >> 4; - if (vco_fine_tune > VCO_POWER_REF) - div_num = div_num - 1; - else if (vco_fine_tune < VCO_POWER_REF) - div_num = div_num + 1; + tuner_dbg("mix_div=%d div_num=%d vco_fine_tune=%d\n", + mix_div, div_num, vco_fine_tune); + + /* + * XXX: R828D/16MHz seems to have always vco_fine_tune=1. + * Due to that, this calculation goes wrong. 
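+	 * The change below therefore skips the div_num adjustment entirely
+	 * when the configured chip is the R828D.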
+ */ + if (priv->cfg->rafael_chip != CHIP_R828D) { + if (vco_fine_tune > VCO_POWER_REF) + div_num = div_num - 1; + else if (vco_fine_tune < VCO_POWER_REF) + div_num = div_num + 1; + } rc = r820t_write_reg_mask(priv, 0x10, div_num << 5, 0xe0); if (rc < 0) @@ -637,11 +646,6 @@ static int r820t_set_pll(struct r820t_priv *priv, enum v4l2_tuner_type type, vco_fra = pll_ref * 129 / 128; } - if (nint > 63) { - tuner_info("No valid PLL values for %u kHz!\n", freq); - return -EINVAL; - } - ni = (nint - 13) / 4; si = nint - 4 * ni - 13; diff --git a/drivers/media/tuners/tda18212.c b/drivers/media/tuners/tda18212.c index e4a84ee231cf..abe256e1f843 100644 --- a/drivers/media/tuners/tda18212.c +++ b/drivers/media/tuners/tda18212.c @@ -20,6 +20,9 @@ #include "tda18212.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + struct tda18212_priv { struct tda18212_config *cfg; struct i2c_adapter *i2c; @@ -32,16 +35,23 @@ static int tda18212_wr_regs(struct tda18212_priv *priv, u8 reg, u8 *val, int len) { int ret; - u8 buf[len+1]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = priv->cfg->i2c_address, .flags = 0, - .len = sizeof(buf), + .len = 1 + len, .buf = buf, } }; + if (1 + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + buf[0] = reg; memcpy(&buf[1], val, len); @@ -61,7 +71,7 @@ static int tda18212_rd_regs(struct tda18212_priv *priv, u8 reg, u8 *val, int len) { int ret; - u8 buf[len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[2] = { { .addr = priv->cfg->i2c_address, @@ -71,11 +81,18 @@ static int tda18212_rd_regs(struct tda18212_priv *priv, u8 reg, u8 *val, }, { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, - .len = sizeof(buf), + .len = len, .buf = buf, } }; + if (len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c rd reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + ret = i2c_transfer(priv->i2c, msg, 2); if (ret == 2) { memcpy(val, buf, len); diff --git a/drivers/media/tuners/tda18218.c b/drivers/media/tuners/tda18218.c index 2d31aeb6b088..9300e9361e3b 100644 --- a/drivers/media/tuners/tda18218.c +++ b/drivers/media/tuners/tda18218.c @@ -20,11 +20,14 @@ #include "tda18218_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + /* write multiple registers */ static int tda18218_wr_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len) { int ret = 0, len2, remaining; - u8 buf[1 + len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = priv->cfg->i2c_address, @@ -33,6 +36,13 @@ static int tda18218_wr_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len) } }; + if (1 + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + for (remaining = len; remaining > 0; remaining -= (priv->cfg->i2c_wr_max - 1)) { len2 = remaining; @@ -63,7 +73,7 @@ static int tda18218_wr_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len) static int tda18218_rd_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len) { int ret; - u8 buf[reg+len]; /* we must start read always from reg 0x00 */ + u8 buf[MAX_XFER_SIZE]; /* we must start read always from reg 0x00 */ struct i2c_msg msg[2] = { { .addr = priv->cfg->i2c_address, @@ -73,11 +83,18 @@ static int tda18218_rd_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len) }, { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, - .len = 
sizeof(buf), + .len = reg + len, .buf = buf, } }; + if (reg + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + ret = i2c_transfer(priv->i2c, msg, 2); if (ret == 2) { memcpy(val, &buf[reg], len); diff --git a/drivers/media/tuners/tda9887.c b/drivers/media/tuners/tda9887.c index 300005c535ba..9823248d743f 100644 --- a/drivers/media/tuners/tda9887.c +++ b/drivers/media/tuners/tda9887.c @@ -536,8 +536,8 @@ static int tda9887_status(struct dvb_frontend *fe) unsigned char buf[1]; int rc; - memset(buf,0,sizeof(buf)); - if (1 != (rc = tuner_i2c_xfer_recv(&priv->i2c_props,buf,1))) + rc = tuner_i2c_xfer_recv(&priv->i2c_props, buf, 1); + if (rc != 1) tuner_info("i2c i/o error: rc == %d (should be 1)\n", rc); dump_read_message(fe, buf); return 0; diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c index 878d2c4d9e8e..4be5cf808a40 100644 --- a/drivers/media/tuners/tuner-xc2028.c +++ b/drivers/media/tuners/tuner-xc2028.c @@ -24,6 +24,9 @@ #include <linux/dvb/frontend.h> #include "dvb_frontend.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 80 + /* Registers (Write-only) */ #define XREG_INIT 0x00 #define XREG_RF_FREQ 0x02 @@ -547,7 +550,10 @@ static int load_firmware(struct dvb_frontend *fe, unsigned int type, { struct xc2028_data *priv = fe->tuner_priv; int pos, rc; - unsigned char *p, *endp, buf[priv->ctrl.max_len]; + unsigned char *p, *endp, buf[MAX_XFER_SIZE]; + + if (priv->ctrl.max_len > sizeof(buf)) + priv->ctrl.max_len = sizeof(buf); tuner_dbg("%s called\n", __func__); @@ -572,7 +578,7 @@ static int load_firmware(struct dvb_frontend *fe, unsigned int type, return -EINVAL; } - size = le16_to_cpu(*(__u16 *) p); + size = le16_to_cpu(*(__le16 *) p); p += sizeof(size); if (size == 0xffff) @@ -683,7 +689,7 @@ static int load_scode(struct dvb_frontend *fe, unsigned int type, /* 16 SCODE entries per file; each SCODE entry is 12 bytes and * has a 2-byte size header in the firmware format. 
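	 * i.e. 16 * (12 + 2) = 14 * 16 = 224 bytes in total, which is what
	 * the size check below expects.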
*/ if (priv->firm[pos].size != 14 * 16 || scode >= 16 || - le16_to_cpu(*(__u16 *)(p + 14 * scode)) != 12) + le16_to_cpu(*(__le16 *)(p + 14 * scode)) != 12) return -EINVAL; p += 14 * scode + 2; } diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index 8b6275f85908..0bd969063392 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c @@ -390,7 +390,7 @@ static void flexcop_usb_transfer_exit(struct flexcop_usb *fc_usb) } if (fc_usb->iso_buffer != NULL) - pci_free_consistent(NULL, + usb_free_coherent(fc_usb->udev, fc_usb->buffer_size, fc_usb->iso_buffer, fc_usb->dma_addr); } @@ -407,8 +407,8 @@ static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb) "each of %d bytes size = %d.\n", B2C2_USB_NUM_ISO_URB, B2C2_USB_FRAMES_PER_ISO, frame_size, bufsize); - fc_usb->iso_buffer = pci_alloc_consistent(NULL, - bufsize, &fc_usb->dma_addr); + fc_usb->iso_buffer = usb_alloc_coherent(fc_usb->udev, + bufsize, GFP_KERNEL, &fc_usb->dma_addr); if (fc_usb->iso_buffer == NULL) return -ENOMEM; diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c index be1719283609..351a78a84c3d 100644 --- a/drivers/media/usb/cpia2/cpia2_usb.c +++ b/drivers/media/usb/cpia2/cpia2_usb.c @@ -209,7 +209,7 @@ static void cpia2_usb_complete(struct urb *urb) { int i; unsigned char *cdata; - static int frame_ready = false; + static bool frame_ready = false; struct camera_data *cam = (struct camera_data *) urb->context; if (urb->status!=0) { diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c index a384f80f595e..e9d017bea377 100644 --- a/drivers/media/usb/cx231xx/cx231xx-cards.c +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c @@ -978,7 +978,6 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev, int minor) { int retval = -ENOMEM; - int errCode; unsigned int maxh, maxw; dev->udev = udev; @@ -1014,8 +1013,8 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev, /* Cx231xx pre card setup */ cx231xx_pre_card_setup(dev); - errCode = cx231xx_config(dev); - if (errCode) { + retval = cx231xx_config(dev); + if (retval) { cx231xx_errdev("error configuring device\n"); return -ENOMEM; } @@ -1024,12 +1023,11 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev, dev->norm = dev->board.norm; /* register i2c bus */ - errCode = cx231xx_dev_init(dev); - if (errCode < 0) { - cx231xx_dev_uninit(dev); + retval = cx231xx_dev_init(dev); + if (retval) { cx231xx_errdev("%s: cx231xx_i2c_register - errCode [%d]!\n", - __func__, errCode); - return errCode; + __func__, retval); + goto err_dev_init; } /* Do board specific init */ @@ -1047,11 +1045,11 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev, dev->interlaced = 0; dev->video_input = 0; - errCode = cx231xx_config(dev); - if (errCode < 0) { + retval = cx231xx_config(dev); + if (retval) { cx231xx_errdev("%s: cx231xx_config - errCode [%d]!\n", - __func__, errCode); - return errCode; + __func__, retval); + goto err_dev_init; } /* init video dma queues */ @@ -1075,9 +1073,9 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev, } retval = cx231xx_register_analog_devices(dev); - if (retval < 0) { - cx231xx_release_resources(dev); - return retval; + if (retval) { + cx231xx_release_analog_resources(dev); + goto err_analog; } cx231xx_ir_init(dev); @@ -1085,6 +1083,11 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev, 
cx231xx_init_extension(dev); return 0; +err_analog: + cx231xx_remove_from_devlist(dev); +err_dev_init: + cx231xx_dev_uninit(dev); + return retval; } #if defined(CONFIG_MODULES) && defined(MODULE) @@ -1132,7 +1135,6 @@ static int cx231xx_usb_probe(struct usb_interface *interface, char *speed; struct usb_interface_assoc_descriptor *assoc_desc; - udev = usb_get_dev(interface_to_usbdev(interface)); ifnum = interface->altsetting[0].desc.bInterfaceNumber; /* @@ -1161,6 +1163,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface, return -ENOMEM; } + udev = usb_get_dev(interface_to_usbdev(interface)); + snprintf(dev->name, 29, "cx231xx #%d", nr); dev->devno = nr; dev->model = id->driver_info; @@ -1223,10 +1227,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface, if (assoc_desc->bFirstInterface != ifnum) { cx231xx_err(DRIVER_NAME ": Not found " "matching IAD interface\n"); - clear_bit(dev->devno, &cx231xx_devused); - kfree(dev); - dev = NULL; - return -ENODEV; + retval = -ENODEV; + goto err_if; } cx231xx_info("registering interface %d\n", ifnum); @@ -1242,22 +1244,13 @@ static int cx231xx_usb_probe(struct usb_interface *interface, retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev); if (retval) { cx231xx_errdev("v4l2_device_register failed\n"); - clear_bit(dev->devno, &cx231xx_devused); - kfree(dev); - dev = NULL; - return -EIO; + retval = -EIO; + goto err_v4l2; } /* allocate device struct */ retval = cx231xx_init_dev(dev, udev, nr); - if (retval) { - clear_bit(dev->devno, &cx231xx_devused); - v4l2_device_unregister(&dev->v4l2_dev); - kfree(dev); - dev = NULL; - usb_set_intfdata(interface, NULL); - - return retval; - } + if (retval) + goto err_init; /* compute alternate max packet sizes for video */ uif = udev->actconfig->interface[dev->current_pcb_config. 
@@ -1275,11 +1268,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface, if (dev->video_mode.alt_max_pkt_size == NULL) { cx231xx_errdev("out of memory!\n"); - clear_bit(dev->devno, &cx231xx_devused); - v4l2_device_unregister(&dev->v4l2_dev); - kfree(dev); - dev = NULL; - return -ENOMEM; + retval = -ENOMEM; + goto err_video_alt; } for (i = 0; i < dev->video_mode.num_alt; i++) { @@ -1309,11 +1299,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface, if (dev->vbi_mode.alt_max_pkt_size == NULL) { cx231xx_errdev("out of memory!\n"); - clear_bit(dev->devno, &cx231xx_devused); - v4l2_device_unregister(&dev->v4l2_dev); - kfree(dev); - dev = NULL; - return -ENOMEM; + retval = -ENOMEM; + goto err_vbi_alt; } for (i = 0; i < dev->vbi_mode.num_alt; i++) { @@ -1344,11 +1331,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface, if (dev->sliced_cc_mode.alt_max_pkt_size == NULL) { cx231xx_errdev("out of memory!\n"); - clear_bit(dev->devno, &cx231xx_devused); - v4l2_device_unregister(&dev->v4l2_dev); - kfree(dev); - dev = NULL; - return -ENOMEM; + retval = -ENOMEM; + goto err_sliced_cc_alt; } for (i = 0; i < dev->sliced_cc_mode.num_alt; i++) { @@ -1380,11 +1364,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface, if (dev->ts1_mode.alt_max_pkt_size == NULL) { cx231xx_errdev("out of memory!\n"); - clear_bit(dev->devno, &cx231xx_devused); - v4l2_device_unregister(&dev->v4l2_dev); - kfree(dev); - dev = NULL; - return -ENOMEM; + retval = -ENOMEM; + goto err_ts1_alt; } for (i = 0; i < dev->ts1_mode.num_alt; i++) { @@ -1411,6 +1392,29 @@ static int cx231xx_usb_probe(struct usb_interface *interface, request_modules(dev); return 0; +err_ts1_alt: + kfree(dev->sliced_cc_mode.alt_max_pkt_size); +err_sliced_cc_alt: + kfree(dev->vbi_mode.alt_max_pkt_size); +err_vbi_alt: + kfree(dev->video_mode.alt_max_pkt_size); +err_video_alt: + /* cx231xx_uninit_dev: */ + cx231xx_close_extension(dev); + cx231xx_ir_exit(dev); + cx231xx_release_analog_resources(dev); + cx231xx_417_unregister(dev); + cx231xx_remove_from_devlist(dev); + cx231xx_dev_uninit(dev); +err_init: + v4l2_device_unregister(&dev->v4l2_dev); +err_v4l2: + usb_set_intfdata(interface, NULL); +err_if: + usb_put_dev(udev); + kfree(dev); + clear_bit(dev->devno, &cx231xx_devused); + return retval; } /* diff --git a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c index d7308ab7a90f..2a34ceee4802 100644 --- a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c +++ b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c @@ -28,7 +28,7 @@ MODULE_PARM_DESC(pcb_debug, "enable pcb config debug messages [video]"); /******************************************************************************/ -struct pcb_config cx231xx_Scenario[] = { +static struct pcb_config cx231xx_Scenario[] = { { INDEX_SELFPOWER_DIGITAL_ONLY, /* index */ USB_SELF_POWER, /* power_type */ @@ -672,7 +672,7 @@ u32 initialize_cx231xx(struct cx231xx *dev) pcb config it is related to */ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT, data, 4); - config_info = le32_to_cpu(*((u32 *) data)); + config_info = le32_to_cpu(*((__le32 *)data)); usb_speed = (u8) (config_info & 0x1); /* Verify this device belongs to Bus power or Self power device */ diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c index d556042cf312..da47d2392f2a 100644 --- a/drivers/media/usb/dvb-usb-v2/af9015.c +++ b/drivers/media/usb/dvb-usb-v2/af9015.c @@ -397,12 +397,13 @@ error: return ret; } +#define AF9015_EEPROM_SIZE 256 + 
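This hunk replaces a function-local "static const unsigned int" (removed just below) with a preprocessor define, in line with the other "don't use dynamic static allocation" changes in this series: in C a const-qualified object is not an integer constant expression, so a buffer sized by it is formally a variable-length array, whereas a define keeps it an ordinary fixed-size array. A minimal sketch of the distinction, with hypothetical names not taken from the driver:

	#define BUF_LEN_DEFINE 256			/* integer constant expression */
	static const unsigned int buf_len_const = 256;	/* const object, not a constant expression in C */

	static void example(void)
	{
		unsigned char fixed[BUF_LEN_DEFINE];	/* ordinary array, size fixed at compile time */
		unsigned char vla[buf_len_const];	/* C99 variable-length array */

		(void)fixed;
		(void)vla;
	}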
/* hash (and dump) eeprom */ static int af9015_eeprom_hash(struct dvb_usb_device *d) { struct af9015_state *state = d_to_priv(d); int ret, i; - static const unsigned int AF9015_EEPROM_SIZE = 256; u8 buf[AF9015_EEPROM_SIZE]; struct req_t req = {READ_I2C, AF9015_I2C_EEPROM, 0, 0, 1, 1, NULL}; diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index 1ea17dc2a76e..c8fcd78425bd 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c @@ -21,6 +21,9 @@ #include "af9035.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static u16 af9035_checksum(const u8 *buf, size_t len) @@ -126,10 +129,16 @@ exit: /* write multiple registers */ static int af9035_wr_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len) { - u8 wbuf[6 + len]; + u8 wbuf[MAX_XFER_SIZE]; u8 mbox = (reg >> 16) & 0xff; struct usb_req req = { CMD_MEM_WR, mbox, sizeof(wbuf), wbuf, 0, NULL }; + if (6 + len > sizeof(wbuf)) { + dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n", + KBUILD_MODNAME, len); + return -EOPNOTSUPP; + } + wbuf[0] = len; wbuf[1] = 2; wbuf[2] = 0; @@ -228,9 +237,16 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, msg[1].len); } else { /* I2C */ - u8 buf[5 + msg[0].len]; + u8 buf[MAX_XFER_SIZE]; struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf), buf, msg[1].len, msg[1].buf }; + + if (5 + msg[0].len > sizeof(buf)) { + dev_warn(&d->udev->dev, + "%s: i2c xfer: len=%d is too big!\n", + KBUILD_MODNAME, msg[0].len); + return -EOPNOTSUPP; + } req.mbox |= ((msg[0].addr & 0x80) >> 3); buf[0] = msg[1].len; buf[1] = msg[0].addr << 1; @@ -257,9 +273,16 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, msg[0].len - 3); } else { /* I2C */ - u8 buf[5 + msg[0].len]; + u8 buf[MAX_XFER_SIZE]; struct usb_req req = { CMD_I2C_WR, 0, sizeof(buf), buf, 0, NULL }; + + if (5 + msg[0].len > sizeof(buf)) { + dev_warn(&d->udev->dev, + "%s: i2c xfer: len=%d is too big!\n", + KBUILD_MODNAME, msg[0].len); + return -EOPNOTSUPP; + } req.mbox |= ((msg[0].addr & 0x80) >> 3); buf[0] = msg[0].len; buf[1] = msg[0].addr << 1; diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c index e97964ef7f56..2627553f7de1 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c @@ -23,6 +23,9 @@ #include "lgdt3305.h" #include "lg2160.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + int dvb_usb_mxl111sf_debug; module_param_named(debug, dvb_usb_mxl111sf_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level " @@ -57,7 +60,12 @@ int mxl111sf_ctrl_msg(struct dvb_usb_device *d, { int wo = (rbuf == NULL || rlen == 0); /* write-only */ int ret; - u8 sndbuf[1+wlen]; + u8 sndbuf[MAX_XFER_SIZE]; + + if (1 + wlen > sizeof(sndbuf)) { + pr_warn("%s: len=%d is too big!\n", __func__, wlen); + return -EOPNOTSUPP; + } pr_debug("%s(wlen = %d, rlen = %d)\n", __func__, wlen, rlen); diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index c0cd0848631b..ecca03667f98 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -377,6 +377,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d) struct rtl28xxu_req req_e4000 = {0x02c8, CMD_I2C_RD, 1, buf}; struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf}; struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 1, buf}; 
+ struct rtl28xxu_req req_r828d = {0x0074, CMD_I2C_RD, 1, buf}; dev_dbg(&d->udev->dev, "%s:\n", __func__); @@ -489,6 +490,15 @@ static int rtl2832u_read_config(struct dvb_usb_device *d) goto found; } + /* check R828D ID register; reg=00 val=69 */ + ret = rtl28xxu_ctrl_msg(d, &req_r828d); + if (ret == 0 && buf[0] == 0x69) { + priv->tuner = TUNER_RTL2832_R828D; + priv->tuner_name = "R828D"; + goto found; + } + + found: dev_dbg(&d->udev->dev, "%s: tuner=%s\n", __func__, priv->tuner_name); @@ -745,6 +755,7 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap) rtl2832_config = &rtl28xxu_rtl2832_e4000_config; break; case TUNER_RTL2832_R820T: + case TUNER_RTL2832_R828D: rtl2832_config = &rtl28xxu_rtl2832_r820t_config; break; default: @@ -866,6 +877,13 @@ static const struct r820t_config rtl2832u_r820t_config = { .rafael_chip = CHIP_R820T, }; +static const struct r820t_config rtl2832u_r828d_config = { + .i2c_addr = 0x3a, + .xtal = 16000000, + .max_i2c_msg_len = 2, + .rafael_chip = CHIP_R828D, +}; + static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap) { int ret; @@ -923,6 +941,27 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap) adap->fe[0]->ops.read_signal_strength = adap->fe[0]->ops.tuner_ops.get_rf_strength; break; + case TUNER_RTL2832_R828D: + /* power off mn88472 demod on GPIO0 */ + ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_VAL, 0x00, 0x01); + if (ret) + goto err; + + ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_DIR, 0x00, 0x01); + if (ret) + goto err; + + ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_EN, 0x01, 0x01); + if (ret) + goto err; + + fe = dvb_attach(r820t_attach, adap->fe[0], &d->i2c_adap, + &rtl2832u_r828d_config); + + /* Use tuner to get the signal strength */ + adap->fe[0]->ops.read_signal_strength = + adap->fe[0]->ops.tuner_ops.get_rf_strength; + break; default: fe = NULL; dev_err(&d->udev->dev, "%s: unknown tuner=%d\n", KBUILD_MODNAME, @@ -1388,6 +1427,9 @@ static const struct usb_device_id rtl28xxu_id_table[] = { &rtl2832u_props, "Leadtek WinFast DTV Dongle mini", NULL) }, { DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_CPYTO_REDI_PC50A, &rtl2832u_props, "Crypto ReDi PC 50 A", NULL) }, + + { DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131, + &rtl2832u_props, "Astrometa DVB-T2", NULL) }, { } }; MODULE_DEVICE_TABLE(usb, rtl28xxu_id_table); diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h index 729b3540c2f9..2142bcb41b41 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h @@ -83,6 +83,7 @@ enum rtl28xxu_tuner { TUNER_RTL2832_TDA18272, TUNER_RTL2832_FC0013, TUNER_RTL2832_R820T, + TUNER_RTL2832_R828D, }; struct rtl28xxu_req { diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c index ea2d5ee86576..c11138ebf6fb 100644 --- a/drivers/media/usb/dvb-usb/az6027.c +++ b/drivers/media/usb/dvb-usb/az6027.c @@ -254,7 +254,7 @@ static const struct stb0899_s1_reg az6027_stb0899_s1_init_3[] = { -struct stb0899_config az6027_stb0899_config = { +static struct stb0899_config az6027_stb0899_config = { .init_dev = az6027_stb0899_s1_init_1, .init_s2_demod = stb0899_s2_init_2, .init_s1_demod = az6027_stb0899_s1_init_3, @@ -291,7 +291,7 @@ struct stb0899_config az6027_stb0899_config = { .tuner_set_rfsiggain = NULL, }; -struct stb6100_config az6027_stb6100_config = { +static struct stb6100_config az6027_stb6100_config = { .tuner_address = 0xc0, .refclock = 27000000, }; diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index 
3940bb0f9ef6..20e345d9fe8f 100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c @@ -43,6 +43,9 @@ #include "lgs8gxx.h" #include "atbm8830.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + /* debug */ static int dvb_usb_cxusb_debug; module_param_named(debug, dvb_usb_cxusb_debug, int, 0644); @@ -57,7 +60,14 @@ static int cxusb_ctrl_msg(struct dvb_usb_device *d, u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen) { int wo = (rbuf == NULL || rlen == 0); /* write-only */ - u8 sndbuf[1+wlen]; + u8 sndbuf[MAX_XFER_SIZE]; + + if (1 + wlen > sizeof(sndbuf)) { + warn("i2c wr: len=%d is too big!\n", + wlen); + return -EOPNOTSUPP; + } + memset(sndbuf, 0, 1+wlen); sndbuf[0] = cmd; @@ -158,7 +168,13 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], if (msg[i].flags & I2C_M_RD) { /* read only */ - u8 obuf[3], ibuf[1+msg[i].len]; + u8 obuf[3], ibuf[MAX_XFER_SIZE]; + + if (1 + msg[i].len > sizeof(ibuf)) { + warn("i2c rd: len=%d is too big!\n", + msg[i].len); + return -EOPNOTSUPP; + } obuf[0] = 0; obuf[1] = msg[i].len; obuf[2] = msg[i].addr; @@ -172,7 +188,18 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], } else if (i+1 < num && (msg[i+1].flags & I2C_M_RD) && msg[i].addr == msg[i+1].addr) { /* write to then read from same address */ - u8 obuf[3+msg[i].len], ibuf[1+msg[i+1].len]; + u8 obuf[MAX_XFER_SIZE], ibuf[MAX_XFER_SIZE]; + + if (3 + msg[i].len > sizeof(obuf)) { + warn("i2c wr: len=%d is too big!\n", + msg[i].len); + return -EOPNOTSUPP; + } + if (1 + msg[i + 1].len > sizeof(ibuf)) { + warn("i2c rd: len=%d is too big!\n", + msg[i + 1].len); + return -EOPNOTSUPP; + } obuf[0] = msg[i].len; obuf[1] = msg[i+1].len; obuf[2] = msg[i].addr; @@ -191,7 +218,13 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], i++; } else { /* write only */ - u8 obuf[2+msg[i].len], ibuf; + u8 obuf[MAX_XFER_SIZE], ibuf; + + if (2 + msg[i].len > sizeof(obuf)) { + warn("i2c wr: len=%d is too big!\n", + msg[i].len); + return -EOPNOTSUPP; + } obuf[0] = msg[i].addr; obuf[1] = msg[i].len; memcpy(&obuf[2], msg[i].buf, msg[i].len); diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c index c2dded92f1d3..6d68af0c49c8 100644 --- a/drivers/media/usb/dvb-usb/dibusb-common.c +++ b/drivers/media/usb/dvb-usb/dibusb-common.c @@ -12,6 +12,9 @@ #include <linux/kconfig.h> #include "dibusb.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info (|-able))." DVB_USB_DEBUG_STATUS); @@ -105,11 +108,16 @@ EXPORT_SYMBOL(dibusb2_0_power_ctrl); static int dibusb_i2c_msg(struct dvb_usb_device *d, u8 addr, u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) { - u8 sndbuf[wlen+4]; /* lead(1) devaddr,direction(1) addr(2) data(wlen) (len(2) (when reading)) */ + u8 sndbuf[MAX_XFER_SIZE]; /* lead(1) devaddr,direction(1) addr(2) data(wlen) (len(2) (when reading)) */ /* write only ? */ int wo = (rbuf == NULL || rlen == 0), len = 2 + wlen + (wo ? 0 : 2); + if (4 + wlen > sizeof(sndbuf)) { + warn("i2c wr: len=%d is too big!\n", wlen); + return -EOPNOTSUPP; + } + sndbuf[0] = wo ? DIBUSB_REQ_I2C_WRITE : DIBUSB_REQ_I2C_READ; sndbuf[1] = (addr << 1) | (wo ? 
0 : 1); diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c index 6e237b6dd0a8..c1a63b2a6baa 100644 --- a/drivers/media/usb/dvb-usb/dw2102.c +++ b/drivers/media/usb/dvb-usb/dw2102.c @@ -30,6 +30,9 @@ #include "stb6100_proc.h" #include "m88rs2000.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + #ifndef USB_PID_DW2102 #define USB_PID_DW2102 0x2102 #endif @@ -308,7 +311,14 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms case 2: { /* read */ /* first write first register number */ - u8 ibuf[msg[1].len + 2], obuf[3]; + u8 ibuf[MAX_XFER_SIZE], obuf[3]; + + if (2 + msg[1].len > sizeof(ibuf)) { + warn("i2c rd: len=%d is too big!\n", + msg[1].len); + return -EOPNOTSUPP; + } + obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; obuf[2] = msg[0].buf[0]; @@ -325,7 +335,14 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms switch (msg[0].addr) { case 0x68: { /* write to register */ - u8 obuf[msg[0].len + 2]; + u8 obuf[MAX_XFER_SIZE]; + + if (2 + msg[0].len > sizeof(obuf)) { + warn("i2c wr: len=%d is too big!\n", + msg[1].len); + return -EOPNOTSUPP; + } + obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); @@ -335,7 +352,14 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms } case 0x61: { /* write to tuner */ - u8 obuf[msg[0].len + 2]; + u8 obuf[MAX_XFER_SIZE]; + + if (2 + msg[0].len > sizeof(obuf)) { + warn("i2c wr: len=%d is too big!\n", + msg[1].len); + return -EOPNOTSUPP; + } + obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); @@ -401,7 +425,14 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i default: { if (msg[j].flags == I2C_M_RD) { /* read registers */ - u8 ibuf[msg[j].len + 2]; + u8 ibuf[MAX_XFER_SIZE]; + + if (2 + msg[j].len > sizeof(ibuf)) { + warn("i2c rd: len=%d is too big!\n", + msg[j].len); + return -EOPNOTSUPP; + } + dw210x_op_rw(d->udev, 0xc3, (msg[j].addr << 1) + 1, 0, ibuf, msg[j].len + 2, @@ -430,7 +461,14 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i } while (len > 0); } else { /* write registers */ - u8 obuf[msg[j].len + 2]; + u8 obuf[MAX_XFER_SIZE]; + + if (2 + msg[j].len > sizeof(obuf)) { + warn("i2c wr: len=%d is too big!\n", + msg[j].len); + return -EOPNOTSUPP; + } + obuf[0] = msg[j].addr << 1; obuf[1] = msg[j].len; memcpy(obuf + 2, msg[j].buf, msg[j].len); @@ -463,7 +501,13 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], case 2: { /* read */ /* first write first register number */ - u8 ibuf[msg[1].len + 2], obuf[3]; + u8 ibuf[MAX_XFER_SIZE], obuf[3]; + + if (2 + msg[1].len > sizeof(ibuf)) { + warn("i2c rd: len=%d is too big!\n", + msg[1].len); + return -EOPNOTSUPP; + } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; obuf[2] = msg[0].buf[0]; @@ -481,7 +525,13 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], case 0x60: case 0x0c: { /* write to register */ - u8 obuf[msg[0].len + 2]; + u8 obuf[MAX_XFER_SIZE]; + + if (2 + msg[0].len > sizeof(obuf)) { + warn("i2c wr: len=%d is too big!\n", + msg[0].len); + return -EOPNOTSUPP; + } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); @@ -563,7 +613,14 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], default: { if (msg[j].flags == I2C_M_RD) { /* read registers */ - u8 
ibuf[msg[j].len]; + u8 ibuf[MAX_XFER_SIZE]; + + if (msg[j].len > sizeof(ibuf)) { + warn("i2c rd: len=%d is too big!\n", + msg[j].len); + return -EOPNOTSUPP; + } + dw210x_op_rw(d->udev, 0x91, 0, 0, ibuf, msg[j].len, DW210X_READ_MSG); @@ -590,7 +647,14 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], } while (len > 0); } else if (j < (num - 1)) { /* write register addr before read */ - u8 obuf[msg[j].len + 2]; + u8 obuf[MAX_XFER_SIZE]; + + if (2 + msg[j].len > sizeof(obuf)) { + warn("i2c wr: len=%d is too big!\n", + msg[j].len); + return -EOPNOTSUPP; + } + obuf[0] = msg[j + 1].len; obuf[1] = (msg[j].addr << 1); memcpy(obuf + 2, msg[j].buf, msg[j].len); @@ -602,7 +666,13 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], break; } else { /* write registers */ - u8 obuf[msg[j].len + 2]; + u8 obuf[MAX_XFER_SIZE]; + + if (2 + msg[j].len > sizeof(obuf)) { + warn("i2c wr: len=%d is too big!\n", + msg[j].len); + return -EOPNOTSUPP; + } obuf[0] = msg[j].len + 1; obuf[1] = (msg[j].addr << 1); memcpy(obuf + 2, msg[j].buf, msg[j].len); @@ -955,9 +1025,10 @@ static struct ds3000_config dw2104_ds3000_config = { .demod_address = 0x68, }; -static struct ts2020_config dw2104_ts2020_config = { +static struct ts2020_config dw2104_ts2020_config = { .tuner_address = 0x60, .clk_out_div = 1, + .frequency_div = 1060000, }; static struct ds3000_config s660_ds3000_config = { @@ -966,6 +1037,12 @@ static struct ds3000_config s660_ds3000_config = { .set_lock_led = dw210x_led_ctrl, }; +static struct ts2020_config s660_ts2020_config = { + .tuner_address = 0x60, + .clk_out_div = 1, + .frequency_div = 1146000, +}; + static struct stv0900_config dw2104a_stv0900_config = { .demod_address = 0x6a, .demod_mode = 0, @@ -1205,7 +1282,7 @@ static int ds3000_frontend_attach(struct dvb_usb_adapter *d) if (d->fe_adap[0].fe == NULL) return -EIO; - dvb_attach(ts2020_attach, d->fe_adap[0].fe, &dw2104_ts2020_config, + dvb_attach(ts2020_attach, d->fe_adap[0].fe, &s660_ts2020_config, &d->dev->i2c_adap); st->old_set_voltage = d->fe_adap[0].fe->ops.set_voltage; @@ -1213,7 +1290,7 @@ static int ds3000_frontend_attach(struct dvb_usb_adapter *d) dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); - info("Attached ds3000+ds2020!\n"); + info("Attached ds3000+ts2020!\n"); return 0; } diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c index 73cc50afa5e1..d666741797d4 100644 --- a/drivers/media/usb/em28xx/em28xx-camera.c +++ b/drivers/media/usb/em28xx/em28xx-camera.c @@ -22,6 +22,7 @@ #include <linux/i2c.h> #include <media/soc_camera.h> #include <media/mt9v011.h> +#include <media/v4l2-clk.h> #include <media/v4l2-common.h> #include "em28xx.h" @@ -47,6 +48,7 @@ static struct soc_camera_link camlink = { .bus_id = 0, .flags = 0, .module_name = "em28xx", + .unbalanced_power = true, }; @@ -325,13 +327,24 @@ int em28xx_detect_sensor(struct em28xx *dev) int em28xx_init_camera(struct em28xx *dev) { + char clk_name[V4L2_SUBDEV_NAME_SIZE]; + struct i2c_client *client = &dev->i2c_client[dev->def_i2c_bus]; + struct i2c_adapter *adap = &dev->i2c_adap[dev->def_i2c_bus]; + int ret = 0; + + v4l2_clk_name_i2c(clk_name, sizeof(clk_name), + i2c_adapter_id(adap), client->addr); + dev->clk = v4l2_clk_register_fixed(clk_name, "mclk", -EINVAL); + if (IS_ERR(dev->clk)) + return PTR_ERR(dev->clk); + switch (dev->em28xx_sensor) { case EM28XX_MT9V011: { struct mt9v011_platform_data pdata; struct i2c_board_info mt9v011_info = { .type = "mt9v011", - .addr = 
dev->i2c_client[dev->def_i2c_bus].addr, + .addr = client->addr, .platform_data = &pdata, }; @@ -352,10 +365,11 @@ int em28xx_init_camera(struct em28xx *dev) dev->sensor_xtal = 4300000; pdata.xtal = dev->sensor_xtal; if (NULL == - v4l2_i2c_new_subdev_board(&dev->v4l2_dev, - &dev->i2c_adap[dev->def_i2c_bus], - &mt9v011_info, NULL)) - return -ENODEV; + v4l2_i2c_new_subdev_board(&dev->v4l2_dev, adap, + &mt9v011_info, NULL)) { + ret = -ENODEV; + break; + } /* probably means GRGB 16 bit bayer */ dev->vinmode = 0x0d; dev->vinctl = 0x00; @@ -391,7 +405,7 @@ int em28xx_init_camera(struct em28xx *dev) struct i2c_board_info ov2640_info = { .type = "ov2640", .flags = I2C_CLIENT_SCCB, - .addr = dev->i2c_client[dev->def_i2c_bus].addr, + .addr = client->addr, .platform_data = &camlink, }; struct v4l2_mbus_framefmt fmt; @@ -408,9 +422,12 @@ int em28xx_init_camera(struct em28xx *dev) dev->sensor_yres = 480; subdev = - v4l2_i2c_new_subdev_board(&dev->v4l2_dev, - &dev->i2c_adap[dev->def_i2c_bus], + v4l2_i2c_new_subdev_board(&dev->v4l2_dev, adap, &ov2640_info, NULL); + if (NULL == subdev) { + ret = -ENODEV; + break; + } fmt.code = V4L2_MBUS_FMT_YUYV8_2X8; fmt.width = 640; @@ -427,8 +444,13 @@ int em28xx_init_camera(struct em28xx *dev) } case EM28XX_NOSENSOR: default: - return -EINVAL; + ret = -EINVAL; } - return 0; + if (ret < 0) { + v4l2_clk_unregister_fixed(dev->clk); + dev->clk = NULL; + } + + return ret; } diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index dc65742c4bbc..a5196697627f 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -36,6 +36,7 @@ #include <media/tvaudio.h> #include <media/i2c-addr.h> #include <media/tveeprom.h> +#include <media/v4l2-clk.h> #include <media/v4l2-common.h> #include "em28xx.h" @@ -95,8 +96,8 @@ static struct em28xx_reg_seq default_digital[] = { /* Board Hauppauge WinTV HVR 900 analog */ static struct em28xx_reg_seq hauppauge_wintv_hvr_900_analog[] = { {EM2820_R08_GPIO_CTRL, 0x2d, ~EM_GPIO_4, 10}, - {0x05, 0xff, 0x10, 10}, - { -1, -1, -1, -1}, + { 0x05, 0xff, 0x10, 10}, + { -1, -1, -1, -1}, }; /* Board Hauppauge WinTV HVR 900 digital */ @@ -104,20 +105,20 @@ static struct em28xx_reg_seq hauppauge_wintv_hvr_900_digital[] = { {EM2820_R08_GPIO_CTRL, 0x2e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x04, 0x0f, 10}, {EM2880_R04_GPO, 0x0c, 0x0f, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* Board Hauppauge WinTV HVR 900 (R2) digital */ static struct em28xx_reg_seq hauppauge_wintv_hvr_900R2_digital[] = { {EM2820_R08_GPIO_CTRL, 0x2e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x0c, 0x0f, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* Boards - EM2880 MSI DIGIVOX AD and EM2880_BOARD_MSI_DIGIVOX_AD_II */ static struct em28xx_reg_seq em2880_msi_digivox_ad_analog[] = { - {EM2820_R08_GPIO_CTRL, 0x69, ~EM_GPIO_4, 10}, - { -1, -1, -1, -1}, + {EM2820_R08_GPIO_CTRL, 0x69, ~EM_GPIO_4, 10}, + { -1, -1, -1, -1}, }; /* Boards - EM2880 MSI DIGIVOX AD and EM2880_BOARD_MSI_DIGIVOX_AD_II */ @@ -132,7 +133,7 @@ static struct em28xx_reg_seq em2882_kworld_315u_digital[] = { {EM2880_R04_GPO, 0x04, 0xff, 10}, {EM2880_R04_GPO, 0x0c, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0x7e, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; static struct em28xx_reg_seq em2882_kworld_315u_tuner_gpio[] = { @@ -140,19 +141,19 @@ static struct em28xx_reg_seq em2882_kworld_315u_tuner_gpio[] = { {EM2880_R04_GPO, 0x0c, 0xff, 10}, {EM2880_R04_GPO, 0x08, 0xff, 10}, {EM2880_R04_GPO, 0x0c, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, 
-1}, }; static struct em28xx_reg_seq kworld_330u_analog[] = { {EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x00, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; static struct em28xx_reg_seq kworld_330u_digital[] = { {EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x08, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* Evga inDtube @@ -170,11 +171,11 @@ static struct em28xx_reg_seq evga_indtube_digital[] = { {EM2820_R08_GPIO_CTRL, 0x7a, 0xff, 1}, {EM2880_R04_GPO, 0x04, 0xff, 10}, {EM2880_R04_GPO, 0x0c, 0xff, 1}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* - * KWorld PlusTV 340U and UB435-Q (ATSC) GPIOs map: + * KWorld PlusTV 340U, UB435-Q and UB435-Q V2 (ATSC) GPIOs map: * EM_GPIO_0 - currently unknown * EM_GPIO_1 - LED disable/enable (1 = off, 0 = on) * EM_GPIO_2 - currently unknown @@ -185,8 +186,8 @@ static struct em28xx_reg_seq evga_indtube_digital[] = { * EM_GPIO_7 - currently unknown */ static struct em28xx_reg_seq kworld_a340_digital[] = { - {EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10}, - { -1, -1, -1, -1}, + {EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10}, + { -1, -1, -1, -1}, }; /* Pinnacle Hybrid Pro eb1a:2881 */ @@ -205,13 +206,13 @@ static struct em28xx_reg_seq pinnacle_hybrid_pro_digital[] = { static struct em28xx_reg_seq terratec_cinergy_USB_XS_FR_analog[] = { {EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x00, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; static struct em28xx_reg_seq terratec_cinergy_USB_XS_FR_digital[] = { {EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x08, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* eb1a:2868 Reddo DVB-C USB TV Box @@ -225,7 +226,7 @@ static struct em28xx_reg_seq reddo_dvb_c_usb_box[] = { {EM2820_R08_GPIO_CTRL, 0x7f, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0x6f, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10}, - {-1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* Callback for the most boards */ @@ -233,23 +234,23 @@ static struct em28xx_reg_seq default_tuner_gpio[] = { {EM2820_R08_GPIO_CTRL, EM_GPIO_4, EM_GPIO_4, 10}, {EM2820_R08_GPIO_CTRL, 0, EM_GPIO_4, 10}, {EM2820_R08_GPIO_CTRL, EM_GPIO_4, EM_GPIO_4, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* Mute/unmute */ static struct em28xx_reg_seq compro_unmute_tv_gpio[] = { - {EM2820_R08_GPIO_CTRL, 5, 7, 10}, - { -1, -1, -1, -1}, + {EM2820_R08_GPIO_CTRL, 5, 7, 10}, + { -1, -1, -1, -1}, }; static struct em28xx_reg_seq compro_unmute_svid_gpio[] = { - {EM2820_R08_GPIO_CTRL, 4, 7, 10}, - { -1, -1, -1, -1}, + {EM2820_R08_GPIO_CTRL, 4, 7, 10}, + { -1, -1, -1, -1}, }; static struct em28xx_reg_seq compro_mute_gpio[] = { - {EM2820_R08_GPIO_CTRL, 6, 7, 10}, - { -1, -1, -1, -1}, + {EM2820_R08_GPIO_CTRL, 6, 7, 10}, + { -1, -1, -1, -1}, }; /* Terratec AV350 */ @@ -279,21 +280,21 @@ static struct em28xx_reg_seq vc211a_enable[] = { static struct em28xx_reg_seq dikom_dk300_digital[] = { {EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x08, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* Reset for the most [digital] boards */ static struct em28xx_reg_seq leadership_digital[] = { {EM2874_R80_GPIO_P0_CTRL, 0x70, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; static struct em28xx_reg_seq leadership_reset[] = { {EM2874_R80_GPIO_P0_CTRL, 0xf0, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xb0, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xf0, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* 2013:024f PCTV nanoStick T2 290e @@ -304,7 +305,7 @@ static struct em28xx_reg_seq pctv_290e[] 
= { {EM2874_R80_GPIO_P0_CTRL, 0x00, 0xff, 80}, {EM2874_R80_GPIO_P0_CTRL, 0x40, 0xff, 80}, /* GPIO_6 = 1 */ {EM2874_R80_GPIO_P0_CTRL, 0xc0, 0xff, 80}, /* GPIO_7 = 1 */ - {-1, -1, -1, -1}, + { -1, -1, -1, -1}, }; #if 0 @@ -313,14 +314,14 @@ static struct em28xx_reg_seq terratec_h5_gpio[] = { {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xf2, 0xff, 50}, {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 50}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; static struct em28xx_reg_seq terratec_h5_digital[] = { {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; #endif @@ -335,12 +336,12 @@ static struct em28xx_reg_seq terratec_h5_digital[] = { * GPIO_7 - LED (green LED) */ static struct em28xx_reg_seq pctv_460e[] = { - {EM2874_R80_GPIO_P0_CTRL, 0x01, 0xff, 50}, - {0x0d, 0xff, 0xff, 50}, - {EM2874_R80_GPIO_P0_CTRL, 0x41, 0xff, 50}, /* GPIO_6=1 */ - {0x0d, 0x42, 0xff, 50}, - {EM2874_R80_GPIO_P0_CTRL, 0x61, 0xff, 50}, /* GPIO_5=1 */ - { -1, -1, -1, -1}, + {EM2874_R80_GPIO_P0_CTRL, 0x01, 0xff, 50}, + { 0x0d, 0xff, 0xff, 50}, + {EM2874_R80_GPIO_P0_CTRL, 0x41, 0xff, 50}, /* GPIO_6=1 */ + { 0x0d, 0x42, 0xff, 50}, + {EM2874_R80_GPIO_P0_CTRL, 0x61, 0xff, 50}, /* GPIO_5=1 */ + { -1, -1, -1, -1}, }; static struct em28xx_reg_seq c3tech_digital_duo_digital[] = { @@ -352,7 +353,7 @@ static struct em28xx_reg_seq c3tech_digital_duo_digital[] = { {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xbe, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 20}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; #if 0 @@ -361,14 +362,14 @@ static struct em28xx_reg_seq hauppauge_930c_gpio[] = { {EM2874_R80_GPIO_P0_CTRL, 0x4f, 0xff, 10}, /* xc5000 reset */ {EM2874_R80_GPIO_P0_CTRL, 0x6f, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0x4f, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; static struct em28xx_reg_seq hauppauge_930c_digital[] = { {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; #endif @@ -378,10 +379,10 @@ static struct em28xx_reg_seq hauppauge_930c_digital[] = { * GPIO_7 - LED, 0=active */ static struct em28xx_reg_seq maxmedia_ub425_tc[] = { - {EM2874_R80_GPIO_P0_CTRL, 0x83, 0xff, 100}, - {EM2874_R80_GPIO_P0_CTRL, 0xc3, 0xff, 100}, /* GPIO_6 = 1 */ - {EM2874_R80_GPIO_P0_CTRL, 0x43, 0xff, 000}, /* GPIO_7 = 0 */ - {-1, -1, -1, -1}, + {EM2874_R80_GPIO_P0_CTRL, 0x83, 0xff, 100}, + {EM2874_R80_GPIO_P0_CTRL, 0xc3, 0xff, 100}, /* GPIO_6 = 1 */ + {EM2874_R80_GPIO_P0_CTRL, 0x43, 0xff, 000}, /* GPIO_7 = 0 */ + { -1, -1, -1, -1}, }; /* 2304:0242 PCTV QuatroStick (510e) @@ -391,10 +392,10 @@ static struct em28xx_reg_seq maxmedia_ub425_tc[] = { * GPIO_7: LED, 1=active */ static struct em28xx_reg_seq pctv_510e[] = { - {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100}, - {EM2874_R80_GPIO_P0_CTRL, 0x14, 0xff, 100}, /* GPIO_2 = 1 */ - {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */ - { -1, -1, -1, -1}, + {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100}, + {EM2874_R80_GPIO_P0_CTRL, 0x14, 0xff, 100}, /* GPIO_2 = 1 */ + {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */ + { -1, -1, -1, -1}, }; /* 2013:0251 PCTV QuatroStick nano (520e) @@ -404,11 +405,11 @@ static struct em28xx_reg_seq pctv_510e[] = { * GPIO_7: LED, 1=active */ static struct em28xx_reg_seq pctv_520e[] = { - {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100}, - {EM2874_R80_GPIO_P0_CTRL, 0x14, 
0xff, 100}, /* GPIO_2 = 1 */ - {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */ - {EM2874_R80_GPIO_P0_CTRL, 0xd4, 0xff, 000}, /* GPIO_7 = 1 */ - { -1, -1, -1, -1}, + {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100}, + {EM2874_R80_GPIO_P0_CTRL, 0x14, 0xff, 100}, /* GPIO_2 = 1 */ + {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */ + {EM2874_R80_GPIO_P0_CTRL, 0xd4, 0xff, 000}, /* GPIO_7 = 1 */ + { -1, -1, -1, -1}, }; /* @@ -2030,6 +2031,18 @@ struct em28xx_board em28xx_boards[] = { .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, + /* + * 1b80:e346 KWorld USB ATSC TV Stick UB435-Q V2 + * Empia EM2874B + LG DT3305 + NXP TDA18271HDC2 + */ + [EM2874_BOARD_KWORLD_UB435Q_V2] = { + .name = "KWorld USB ATSC TV Stick UB435-Q V2", + .tuner_type = TUNER_ABSENT, + .has_dvb = 1, + .dvb_gpio = kworld_a340_digital, + .tuner_gpio = default_tuner_gpio, + .def_i2c_bus = 1, + }, }; const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards); @@ -2173,6 +2186,8 @@ struct usb_device_id em28xx_id_table[] = { .driver_info = EM2860_BOARD_GADMEI_UTV330 }, { USB_DEVICE(0x1b80, 0xa340), .driver_info = EM2870_BOARD_KWORLD_A340 }, + { USB_DEVICE(0x1b80, 0xe346), + .driver_info = EM2874_BOARD_KWORLD_UB435Q_V2 }, { USB_DEVICE(0x2013, 0x024f), .driver_info = EM28174_BOARD_PCTV_290E }, { USB_DEVICE(0x2013, 0x024c), @@ -2857,6 +2872,8 @@ void em28xx_release_resources(struct em28xx *dev) if (dev->def_i2c_bus) em28xx_i2c_unregister(dev, 1); em28xx_i2c_unregister(dev, 0); + if (dev->clk) + v4l2_clk_unregister_fixed(dev->clk); v4l2_ctrl_handler_free(&dev->ctrl_handler); diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c index bb1e8dca80cd..344042bb845c 100644 --- a/drivers/media/usb/em28xx/em28xx-dvb.c +++ b/drivers/media/usb/em28xx/em28xx-dvb.c @@ -298,6 +298,18 @@ static struct lgdt3305_config em2870_lgdt3304_dev = { .qam_if_khz = 4000, }; +static struct lgdt3305_config em2874_lgdt3305_dev = { + .i2c_addr = 0x0e, + .demod_chip = LGDT3305, + .spectral_inversion = 1, + .deny_i2c_rptr = 0, + .mpeg_mode = LGDT3305_MPEG_SERIAL, + .tpclk_edge = LGDT3305_TPCLK_FALLING_EDGE, + .tpvalid_polarity = LGDT3305_TP_VALID_HIGH, + .vsb_if_khz = 3250, + .qam_if_khz = 4000, +}; + static struct s921_config sharp_isdbt = { .demod_address = 0x30 >> 1 }; @@ -329,6 +341,11 @@ static struct tda18271_config kworld_a340_config = { .std_map = &kworld_a340_std_map, }; +static struct tda18271_config kworld_ub435q_v2_config = { + .std_map = &kworld_a340_std_map, + .gate = TDA18271_GATE_DIGITAL, +}; + static struct zl10353_config em28xx_zl10353_xc3028_no_i2c_gate = { .demod_address = (0x1e >> 1), .no_tuner = 1, @@ -384,7 +401,10 @@ static struct drxk_config maxmedia_ub425_tc_drxk = { .adr = 0x29, .single_master = 1, .no_i2c_bridge = 1, + .microcode_name = "dvb-demod-drxk-01.fw", + .chunk_size = 62, .load_firmware_sync = true, + .qam_demod_parameter_count = 2, }; static struct drxk_config pctv_520e_drxk = { @@ -424,7 +444,7 @@ static void hauppauge_hvr930c_init(struct em28xx *dev) {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0x65}, {EM2874_R80_GPIO_P0_CTRL, 0xfb, 0xff, 0x32}, {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0xb8}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; struct em28xx_reg_seq hauppauge_hvr930c_end[] = { {EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x01}, @@ -439,7 +459,7 @@ static void hauppauge_hvr930c_init(struct em28xx *dev) {EM2874_R80_GPIO_P0_CTRL, 0xcf, 0xff, 0x0b}, {EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x65}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; struct { @@ -491,13 
+511,13 @@ static void terratec_h5_init(struct em28xx *dev) {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xf2, 0xff, 50}, {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; struct em28xx_reg_seq terratec_h5_end[] = { {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 50}, {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; struct { unsigned char r[4]; @@ -547,12 +567,12 @@ static void terratec_htc_stick_init(struct em28xx *dev) {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 50}, {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; struct em28xx_reg_seq terratec_htc_stick_end[] = { {EM2874_R80_GPIO_P0_CTRL, 0xb6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 50}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* @@ -594,13 +614,13 @@ static void terratec_htc_usb_xs_init(struct em28xx *dev) {EM2874_R80_GPIO_P0_CTRL, 0xb2, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xb2, 0xff, 50}, {EM2874_R80_GPIO_P0_CTRL, 0xb6, 0xff, 100}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; struct em28xx_reg_seq terratec_htc_usb_xs_end[] = { {EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 50}, {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* @@ -1227,18 +1247,14 @@ static int em28xx_dvb_init(struct em28xx *dev) dvb->fe[0]->ops.i2c_gate_ctrl = NULL; /* attach tuner */ - if (!dvb_attach(tda18271c2dd_attach, dvb->fe[0], - &dev->i2c_adap[dev->def_i2c_bus], 0x60)) { + if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60, + &dev->i2c_adap[dev->def_i2c_bus], + &em28xx_cxd2820r_tda18271_config)) { dvb_frontend_detach(dvb->fe[0]); result = -EINVAL; goto out_free; } } - - /* TODO: we need drx-3913k firmware in order to support DVB-T */ - em28xx_info("MaxMedia UB425-TC/Delock 61959: only DVB-C " \ - "supported by that driver version\n"); - break; case EM2884_BOARD_PCTV_510E: case EM2884_BOARD_PCTV_520E: @@ -1297,6 +1313,23 @@ static int em28xx_dvb_init(struct em28xx *dev) goto out_free; } break; + case EM2874_BOARD_KWORLD_UB435Q_V2: + dvb->fe[0] = dvb_attach(lgdt3305_attach, + &em2874_lgdt3305_dev, + &dev->i2c_adap[dev->def_i2c_bus]); + if (!dvb->fe[0]) { + result = -EINVAL; + goto out_free; + } + + /* Attach the demodulator. */ + if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60, + &dev->i2c_adap[dev->def_i2c_bus], + &kworld_ub435q_v2_config)) { + result = -EINVAL; + goto out_free; + } + break; default: em28xx_errdev("/2: The frontend of your DVB/ATSC card" " isn't supported yet\n"); diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c index 9d103344f34a..fc5d60efd4ab 100644 --- a/drivers/media/usb/em28xx/em28xx-video.c +++ b/drivers/media/usb/em28xx/em28xx-video.c @@ -638,7 +638,7 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count) if (rc) return rc; - if (dev->streaming_users++ == 0) { + if (dev->streaming_users == 0) { /* First active streaming user, so allocate all the URBs */ /* Allocate the USB bandwidth */ @@ -657,7 +657,7 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count) dev->packet_multiplier, em28xx_urb_data_copy); if (rc < 0) - goto fail; + return rc; /* * djh: it's not clear whether this code is still needed. 
I'm @@ -675,7 +675,8 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count) v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f); } -fail: + dev->streaming_users++; + return rc; } diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h index 205e9038b1c0..f8726ad5d0a8 100644 --- a/drivers/media/usb/em28xx/em28xx.h +++ b/drivers/media/usb/em28xx/em28xx.h @@ -131,6 +131,7 @@ #define EM2884_BOARD_TERRATEC_HTC_USB_XS 87 #define EM2884_BOARD_C3TECH_DIGITAL_DUO 88 #define EM2874_BOARD_DELOCK_61959 89 +#define EM2874_BOARD_KWORLD_UB435Q_V2 90 /* Limits minimum and default number of buffers */ #define EM28XX_MIN_BUF 4 @@ -492,6 +493,7 @@ struct em28xx { struct v4l2_device v4l2_dev; struct v4l2_ctrl_handler ctrl_handler; + struct v4l2_clk *clk; struct em28xx_board board; /* Webcam specific fields */ diff --git a/drivers/media/usb/gspca/conex.c b/drivers/media/usb/gspca/conex.c index 38714df31ac4..2e15c80d6e3d 100644 --- a/drivers/media/usb/gspca/conex.c +++ b/drivers/media/usb/gspca/conex.c @@ -783,7 +783,8 @@ static int sd_start(struct gspca_dev *gspca_dev) struct sd *sd = (struct sd *) gspca_dev; /* create the JPEG header */ - jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, + jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width, 0x22); /* JPEG 411 */ jpeg_set_qual(sd->jpeg_hdr, QUALITY); diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c index 064b53043b15..f23df4a9d8c5 100644 --- a/drivers/media/usb/gspca/cpia1.c +++ b/drivers/media/usb/gspca/cpia1.c @@ -1553,9 +1553,9 @@ static int sd_start(struct gspca_dev *gspca_dev) sd->params.format.videoSize = VIDEOSIZE_CIF; sd->params.roi.colEnd = sd->params.roi.colStart + - (gspca_dev->width >> 3); + (gspca_dev->pixfmt.width >> 3); sd->params.roi.rowEnd = sd->params.roi.rowStart + - (gspca_dev->height >> 2); + (gspca_dev->pixfmt.height >> 2); /* And now set the camera to a known state */ ret = do_command(gspca_dev, CPIA_COMMAND_SetGrabMode, diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c index 048507b27bb2..f3a7ace0fac9 100644 --- a/drivers/media/usb/gspca/gspca.c +++ b/drivers/media/usb/gspca/gspca.c @@ -504,8 +504,7 @@ static int frame_alloc(struct gspca_dev *gspca_dev, struct file *file, unsigned int frsz; int i; - i = gspca_dev->curr_mode; - frsz = gspca_dev->cam.cam_mode[i].sizeimage; + frsz = gspca_dev->pixfmt.sizeimage; PDEBUG(D_STREAM, "frame alloc frsz: %d", frsz); frsz = PAGE_ALIGN(frsz); if (count >= GSPCA_MAX_FRAMES) @@ -627,16 +626,14 @@ static struct usb_host_endpoint *alt_xfer(struct usb_host_interface *alt, static u32 which_bandwidth(struct gspca_dev *gspca_dev) { u32 bandwidth; - int i; /* get the (max) image size */ - i = gspca_dev->curr_mode; - bandwidth = gspca_dev->cam.cam_mode[i].sizeimage; + bandwidth = gspca_dev->pixfmt.sizeimage; /* if the image is compressed, estimate its mean size */ if (!gspca_dev->cam.needs_full_bandwidth && - bandwidth < gspca_dev->cam.cam_mode[i].width * - gspca_dev->cam.cam_mode[i].height) + bandwidth < gspca_dev->pixfmt.width * + gspca_dev->pixfmt.height) bandwidth = bandwidth * 3 / 8; /* 0.375 */ /* estimate the frame rate */ @@ -650,7 +647,7 @@ static u32 which_bandwidth(struct gspca_dev *gspca_dev) /* don't hope more than 15 fps with USB 1.1 and * image resolution >= 640x480 */ - if (gspca_dev->width >= 640 + if (gspca_dev->pixfmt.width >= 640 && gspca_dev->dev->speed == USB_SPEED_FULL) bandwidth *= 15; /* 15 fps */ else @@ -982,9 +979,7 @@ 
static void gspca_set_default_mode(struct gspca_dev *gspca_dev) i = gspca_dev->cam.nmodes - 1; /* take the highest mode */ gspca_dev->curr_mode = i; - gspca_dev->width = gspca_dev->cam.cam_mode[i].width; - gspca_dev->height = gspca_dev->cam.cam_mode[i].height; - gspca_dev->pixfmt = gspca_dev->cam.cam_mode[i].pixelformat; + gspca_dev->pixfmt = gspca_dev->cam.cam_mode[i]; /* does nothing if ctrl_handler == NULL */ v4l2_ctrl_handler_setup(gspca_dev->vdev.ctrl_handler); @@ -1105,10 +1100,8 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct gspca_dev *gspca_dev = video_drvdata(file); - int mode; - mode = gspca_dev->curr_mode; - fmt->fmt.pix = gspca_dev->cam.cam_mode[mode]; + fmt->fmt.pix = gspca_dev->pixfmt; /* some drivers use priv internally, zero it before giving it to userspace */ fmt->fmt.pix.priv = 0; @@ -1140,6 +1133,12 @@ static int try_fmt_vid_cap(struct gspca_dev *gspca_dev, mode = mode2; } fmt->fmt.pix = gspca_dev->cam.cam_mode[mode]; + if (gspca_dev->sd_desc->try_fmt) { + /* pass original resolution to subdriver try_fmt */ + fmt->fmt.pix.width = w; + fmt->fmt.pix.height = h; + gspca_dev->sd_desc->try_fmt(gspca_dev, fmt); + } /* some drivers use priv internally, zero it before giving it to userspace */ fmt->fmt.pix.priv = 0; @@ -1178,19 +1177,16 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, goto out; } - if (ret == gspca_dev->curr_mode) { - ret = 0; - goto out; /* same mode */ - } - if (gspca_dev->streaming) { ret = -EBUSY; goto out; } - gspca_dev->width = fmt->fmt.pix.width; - gspca_dev->height = fmt->fmt.pix.height; - gspca_dev->pixfmt = fmt->fmt.pix.pixelformat; gspca_dev->curr_mode = ret; + if (gspca_dev->sd_desc->try_fmt) + /* subdriver try_fmt can modify format parameters */ + gspca_dev->pixfmt = fmt->fmt.pix; + else + gspca_dev->pixfmt = gspca_dev->cam.cam_mode[ret]; ret = 0; out: @@ -1205,6 +1201,9 @@ static int vidioc_enum_framesizes(struct file *file, void *priv, int i; __u32 index = 0; + if (gspca_dev->sd_desc->enum_framesizes) + return gspca_dev->sd_desc->enum_framesizes(gspca_dev, fsize); + for (i = 0; i < gspca_dev->cam.nmodes; i++) { if (fsize->pixel_format != gspca_dev->cam.cam_mode[i].pixelformat) @@ -1471,8 +1470,9 @@ static int vidioc_streamon(struct file *file, void *priv, if (ret < 0) goto out; } - PDEBUG_MODE(gspca_dev, D_STREAM, "stream on OK", gspca_dev->pixfmt, - gspca_dev->width, gspca_dev->height); + PDEBUG_MODE(gspca_dev, D_STREAM, "stream on OK", + gspca_dev->pixfmt.pixelformat, + gspca_dev->pixfmt.width, gspca_dev->pixfmt.height); ret = 0; out: mutex_unlock(&gspca_dev->queue_lock); diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h index ac0b11f46f50..300642dc1a17 100644 --- a/drivers/media/usb/gspca/gspca.h +++ b/drivers/media/usb/gspca/gspca.h @@ -88,6 +88,10 @@ typedef void (*cam_pkt_op) (struct gspca_dev *gspca_dev, typedef int (*cam_int_pkt_op) (struct gspca_dev *gspca_dev, u8 *data, int len); +typedef void (*cam_format_op) (struct gspca_dev *gspca_dev, + struct v4l2_format *fmt); +typedef int (*cam_frmsize_op) (struct gspca_dev *gspca_dev, + struct v4l2_frmsizeenum *fsize); /* subdriver description */ struct sd_desc { @@ -109,6 +113,8 @@ struct sd_desc { cam_set_jpg_op set_jcomp; cam_streamparm_op get_streamparm; cam_streamparm_op set_streamparm; + cam_format_op try_fmt; + cam_frmsize_op enum_framesizes; #ifdef CONFIG_VIDEO_ADV_DEBUG cam_set_reg_op set_register; cam_get_reg_op get_register; @@ -183,9 +189,7 @@ struct gspca_dev { __u8 streaming; /* 
protected by both mutexes (*) */ __u8 curr_mode; /* current camera mode */ - __u32 pixfmt; /* current mode parameters */ - __u16 width; - __u16 height; + struct v4l2_pix_format pixfmt; /* current mode parameters */ __u32 sequence; /* frame sequence number */ wait_queue_head_t wq; /* wait queue */ diff --git a/drivers/media/usb/gspca/jeilinj.c b/drivers/media/usb/gspca/jeilinj.c index 8da3dde38385..19736e237b37 100644 --- a/drivers/media/usb/gspca/jeilinj.c +++ b/drivers/media/usb/gspca/jeilinj.c @@ -378,11 +378,12 @@ static int sd_start(struct gspca_dev *gspca_dev) struct sd *dev = (struct sd *) gspca_dev; /* create the JPEG header */ - jpeg_define(dev->jpeg_hdr, gspca_dev->height, gspca_dev->width, + jpeg_define(dev->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width, 0x21); /* JPEG 422 */ jpeg_set_qual(dev->jpeg_hdr, dev->quality); PDEBUG(D_STREAM, "Start streaming at %dx%d", - gspca_dev->height, gspca_dev->width); + gspca_dev->pixfmt.height, gspca_dev->pixfmt.width); jlj_start(gspca_dev); return gspca_dev->usb_err; } diff --git a/drivers/media/usb/gspca/jl2005bcd.c b/drivers/media/usb/gspca/jl2005bcd.c index fdaeeb14453f..5b481fa43099 100644 --- a/drivers/media/usb/gspca/jl2005bcd.c +++ b/drivers/media/usb/gspca/jl2005bcd.c @@ -455,7 +455,7 @@ static int sd_start(struct gspca_dev *gspca_dev) struct sd *sd = (struct sd *) gspca_dev; sd->cap_mode = gspca_dev->cam.cam_mode; - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 640: PDEBUG(D_STREAM, "Start streaming at vga resolution"); jl2005c_stream_start_vga_lg(gspca_dev); diff --git a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c index cfa4663f8934..27fcef11aef4 100644 --- a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c +++ b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c @@ -266,7 +266,7 @@ static int mt9m111_set_hvflip(struct gspca_dev *gspca_dev) return err; data[0] = MT9M111_RMB_OVER_SIZED; - if (gspca_dev->width == 640) { + if (gspca_dev->pixfmt.width == 640) { data[1] = MT9M111_RMB_ROW_SKIP_2X | MT9M111_RMB_COLUMN_SKIP_2X | (hflip << 1) | vflip; diff --git a/drivers/media/usb/gspca/mars.c b/drivers/media/usb/gspca/mars.c index ff2c5abf115b..779a8785f421 100644 --- a/drivers/media/usb/gspca/mars.c +++ b/drivers/media/usb/gspca/mars.c @@ -254,7 +254,8 @@ static int sd_start(struct gspca_dev *gspca_dev) int i; /* create the JPEG header */ - jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, + jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width, 0x21); /* JPEG 422 */ jpeg_set_qual(sd->jpeg_hdr, QUALITY); @@ -270,8 +271,8 @@ static int sd_start(struct gspca_dev *gspca_dev) data[0] = 0x00; /* address */ data[1] = 0x0c | 0x01; /* reg 0 */ data[2] = 0x01; /* reg 1 */ - data[3] = gspca_dev->width / 8; /* h_size , reg 2 */ - data[4] = gspca_dev->height / 8; /* v_size , reg 3 */ + data[3] = gspca_dev->pixfmt.width / 8; /* h_size , reg 2 */ + data[4] = gspca_dev->pixfmt.height / 8; /* v_size , reg 3 */ data[5] = 0x30; /* reg 4, MI, PAS5101 : * 0x30 for 24mhz , 0x28 for 12mhz */ data[6] = 0x02; /* reg 5, H start - was 0x04 */ diff --git a/drivers/media/usb/gspca/mr97310a.c b/drivers/media/usb/gspca/mr97310a.c index 68bb2f359666..f006e29ca019 100644 --- a/drivers/media/usb/gspca/mr97310a.c +++ b/drivers/media/usb/gspca/mr97310a.c @@ -521,7 +521,7 @@ static int start_cif_cam(struct gspca_dev *gspca_dev) if (sd->sensor_type) data[5] = 0xbb; - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: data[9] |= 0x04; 
/* reg 8, 2:1 scale down from 320 */ /* fall thru */ @@ -618,7 +618,7 @@ static int start_vga_cam(struct gspca_dev *gspca_dev) data[10] = 0x18; } - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: data[9] |= 0x0c; /* reg 8, 4:1 scale down */ /* fall thru */ @@ -847,7 +847,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 expo, s32 min_clockdiv) u8 clockdiv = (60 * expo + 7999) / 8000; /* Limit framerate to not exceed usb bandwidth */ - if (clockdiv < min_clockdiv && gspca_dev->width >= 320) + if (clockdiv < min_clockdiv && gspca_dev->pixfmt.width >= 320) clockdiv = min_clockdiv; else if (clockdiv < 2) clockdiv = 2; diff --git a/drivers/media/usb/gspca/nw80x.c b/drivers/media/usb/gspca/nw80x.c index 44c9964b1b3e..599f755e75b8 100644 --- a/drivers/media/usb/gspca/nw80x.c +++ b/drivers/media/usb/gspca/nw80x.c @@ -1708,7 +1708,7 @@ static void setautogain(struct gspca_dev *gspca_dev, s32 val) reg_r(gspca_dev, 0x1004, 1); if (gspca_dev->usb_buf[0] & 0x04) { /* if AE_FULL_FRM */ - sd->ae_res = gspca_dev->width * gspca_dev->height; + sd->ae_res = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height; } else { /* get the AE window size */ reg_r(gspca_dev, 0x1011, 8); w = (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0] @@ -1717,7 +1717,8 @@ static void setautogain(struct gspca_dev *gspca_dev, s32 val) - (gspca_dev->usb_buf[7] << 8) - gspca_dev->usb_buf[6]; sd->ae_res = h * w; if (sd->ae_res == 0) - sd->ae_res = gspca_dev->width * gspca_dev->height; + sd->ae_res = gspca_dev->pixfmt.width * + gspca_dev->pixfmt.height; } } @@ -1856,21 +1857,21 @@ static int sd_start(struct gspca_dev *gspca_dev) reg_w_buf(gspca_dev, cmd); switch (sd->webcam) { case P35u: - if (gspca_dev->width == 320) + if (gspca_dev->pixfmt.width == 320) reg_w_buf(gspca_dev, nw801_start_qvga); else reg_w_buf(gspca_dev, nw801_start_vga); reg_w_buf(gspca_dev, nw801_start_2); break; case Kr651us: - if (gspca_dev->width == 320) + if (gspca_dev->pixfmt.width == 320) reg_w_buf(gspca_dev, kr651_start_qvga); else reg_w_buf(gspca_dev, kr651_start_vga); reg_w_buf(gspca_dev, kr651_start_2); break; case Proscope: - if (gspca_dev->width == 320) + if (gspca_dev->pixfmt.width == 320) reg_w_buf(gspca_dev, proscope_start_qvga); else reg_w_buf(gspca_dev, proscope_start_vga); diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c index 8937d79fd176..c95f32a0c02b 100644 --- a/drivers/media/usb/gspca/ov519.c +++ b/drivers/media/usb/gspca/ov519.c @@ -3468,7 +3468,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev) switch (sd->bridge) { case BRIDGE_OVFX2: - if (gspca_dev->width != 800) + if (gspca_dev->pixfmt.width != 800) gspca_dev->cam.bulk_size = OVFX2_BULK_SIZE; else gspca_dev->cam.bulk_size = 7 * 4096; @@ -3507,8 +3507,8 @@ static void ov511_mode_init_regs(struct sd *sd) /* Here I'm assuming that snapshot size == image size. * I hope that's always true. 
--claudio */ - hsegs = (sd->gspca_dev.width >> 3) - 1; - vsegs = (sd->gspca_dev.height >> 3) - 1; + hsegs = (sd->gspca_dev.pixfmt.width >> 3) - 1; + vsegs = (sd->gspca_dev.pixfmt.height >> 3) - 1; reg_w(sd, R511_CAM_PXCNT, hsegs); reg_w(sd, R511_CAM_LNCNT, vsegs); @@ -3541,7 +3541,7 @@ static void ov511_mode_init_regs(struct sd *sd) case SEN_OV7640: case SEN_OV7648: case SEN_OV76BE: - if (sd->gspca_dev.width == 320) + if (sd->gspca_dev.pixfmt.width == 320) interlaced = 1; /* Fall through */ case SEN_OV6630: @@ -3551,7 +3551,7 @@ static void ov511_mode_init_regs(struct sd *sd) case 30: case 25: /* Not enough bandwidth to do 640x480 @ 30 fps */ - if (sd->gspca_dev.width != 640) { + if (sd->gspca_dev.pixfmt.width != 640) { sd->clockdiv = 0; break; } @@ -3584,7 +3584,8 @@ static void ov511_mode_init_regs(struct sd *sd) /* Check if we have enough bandwidth to disable compression */ fps = (interlaced ? 60 : 30) / (sd->clockdiv + 1) + 1; - needed = fps * sd->gspca_dev.width * sd->gspca_dev.height * 3 / 2; + needed = fps * sd->gspca_dev.pixfmt.width * + sd->gspca_dev.pixfmt.height * 3 / 2; /* 1000 isoc packets/sec */ if (needed > 1000 * packet_size) { /* Enable Y and UV quantization and compression */ @@ -3646,8 +3647,8 @@ static void ov518_mode_init_regs(struct sd *sd) reg_w(sd, 0x38, 0x80); } - hsegs = sd->gspca_dev.width / 16; - vsegs = sd->gspca_dev.height / 4; + hsegs = sd->gspca_dev.pixfmt.width / 16; + vsegs = sd->gspca_dev.pixfmt.height / 4; reg_w(sd, 0x29, hsegs); reg_w(sd, 0x2a, vsegs); @@ -3686,7 +3687,8 @@ static void ov518_mode_init_regs(struct sd *sd) * happened to be with revision < 2 cams using an * OV7620 and revision 2 cams using an OV7620AE. */ - if (sd->revision > 0 && sd->gspca_dev.width == 640) { + if (sd->revision > 0 && + sd->gspca_dev.pixfmt.width == 640) { reg_w(sd, 0x20, 0x60); reg_w(sd, 0x21, 0x1f); } else { @@ -3812,8 +3814,8 @@ static void ov519_mode_init_regs(struct sd *sd) break; } - reg_w(sd, OV519_R10_H_SIZE, sd->gspca_dev.width >> 4); - reg_w(sd, OV519_R11_V_SIZE, sd->gspca_dev.height >> 3); + reg_w(sd, OV519_R10_H_SIZE, sd->gspca_dev.pixfmt.width >> 4); + reg_w(sd, OV519_R11_V_SIZE, sd->gspca_dev.pixfmt.height >> 3); if (sd->sensor == SEN_OV7670 && sd->gspca_dev.cam.cam_mode[sd->gspca_dev.curr_mode].priv) reg_w(sd, OV519_R12_X_OFFSETL, 0x04); @@ -3947,14 +3949,16 @@ static void mode_init_ov_sensor_regs(struct sd *sd) } case SEN_OV3610: if (qvga) { - xstart = (1040 - gspca_dev->width) / 2 + (0x1f << 4); - ystart = (776 - gspca_dev->height) / 2; + xstart = (1040 - gspca_dev->pixfmt.width) / 2 + + (0x1f << 4); + ystart = (776 - gspca_dev->pixfmt.height) / 2; } else { - xstart = (2076 - gspca_dev->width) / 2 + (0x10 << 4); - ystart = (1544 - gspca_dev->height) / 2; + xstart = (2076 - gspca_dev->pixfmt.width) / 2 + + (0x10 << 4); + ystart = (1544 - gspca_dev->pixfmt.height) / 2; } - xend = xstart + gspca_dev->width; - yend = ystart + gspca_dev->height; + xend = xstart + gspca_dev->pixfmt.width; + yend = ystart + gspca_dev->pixfmt.height; /* Writing to the COMH register resets the other windowing regs to their default values, so we must do this first. */ i2c_w_mask(sd, 0x12, qvga ? 
0x40 : 0x00, 0xf0); @@ -4229,8 +4233,8 @@ static int sd_start(struct gspca_dev *gspca_dev) struct sd *sd = (struct sd *) gspca_dev; /* Default for most bridges, allow bridge_mode_init_regs to override */ - sd->sensor_width = sd->gspca_dev.width; - sd->sensor_height = sd->gspca_dev.height; + sd->sensor_width = sd->gspca_dev.pixfmt.width; + sd->sensor_height = sd->gspca_dev.pixfmt.height; switch (sd->bridge) { case BRIDGE_OV511: @@ -4345,12 +4349,13 @@ static void ov511_pkt_scan(struct gspca_dev *gspca_dev, ov51x_handle_button(gspca_dev, (in[8] >> 2) & 1); if (in[8] & 0x80) { /* Frame end */ - if ((in[9] + 1) * 8 != gspca_dev->width || - (in[10] + 1) * 8 != gspca_dev->height) { + if ((in[9] + 1) * 8 != gspca_dev->pixfmt.width || + (in[10] + 1) * 8 != gspca_dev->pixfmt.height) { PERR("Invalid frame size, got: %dx%d," " requested: %dx%d\n", (in[9] + 1) * 8, (in[10] + 1) * 8, - gspca_dev->width, gspca_dev->height); + gspca_dev->pixfmt.width, + gspca_dev->pixfmt.height); gspca_dev->last_packet_type = DISCARD_PACKET; return; } @@ -4470,7 +4475,8 @@ static void ovfx2_pkt_scan(struct gspca_dev *gspca_dev, if (sd->first_frame) { sd->first_frame--; if (gspca_dev->image_len < - sd->gspca_dev.width * sd->gspca_dev.height) + sd->gspca_dev.pixfmt.width * + sd->gspca_dev.pixfmt.height) gspca_dev->last_packet_type = DISCARD_PACKET; } gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c index 03a33c46ca2c..90f0d637cd9d 100644 --- a/drivers/media/usb/gspca/ov534.c +++ b/drivers/media/usb/gspca/ov534.c @@ -1440,9 +1440,10 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, /* If this packet is marked as EOF, end the frame */ } else if (data[1] & UVC_STREAM_EOF) { sd->last_pts = 0; - if (gspca_dev->pixfmt == V4L2_PIX_FMT_YUYV + if (gspca_dev->pixfmt.pixelformat == V4L2_PIX_FMT_YUYV && gspca_dev->image_len + len - 12 != - gspca_dev->width * gspca_dev->height * 2) { + gspca_dev->pixfmt.width * + gspca_dev->pixfmt.height * 2) { PDEBUG(D_PACK, "wrong sized frame"); goto discard; } diff --git a/drivers/media/usb/gspca/ov534_9.c b/drivers/media/usb/gspca/ov534_9.c index c4cd028fe0b4..47085cf2d723 100644 --- a/drivers/media/usb/gspca/ov534_9.c +++ b/drivers/media/usb/gspca/ov534_9.c @@ -59,6 +59,7 @@ enum sensors { SENSOR_OV965x, /* ov9657 */ SENSOR_OV971x, /* ov9712 */ SENSOR_OV562x, /* ov5621 */ + SENSOR_OV361x, /* ov3610 */ NSENSORS }; @@ -106,6 +107,274 @@ static const struct v4l2_pix_format ov562x_mode[] = { } }; +enum ov361x { + ov361x_2048 = 0, + ov361x_1600, + ov361x_1024, + ov361x_640, + ov361x_320, + ov361x_160, + ov361x_last +}; + +static const struct v4l2_pix_format ov361x_mode[] = { + {0x800, 0x600, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 0x800, + .sizeimage = 0x800 * 0x600, + .colorspace = V4L2_COLORSPACE_SRGB}, + {1600, 1200, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 1600, + .sizeimage = 1600 * 1200, + .colorspace = V4L2_COLORSPACE_SRGB}, + {1024, 768, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 768, + .sizeimage = 1024 * 768, + .colorspace = V4L2_COLORSPACE_SRGB}, + {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 640, + .sizeimage = 640 * 480, + .colorspace = V4L2_COLORSPACE_SRGB}, + {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 320, + .sizeimage = 320 * 240, + .colorspace = V4L2_COLORSPACE_SRGB}, + {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 160, + .sizeimage = 160 * 120, + .colorspace = V4L2_COLORSPACE_SRGB} +}; + 
+static const u8 ov361x_start_2048[][2] = { + {0x12, 0x80}, + {0x13, 0xcf}, + {0x14, 0x40}, + {0x15, 0x00}, + {0x01, 0x80}, + {0x02, 0x80}, + {0x04, 0x70}, + {0x0d, 0x40}, + {0x0f, 0x47}, + {0x11, 0x81}, + {0x32, 0x36}, + {0x33, 0x0c}, + {0x34, 0x00}, + {0x35, 0x90}, + {0x12, 0x00}, + {0x17, 0x10}, + {0x18, 0x90}, + {0x19, 0x00}, + {0x1a, 0xc0}, +}; +static const u8 ov361x_bridge_start_2048[][2] = { + {0xf1, 0x60}, + {0x88, 0x00}, + {0x89, 0x08}, + {0x8a, 0x00}, + {0x8b, 0x06}, + {0x8c, 0x01}, + {0x8d, 0x10}, + {0x1c, 0x00}, + {0x1d, 0x48}, + {0x1d, 0x00}, + {0x1d, 0xff}, + {0x1c, 0x0a}, + {0x1d, 0x2e}, + {0x1d, 0x1e}, +}; + +static const u8 ov361x_start_1600[][2] = { + {0x12, 0x80}, + {0x13, 0xcf}, + {0x14, 0x40}, + {0x15, 0x00}, + {0x01, 0x80}, + {0x02, 0x80}, + {0x04, 0x70}, + {0x0d, 0x40}, + {0x0f, 0x47}, + {0x11, 0x81}, + {0x32, 0x36}, + {0x33, 0x0C}, + {0x34, 0x00}, + {0x35, 0x90}, + {0x12, 0x00}, + {0x17, 0x10}, + {0x18, 0x90}, + {0x19, 0x00}, + {0x1a, 0xc0}, +}; +static const u8 ov361x_bridge_start_1600[][2] = { + {0xf1, 0x60}, /* Hsize[7:0] */ + {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */ + {0x89, 0x08}, /* Vsize[7:0] */ + {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */ + {0x8b, 0x06}, /* for Iso */ + {0x8c, 0x01}, /* RAW input */ + {0x8d, 0x10}, + {0x1c, 0x00}, /* RAW output, Iso transfer */ + {0x1d, 0x48}, + {0x1d, 0x00}, + {0x1d, 0xff}, + {0x1c, 0x0a}, /* turn off JPEG, Iso mode */ + {0x1d, 0x2e}, /* for Iso */ + {0x1d, 0x1e}, +}; + +static const u8 ov361x_start_1024[][2] = { + {0x12, 0x80}, + {0x13, 0xcf}, + {0x14, 0x40}, + {0x15, 0x00}, + {0x01, 0x80}, + {0x02, 0x80}, + {0x04, 0x70}, + {0x0d, 0x40}, + {0x0f, 0x47}, + {0x11, 0x81}, + {0x32, 0x36}, + {0x33, 0x0C}, + {0x34, 0x00}, + {0x35, 0x90}, + {0x12, 0x40}, + {0x17, 0x1f}, + {0x18, 0x5f}, + {0x19, 0x00}, + {0x1a, 0x68}, +}; +static const u8 ov361x_bridge_start_1024[][2] = { + {0xf1, 0x60}, /* Hsize[7:0] */ + {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */ + {0x89, 0x04}, /* Vsize[7:0] */ + {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */ + {0x8b, 0x03}, /* for Iso */ + {0x8c, 0x01}, /* RAW input */ + {0x8d, 0x10}, + {0x1c, 0x00}, /* RAW output, Iso transfer */ + {0x1d, 0x48}, + {0x1d, 0x00}, + {0x1d, 0xff}, + {0x1c, 0x0a}, /* turn off JPEG, Iso mode */ + {0x1d, 0x2e}, /* for Iso */ + {0x1d, 0x1e}, +}; + +static const u8 ov361x_start_640[][2] = { + {0x12, 0x80}, + {0x13, 0xcf}, + {0x14, 0x40}, + {0x15, 0x00}, + {0x01, 0x80}, + {0x02, 0x80}, + {0x04, 0x70}, + {0x0d, 0x40}, + {0x0f, 0x47}, + {0x11, 0x81}, + {0x32, 0x36}, + {0x33, 0x0C}, + {0x34, 0x00}, + {0x35, 0x90}, + {0x12, 0x40}, + {0x17, 0x1f}, + {0x18, 0x5f}, + {0x19, 0x00}, + {0x1a, 0x68}, +}; + +static const u8 ov361x_bridge_start_640[][2] = { + {0xf1, 0x60}, /* Hsize[7:0]*/ + {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */ + {0x89, 0x04}, /* Vsize[7:0] */ + {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */ + {0x8b, 0x03}, /* for Iso */ + {0x8c, 0x01}, /* RAW input */ + {0x8d, 0x10}, + {0x1c, 0x00}, /* RAW output, Iso transfer */ + {0x1d, 0x48}, + {0x1d, 0x00}, + {0x1d, 0xff}, + {0x1c, 0x0a}, /* turn off JPEG, Iso mode */ + {0x1d, 0x2e}, /* for Iso */ + {0x1d, 0x1e}, +}; + +static const u8 ov361x_start_320[][2] = { + {0x12, 0x80}, + {0x13, 0xcf}, + {0x14, 0x40}, + {0x15, 0x00}, + {0x01, 0x80}, + {0x02, 0x80}, + {0x04, 0x70}, + {0x0d, 0x40}, + {0x0f, 0x47}, + {0x11, 0x81}, + {0x32, 0x36}, + {0x33, 0x0C}, + {0x34, 0x00}, + {0x35, 0x90}, + {0x12, 0x40}, + {0x17, 0x1f}, + {0x18, 0x5f}, + {0x19, 0x00}, + {0x1a, 0x68}, +}; + +static 
const u8 ov361x_bridge_start_320[][2] = { + {0xf1, 0x60}, /* Hsize[7:0] */ + {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */ + {0x89, 0x04}, /* Vsize[7:0] */ + {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */ + {0x8b, 0x03}, /* for Iso */ + {0x8c, 0x01}, /* RAW input */ + {0x8d, 0x10}, + {0x1c, 0x00}, /* RAW output, Iso transfer; */ + {0x1d, 0x48}, + {0x1d, 0x00}, + {0x1d, 0xff}, + {0x1c, 0x0a}, /* turn off JPEG, Iso mode */ + {0x1d, 0x2e}, /* for Iso */ + {0x1d, 0x1e}, +}; + +static const u8 ov361x_start_160[][2] = { + {0x12, 0x80}, + {0x13, 0xcf}, + {0x14, 0x40}, + {0x15, 0x00}, + {0x01, 0x80}, + {0x02, 0x80}, + {0x04, 0x70}, + {0x0d, 0x40}, + {0x0f, 0x47}, + {0x11, 0x81}, + {0x32, 0x36}, + {0x33, 0x0C}, + {0x34, 0x00}, + {0x35, 0x90}, + {0x12, 0x40}, + {0x17, 0x1f}, + {0x18, 0x5f}, + {0x19, 0x00}, + {0x1a, 0x68}, +}; + +static const u8 ov361x_bridge_start_160[][2] = { + {0xf1, 0x60}, /* Hsize[7:0] */ + {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */ + {0x89, 0x04}, /* Vsize[7:0] */ + {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */ + {0x8b, 0x03}, /* for Iso */ + {0x8c, 0x01}, /* RAW input */ + {0x8d, 0x10}, + {0x1c, 0x00}, /* RAW output, Iso transfer */ + {0x1d, 0x48}, + {0x1d, 0x00}, + {0x1d, 0xff}, + {0x1c, 0x0a}, /* turn off JPEG, Iso mode */ + {0x1d, 0x2e}, /* for Iso */ + {0x1d, 0x1e}, +}; + static const u8 bridge_init[][2] = { {0x88, 0xf8}, {0x89, 0xff}, @@ -898,7 +1167,7 @@ static int sccb_check_status(struct gspca_dev *gspca_dev) int i; for (i = 0; i < 5; i++) { - msleep(10); + msleep(20); data = reg_r(gspca_dev, OV534_REG_STATUS); switch (data) { @@ -1221,6 +1490,13 @@ static int sd_init(struct gspca_dev *gspca_dev) sccb_w_array(gspca_dev, ov562x_init_2, ARRAY_SIZE(ov562x_init_2)); reg_w(gspca_dev, 0xe0, 0x00); + } else if ((sensor_id & 0xfff0) == 0x3610) { + sd->sensor = SENSOR_OV361x; + gspca_dev->cam.cam_mode = ov361x_mode; + gspca_dev->cam.nmodes = ARRAY_SIZE(ov361x_mode); + reg_w(gspca_dev, 0xe7, 0x3a); + reg_w(gspca_dev, 0xf1, 0x60); + sccb_write(gspca_dev, 0x12, 0x80); } else { pr_err("Unknown sensor %04x", sensor_id); return -EINVAL; @@ -1229,6 +1505,53 @@ static int sd_init(struct gspca_dev *gspca_dev) return gspca_dev->usb_err; } +static int sd_start_ov361x(struct gspca_dev *gspca_dev) +{ + sccb_write(gspca_dev, 0x12, 0x80); + msleep(20); + switch (gspca_dev->curr_mode % (ov361x_last)) { + case ov361x_2048: + reg_w_array(gspca_dev, ov361x_bridge_start_2048, + ARRAY_SIZE(ov361x_bridge_start_2048)); + sccb_w_array(gspca_dev, ov361x_start_2048, + ARRAY_SIZE(ov361x_start_2048)); + break; + case ov361x_1600: + reg_w_array(gspca_dev, ov361x_bridge_start_1600, + ARRAY_SIZE(ov361x_bridge_start_1600)); + sccb_w_array(gspca_dev, ov361x_start_1600, + ARRAY_SIZE(ov361x_start_1600)); + break; + case ov361x_1024: + reg_w_array(gspca_dev, ov361x_bridge_start_1024, + ARRAY_SIZE(ov361x_bridge_start_1024)); + sccb_w_array(gspca_dev, ov361x_start_1024, + ARRAY_SIZE(ov361x_start_1024)); + break; + case ov361x_640: + reg_w_array(gspca_dev, ov361x_bridge_start_640, + ARRAY_SIZE(ov361x_bridge_start_640)); + sccb_w_array(gspca_dev, ov361x_start_640, + ARRAY_SIZE(ov361x_start_640)); + break; + case ov361x_320: + reg_w_array(gspca_dev, ov361x_bridge_start_320, + ARRAY_SIZE(ov361x_bridge_start_320)); + sccb_w_array(gspca_dev, ov361x_start_320, + ARRAY_SIZE(ov361x_start_320)); + break; + case ov361x_160: + reg_w_array(gspca_dev, ov361x_bridge_start_160, + ARRAY_SIZE(ov361x_bridge_start_160)); + sccb_w_array(gspca_dev, ov361x_start_160, + ARRAY_SIZE(ov361x_start_160)); 
+ break; + } + reg_w(gspca_dev, 0xe0, 0x00); /* start transfer */ + + return gspca_dev->usb_err; +} + static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; @@ -1237,6 +1560,8 @@ static int sd_start(struct gspca_dev *gspca_dev) return gspca_dev->usb_err; if (sd->sensor == SENSOR_OV562x) return gspca_dev->usb_err; + if (sd->sensor == SENSOR_OV361x) + return sd_start_ov361x(gspca_dev); switch (gspca_dev->curr_mode) { case QVGA_MODE: /* 320x240 */ @@ -1290,6 +1615,11 @@ static int sd_start(struct gspca_dev *gspca_dev) static void sd_stopN(struct gspca_dev *gspca_dev) { + if (((struct sd *)gspca_dev)->sensor == SENSOR_OV361x) { + reg_w(gspca_dev, 0xe0, 0x01); /* stop transfer */ + /* reg_w(gspca_dev, 0x31, 0x09); */ + return; + } reg_w(gspca_dev, 0xe0, 0x01); set_led(gspca_dev, 0); reg_w(gspca_dev, 0xe0, 0x00); @@ -1425,6 +1755,8 @@ static int sd_init_controls(struct gspca_dev *gspca_dev) if (sd->sensor == SENSOR_OV971x) return 0; + if (sd->sensor == SENSOR_OV361x) + return 0; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 7); if (sd->sensor == SENSOR_OV562x) { diff --git a/drivers/media/usb/gspca/pac207.c b/drivers/media/usb/gspca/pac207.c index 83519be94e58..cd79c180f67b 100644 --- a/drivers/media/usb/gspca/pac207.c +++ b/drivers/media/usb/gspca/pac207.c @@ -299,7 +299,7 @@ static int sd_start(struct gspca_dev *gspca_dev) pac207_write_regs(gspca_dev, 0x0042, pac207_sensor_init[3], 8); /* Compression Balance */ - if (gspca_dev->width == 176) + if (gspca_dev->pixfmt.width == 176) pac207_write_reg(gspca_dev, 0x4a, 0xff); else pac207_write_reg(gspca_dev, 0x4a, 0x30); @@ -317,7 +317,7 @@ static int sd_start(struct gspca_dev *gspca_dev) mode = 0x00; else mode = 0x02; - if (gspca_dev->width == 176) { /* 176x144 */ + if (gspca_dev->pixfmt.width == 176) { /* 176x144 */ mode |= 0x01; PDEBUG(D_STREAM, "pac207_start mode 176x144"); } else { /* 352x288 */ diff --git a/drivers/media/usb/gspca/pac7311.c b/drivers/media/usb/gspca/pac7311.c index 1a5bdc853a80..25f86b1e74a8 100644 --- a/drivers/media/usb/gspca/pac7311.c +++ b/drivers/media/usb/gspca/pac7311.c @@ -326,7 +326,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 val) * 640x480 mode and page 4 reg 2 <= 3 then it must be 9 */ reg_w(gspca_dev, 0xff, 0x01); - if (gspca_dev->width != 640 && val <= 3) + if (gspca_dev->pixfmt.width != 640 && val <= 3) reg_w(gspca_dev, 0x08, 0x09); else reg_w(gspca_dev, 0x08, 0x08); @@ -337,7 +337,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 val) * camera to use higher compression or we may run out of * bandwidth. 
*/ - if (gspca_dev->width == 640 && val == 2) + if (gspca_dev->pixfmt.width == 640 && val == 2) reg_w(gspca_dev, 0x80, 0x01); else reg_w(gspca_dev, 0x80, 0x1c); @@ -615,7 +615,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, /* Start the new frame with the jpeg header */ pac_start_frame(gspca_dev, - gspca_dev->height, gspca_dev->width); + gspca_dev->pixfmt.height, gspca_dev->pixfmt.width); } gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } diff --git a/drivers/media/usb/gspca/se401.c b/drivers/media/usb/gspca/se401.c index 5f729b8aa2bd..5102cea50471 100644 --- a/drivers/media/usb/gspca/se401.c +++ b/drivers/media/usb/gspca/se401.c @@ -354,9 +354,9 @@ static int sd_start(struct gspca_dev *gspca_dev) /* set size + mode */ se401_write_req(gspca_dev, SE401_REQ_SET_WIDTH, - gspca_dev->width * mult, 0); + gspca_dev->pixfmt.width * mult, 0); se401_write_req(gspca_dev, SE401_REQ_SET_HEIGHT, - gspca_dev->height * mult, 0); + gspca_dev->pixfmt.height * mult, 0); /* * HDG: disabled this as it does not seem to do anything * se401_write_req(gspca_dev, SE401_REQ_SET_OUTPUT_MODE, @@ -480,7 +480,7 @@ static void sd_complete_frame(struct gspca_dev *gspca_dev, u8 *data, int len) static void sd_pkt_scan_janggu(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *)gspca_dev; - int imagesize = gspca_dev->width * gspca_dev->height; + int imagesize = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height; int i, plen, bits, pixels, info, count; if (sd->restart_stream) diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c index f4453d52801b..2a38621cf718 100644 --- a/drivers/media/usb/gspca/sn9c20x.c +++ b/drivers/media/usb/gspca/sn9c20x.c @@ -1955,7 +1955,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev) return 0; } - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: /* 160x120 */ gspca_dev->alt = 2; break; @@ -1985,8 +1985,8 @@ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; - int width = gspca_dev->width; - int height = gspca_dev->height; + int width = gspca_dev->pixfmt.width; + int height = gspca_dev->pixfmt.height; u8 fmt, scale = 0; jpeg_define(sd->jpeg_hdr, height, width, diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c index d7ff3b9687c5..7277dbd2afcd 100644 --- a/drivers/media/usb/gspca/sonixb.c +++ b/drivers/media/usb/gspca/sonixb.c @@ -513,10 +513,7 @@ static void i2c_w(struct gspca_dev *gspca_dev, const u8 *buf) if (gspca_dev->usb_buf[0] & 0x04) { if (gspca_dev->usb_buf[0] & 0x08) { dev_err(gspca_dev->v4l2_dev.dev, - "i2c error writing %02x %02x %02x %02x" - " %02x %02x %02x %02x\n", - buf[0], buf[1], buf[2], buf[3], - buf[4], buf[5], buf[6], buf[7]); + "i2c error writing %8ph\n", buf); gspca_dev->usb_err = -EIO; } return; @@ -753,7 +750,7 @@ static void setexposure(struct gspca_dev *gspca_dev) /* In 640x480, if the reg11 has less than 4, the image is unstable (the bridge goes into a higher compression mode which we have not reverse engineered yet). 
*/ - if (gspca_dev->width == 640 && reg11 < 4) + if (gspca_dev->pixfmt.width == 640 && reg11 < 4) reg11 = 4; /* frame exposure time in ms = 1000 * reg11 / 30 -> diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c index 3b5ccb1c4cdf..c69b45d7cfbf 100644 --- a/drivers/media/usb/gspca/sonixj.c +++ b/drivers/media/usb/gspca/sonixj.c @@ -2204,7 +2204,8 @@ static int sd_start(struct gspca_dev *gspca_dev) { 0x14, 0xe7, 0x1e, 0xdd }; /* create the JPEG header */ - jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, + jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width, 0x21); /* JPEG 422 */ /* initialize the bridge */ diff --git a/drivers/media/usb/gspca/spca1528.c b/drivers/media/usb/gspca/spca1528.c index 688592b289ea..f38fd8949609 100644 --- a/drivers/media/usb/gspca/spca1528.c +++ b/drivers/media/usb/gspca/spca1528.c @@ -255,7 +255,8 @@ static int sd_start(struct gspca_dev *gspca_dev) struct sd *sd = (struct sd *) gspca_dev; /* initialize the JPEG header */ - jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, + jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width, 0x22); /* JPEG 411 */ /* the JPEG quality shall be 85% */ diff --git a/drivers/media/usb/gspca/spca500.c b/drivers/media/usb/gspca/spca500.c index 9f8bf51fd64b..f011a309dd65 100644 --- a/drivers/media/usb/gspca/spca500.c +++ b/drivers/media/usb/gspca/spca500.c @@ -608,7 +608,8 @@ static int sd_start(struct gspca_dev *gspca_dev) __u8 xmult, ymult; /* create the JPEG header */ - jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, + jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width, 0x22); /* JPEG 411 */ jpeg_set_qual(sd->jpeg_hdr, QUALITY); diff --git a/drivers/media/usb/gspca/sq905c.c b/drivers/media/usb/gspca/sq905c.c index acb19fb9a3df..aa21edc9502d 100644 --- a/drivers/media/usb/gspca/sq905c.c +++ b/drivers/media/usb/gspca/sq905c.c @@ -272,7 +272,7 @@ static int sd_start(struct gspca_dev *gspca_dev) dev->cap_mode = gspca_dev->cam.cam_mode; /* "Open the shutter" and set size, to start capture */ - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 640: PDEBUG(D_STREAM, "Start streaming at high resolution"); dev->cap_mode++; diff --git a/drivers/media/usb/gspca/sq930x.c b/drivers/media/usb/gspca/sq930x.c index b10d0821111c..e274cf19a3ea 100644 --- a/drivers/media/usb/gspca/sq930x.c +++ b/drivers/media/usb/gspca/sq930x.c @@ -906,7 +906,8 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev) gspca_dev->cam.bulk_nurbs = 1; /* there must be one URB only */ sd->do_ctrl = 0; - gspca_dev->cam.bulk_size = gspca_dev->width * gspca_dev->height + 8; + gspca_dev->cam.bulk_size = gspca_dev->pixfmt.width * + gspca_dev->pixfmt.height + 8; return 0; } diff --git a/drivers/media/usb/gspca/stk014.c b/drivers/media/usb/gspca/stk014.c index 8c0982607f25..b0c70fea760b 100644 --- a/drivers/media/usb/gspca/stk014.c +++ b/drivers/media/usb/gspca/stk014.c @@ -250,7 +250,8 @@ static int sd_start(struct gspca_dev *gspca_dev) int ret, value; /* create the JPEG header */ - jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, + jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width, 0x22); /* JPEG 411 */ jpeg_set_qual(sd->jpeg_hdr, QUALITY); @@ -261,7 +262,7 @@ static int sd_start(struct gspca_dev *gspca_dev) set_par(gspca_dev, 0x00000000); set_par(gspca_dev, 0x8002e001); set_par(gspca_dev, 0x14000000); - if (gspca_dev->width > 320) + if (gspca_dev->pixfmt.width > 320) 
value = 0x8002e001; /* 640x480 */ else value = 0x4001f000; /* 320x240 */ diff --git a/drivers/media/usb/gspca/stk1135.c b/drivers/media/usb/gspca/stk1135.c index 585868835ace..1fc80af2a189 100644 --- a/drivers/media/usb/gspca/stk1135.c +++ b/drivers/media/usb/gspca/stk1135.c @@ -48,42 +48,11 @@ struct sd { }; static const struct v4l2_pix_format stk1135_modes[] = { - {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, - .bytesperline = 160, - .sizeimage = 160 * 120, - .colorspace = V4L2_COLORSPACE_SRGB}, - {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, - .bytesperline = 176, - .sizeimage = 176 * 144, - .colorspace = V4L2_COLORSPACE_SRGB}, - {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, - .bytesperline = 320, - .sizeimage = 320 * 240, - .colorspace = V4L2_COLORSPACE_SRGB}, - {352, 288, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, - .bytesperline = 352, - .sizeimage = 352 * 288, - .colorspace = V4L2_COLORSPACE_SRGB}, + /* default mode (this driver supports variable resolution) */ {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB}, - {720, 576, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, - .bytesperline = 720, - .sizeimage = 720 * 576, - .colorspace = V4L2_COLORSPACE_SRGB}, - {800, 600, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, - .bytesperline = 800, - .sizeimage = 800 * 600, - .colorspace = V4L2_COLORSPACE_SRGB}, - {1024, 768, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, - .bytesperline = 1024, - .sizeimage = 1024 * 768, - .colorspace = V4L2_COLORSPACE_SRGB}, - {1280, 1024, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, - .bytesperline = 1280, - .sizeimage = 1280 * 1024, - .colorspace = V4L2_COLORSPACE_SRGB}, }; /* -- read a register -- */ @@ -347,16 +316,16 @@ static void stk1135_configure_mt9m112(struct gspca_dev *gspca_dev) sensor_write(gspca_dev, cfg[i].reg, cfg[i].val); /* set output size */ - width = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].width; - height = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].height; - if (width <= 640) { /* use context A (half readout speed by default) */ + width = gspca_dev->pixfmt.width; + height = gspca_dev->pixfmt.height; + if (width <= 640 && height <= 512) { /* context A (half readout speed)*/ sensor_write(gspca_dev, 0x1a7, width); sensor_write(gspca_dev, 0x1aa, height); /* set read mode context A */ sensor_write(gspca_dev, 0x0c8, 0x0000); /* set resize, read mode, vblank, hblank context A */ sensor_write(gspca_dev, 0x2c8, 0x0000); - } else { /* use context B (full readout speed by default) */ + } else { /* context B (full readout speed) */ sensor_write(gspca_dev, 0x1a1, width); sensor_write(gspca_dev, 0x1a4, height); /* set read mode context B */ @@ -484,8 +453,8 @@ static int sd_start(struct gspca_dev *gspca_dev) reg_w(gspca_dev, STK1135_REG_CISPO + 3, 0x00); /* set capture end position */ - width = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].width; - height = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].height; + width = gspca_dev->pixfmt.width; + height = gspca_dev->pixfmt.height; reg_w(gspca_dev, STK1135_REG_CIEPO + 0, width & 0xff); reg_w(gspca_dev, STK1135_REG_CIEPO + 1, width >> 8); reg_w(gspca_dev, STK1135_REG_CIEPO + 2, height & 0xff); @@ -643,6 +612,35 @@ static int sd_init_controls(struct gspca_dev *gspca_dev) return 0; } +static void stk1135_try_fmt(struct gspca_dev *gspca_dev, struct v4l2_format *fmt) +{ + fmt->fmt.pix.width = clamp(fmt->fmt.pix.width, 32U, 1280U); + fmt->fmt.pix.height = clamp(fmt->fmt.pix.height, 32U, 1024U); + /* round up to even numbers */ + fmt->fmt.pix.width += 
(fmt->fmt.pix.width & 1); + fmt->fmt.pix.height += (fmt->fmt.pix.height & 1); + + fmt->fmt.pix.bytesperline = fmt->fmt.pix.width; + fmt->fmt.pix.sizeimage = fmt->fmt.pix.width * fmt->fmt.pix.height; +} + +static int stk1135_enum_framesizes(struct gspca_dev *gspca_dev, + struct v4l2_frmsizeenum *fsize) +{ + if (fsize->index != 0 || fsize->pixel_format != V4L2_PIX_FMT_SBGGR8) + return -EINVAL; + + fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; + fsize->stepwise.min_width = 32; + fsize->stepwise.min_height = 32; + fsize->stepwise.max_width = 1280; + fsize->stepwise.max_height = 1024; + fsize->stepwise.step_width = 2; + fsize->stepwise.step_height = 2; + + return 0; +} + /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, @@ -653,6 +651,8 @@ static const struct sd_desc sd_desc = { .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .dq_callback = stk1135_dq_callback, + .try_fmt = stk1135_try_fmt, + .enum_framesizes = stk1135_enum_framesizes, }; /* -- module initialisation -- */ diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c index 55ee7a61c67f..49d209bbf9ee 100644 --- a/drivers/media/usb/gspca/stv06xx/stv06xx.c +++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c @@ -452,7 +452,7 @@ frame_data: NULL, 0); if (sd->bridge == BRIDGE_ST6422) - sd->to_skip = gspca_dev->width * 4; + sd->to_skip = gspca_dev->pixfmt.width * 4; if (chunk_len) PERR("Chunk length is " diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c index 8206b7743300..8d785edcccf2 100644 --- a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c +++ b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c @@ -421,7 +421,7 @@ static int pb0100_set_autogain_target(struct gspca_dev *gspca_dev, __s32 val) /* Number of pixels counted by the sensor when subsampling the pixels. 
* Slightly larger than the real value to avoid oscillation */ - totalpixels = gspca_dev->width * gspca_dev->height; + totalpixels = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height; totalpixels = totalpixels/(8*8) + totalpixels/(64*64); brightpixels = (totalpixels * val) >> 8; diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c index af8767a9bd4c..a517d185febe 100644 --- a/drivers/media/usb/gspca/sunplus.c +++ b/drivers/media/usb/gspca/sunplus.c @@ -715,7 +715,8 @@ static int sd_start(struct gspca_dev *gspca_dev) int enable; /* create the JPEG header */ - jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, + jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width, 0x22); /* JPEG 411 */ jpeg_set_qual(sd->jpeg_hdr, QUALITY); diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c index 4cb511ccc5f6..640c2fe760b3 100644 --- a/drivers/media/usb/gspca/topro.c +++ b/drivers/media/usb/gspca/topro.c @@ -3856,7 +3856,7 @@ static void setsharpness(struct gspca_dev *gspca_dev, s32 val) if (sd->bridge == BRIDGE_TP6800) { val |= 0x08; /* grid compensation enable */ - if (gspca_dev->width == 640) + if (gspca_dev->pixfmt.width == 640) reg_w(gspca_dev, TP6800_R78_FORMAT, 0x00); /* vga */ else val |= 0x04; /* scaling down enable */ @@ -3880,7 +3880,7 @@ static void set_resolution(struct gspca_dev *gspca_dev) struct sd *sd = (struct sd *) gspca_dev; reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x00); - if (gspca_dev->width == 320) { + if (gspca_dev->pixfmt.width == 320) { reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x06); msleep(100); i2c_w(gspca_dev, CX0342_AUTO_ADC_CALIB, 0x01); @@ -3924,7 +3924,7 @@ static int get_fr_idx(struct gspca_dev *gspca_dev) /* 640x480 * 30 fps does not work */ if (i == 6 /* if 30 fps */ - && gspca_dev->width == 640) + && gspca_dev->pixfmt.width == 640) i = 0x05; /* 15 fps */ } else { for (i = 0; i < ARRAY_SIZE(rates_6810) - 1; i++) { @@ -3935,7 +3935,7 @@ static int get_fr_idx(struct gspca_dev *gspca_dev) /* 640x480 * 30 fps does not work */ if (i == 7 /* if 30 fps */ - && gspca_dev->width == 640) + && gspca_dev->pixfmt.width == 640) i = 6; /* 15 fps */ i |= 0x80; /* clock * 1 */ } @@ -4554,7 +4554,8 @@ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; - jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width); + jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width); set_dqt(gspca_dev, sd->quality); if (sd->bridge == BRIDGE_TP6800) { if (sd->sensor == SENSOR_CX0342) @@ -4737,7 +4738,7 @@ static void sd_dq_callback(struct gspca_dev *gspca_dev) (gspca_dev->usb_buf[26] << 8) + gspca_dev->usb_buf[25] + (gspca_dev->usb_buf[29] << 8) + gspca_dev->usb_buf[28]) / 8; - if (gspca_dev->width == 640) + if (gspca_dev->pixfmt.width == 640) luma /= 4; reg_w(gspca_dev, 0x7d, 0x00); diff --git a/drivers/media/usb/gspca/tv8532.c b/drivers/media/usb/gspca/tv8532.c index 8591324a53e1..d497ba38af0d 100644 --- a/drivers/media/usb/gspca/tv8532.c +++ b/drivers/media/usb/gspca/tv8532.c @@ -268,7 +268,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, packet_type0 = packet_type1 = INTER_PACKET; if (gspca_dev->empty_packet) { gspca_dev->empty_packet = 0; - sd->packet = gspca_dev->height / 2; + sd->packet = gspca_dev->pixfmt.height / 2; packet_type0 = FIRST_PACKET; } else if (sd->packet == 0) return; /* 2 more lines in 352x288 ! 
*/ @@ -284,9 +284,10 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, * - 4 bytes */ gspca_frame_add(gspca_dev, packet_type0, - data + 2, gspca_dev->width); + data + 2, gspca_dev->pixfmt.width); gspca_frame_add(gspca_dev, packet_type1, - data + gspca_dev->width + 5, gspca_dev->width); + data + gspca_dev->pixfmt.width + 5, + gspca_dev->pixfmt.width); } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) diff --git a/drivers/media/usb/gspca/vicam.c b/drivers/media/usb/gspca/vicam.c index a2275cfe0b81..103f6c4236b0 100644 --- a/drivers/media/usb/gspca/vicam.c +++ b/drivers/media/usb/gspca/vicam.c @@ -121,13 +121,13 @@ static int vicam_read_frame(struct gspca_dev *gspca_dev, u8 *data, int size) memset(req_data, 0, 16); req_data[0] = gain; - if (gspca_dev->width == 256) + if (gspca_dev->pixfmt.width == 256) req_data[1] |= 0x01; /* low nibble x-scale */ - if (gspca_dev->height <= 122) { + if (gspca_dev->pixfmt.height <= 122) { req_data[1] |= 0x10; /* high nibble y-scale */ - unscaled_height = gspca_dev->height * 2; + unscaled_height = gspca_dev->pixfmt.height * 2; } else - unscaled_height = gspca_dev->height; + unscaled_height = gspca_dev->pixfmt.height; req_data[2] = 0x90; /* unknown, does not seem to do anything */ if (unscaled_height <= 200) req_data[3] = 0x06; /* vend? */ diff --git a/drivers/media/usb/gspca/w996Xcf.c b/drivers/media/usb/gspca/w996Xcf.c index 2165da0c7ce1..fb9fe2ef3a6f 100644 --- a/drivers/media/usb/gspca/w996Xcf.c +++ b/drivers/media/usb/gspca/w996Xcf.c @@ -430,11 +430,11 @@ static void w9968cf_set_crop_window(struct sd *sd) #define SC(x) ((x) << 10) /* Scaling factors */ - fw = SC(sd->gspca_dev.width) / max_width; - fh = SC(sd->gspca_dev.height) / max_height; + fw = SC(sd->gspca_dev.pixfmt.width) / max_width; + fh = SC(sd->gspca_dev.pixfmt.height) / max_height; - cw = (fw >= fh) ? max_width : SC(sd->gspca_dev.width) / fh; - ch = (fw >= fh) ? SC(sd->gspca_dev.height) / fw : max_height; + cw = (fw >= fh) ? max_width : SC(sd->gspca_dev.pixfmt.width) / fh; + ch = (fw >= fh) ? SC(sd->gspca_dev.pixfmt.height) / fw : max_height; sd->sensor_width = max_width; sd->sensor_height = max_height; @@ -454,34 +454,34 @@ static void w9968cf_mode_init_regs(struct sd *sd) w9968cf_set_crop_window(sd); - reg_w(sd, 0x14, sd->gspca_dev.width); - reg_w(sd, 0x15, sd->gspca_dev.height); + reg_w(sd, 0x14, sd->gspca_dev.pixfmt.width); + reg_w(sd, 0x15, sd->gspca_dev.pixfmt.height); /* JPEG width & height */ - reg_w(sd, 0x30, sd->gspca_dev.width); - reg_w(sd, 0x31, sd->gspca_dev.height); + reg_w(sd, 0x30, sd->gspca_dev.pixfmt.width); + reg_w(sd, 0x31, sd->gspca_dev.pixfmt.height); /* Y & UV frame buffer strides (in WORD) */ if (w9968cf_vga_mode[sd->gspca_dev.curr_mode].pixelformat == V4L2_PIX_FMT_JPEG) { - reg_w(sd, 0x2c, sd->gspca_dev.width / 2); - reg_w(sd, 0x2d, sd->gspca_dev.width / 4); + reg_w(sd, 0x2c, sd->gspca_dev.pixfmt.width / 2); + reg_w(sd, 0x2d, sd->gspca_dev.pixfmt.width / 4); } else - reg_w(sd, 0x2c, sd->gspca_dev.width); + reg_w(sd, 0x2c, sd->gspca_dev.pixfmt.width); reg_w(sd, 0x00, 0xbf17); /* reset everything */ reg_w(sd, 0x00, 0xbf10); /* normal operation */ /* Transfer size in WORDS (for UYVY format only) */ - val = sd->gspca_dev.width * sd->gspca_dev.height; + val = sd->gspca_dev.pixfmt.width * sd->gspca_dev.pixfmt.height; reg_w(sd, 0x3d, val & 0xffff); /* low bits */ reg_w(sd, 0x3e, val >> 16); /* high bits */ if (w9968cf_vga_mode[sd->gspca_dev.curr_mode].pixelformat == V4L2_PIX_FMT_JPEG) { /* We may get called multiple times (usb isoc bw negotiat.) 
*/ - jpeg_define(sd->jpeg_hdr, sd->gspca_dev.height, - sd->gspca_dev.width, 0x22); /* JPEG 420 */ + jpeg_define(sd->jpeg_hdr, sd->gspca_dev.pixfmt.height, + sd->gspca_dev.pixfmt.width, 0x22); /* JPEG 420 */ jpeg_set_qual(sd->jpeg_hdr, v4l2_ctrl_g_ctrl(sd->jpegqual)); w9968cf_upload_quantizationtables(sd); v4l2_ctrl_grab(sd->jpegqual, true); diff --git a/drivers/media/usb/gspca/xirlink_cit.c b/drivers/media/usb/gspca/xirlink_cit.c index 7eaf64eb867c..a41aa7817c54 100644 --- a/drivers/media/usb/gspca/xirlink_cit.c +++ b/drivers/media/usb/gspca/xirlink_cit.c @@ -1471,14 +1471,14 @@ static int cit_get_clock_div(struct gspca_dev *gspca_dev) while (clock_div > 3 && 1000 * packet_size > - gspca_dev->width * gspca_dev->height * + gspca_dev->pixfmt.width * gspca_dev->pixfmt.height * fps[clock_div - 1] * 3 / 2) clock_div--; PDEBUG(D_PROBE, "PacketSize: %d, res: %dx%d -> using clockdiv: %d (%d fps)", - packet_size, gspca_dev->width, gspca_dev->height, clock_div, - fps[clock_div]); + packet_size, gspca_dev->pixfmt.width, gspca_dev->pixfmt.height, + clock_div, fps[clock_div]); return clock_div; } @@ -1502,7 +1502,7 @@ static int cit_start_model0(struct gspca_dev *gspca_dev) cit_write_reg(gspca_dev, 0x0002, 0x0426); cit_write_reg(gspca_dev, 0x0014, 0x0427); - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: /* 160x120 */ cit_write_reg(gspca_dev, 0x0004, 0x010b); cit_write_reg(gspca_dev, 0x0001, 0x010a); @@ -1643,7 +1643,7 @@ static int cit_start_model1(struct gspca_dev *gspca_dev) cit_write_reg(gspca_dev, 0x00, 0x0101); cit_write_reg(gspca_dev, 0x00, 0x010a); - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 128: /* 128x96 */ cit_write_reg(gspca_dev, 0x80, 0x0103); cit_write_reg(gspca_dev, 0x60, 0x0105); @@ -1700,7 +1700,7 @@ static int cit_start_model1(struct gspca_dev *gspca_dev) } /* Assorted init */ - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 128: /* 128x96 */ cit_Packet_Format1(gspca_dev, 0x2b, 0x1e); cit_write_reg(gspca_dev, 0xc9, 0x0119); /* Same everywhere */ @@ -1753,7 +1753,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev) cit_write_reg(gspca_dev, 0x0000, 0x0108); cit_write_reg(gspca_dev, 0x0001, 0x0133); cit_write_reg(gspca_dev, 0x0001, 0x0102); - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 176: /* 176x144 */ cit_write_reg(gspca_dev, 0x002c, 0x0103); /* All except 320x240 */ cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */ @@ -1792,7 +1792,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev) cit_write_reg(gspca_dev, 0x0000, 0x0100); /* LED on */ - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 176: /* 176x144 */ cit_write_reg(gspca_dev, 0x0050, 0x0111); cit_write_reg(gspca_dev, 0x00d0, 0x0111); @@ -1840,7 +1840,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev) * Magic control of CMOS sensor. Only lower values like * 0-3 work, and picture shifts left or right. Don't change. */ - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 176: /* 176x144 */ cit_model2_Packet1(gspca_dev, 0x0014, 0x0002); cit_model2_Packet1(gspca_dev, 0x0016, 0x0002); /* Horizontal shift */ @@ -1899,7 +1899,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev) * does not allow arbitrary values and apparently is a bit mask, to * be activated only at appropriate time. Don't change it randomly! 
*/ - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 176: /* 176x144 */ cit_model2_Packet1(gspca_dev, 0x0026, 0x00c2); break; @@ -2023,7 +2023,7 @@ static int cit_start_model3(struct gspca_dev *gspca_dev) cit_model3_Packet1(gspca_dev, 0x009e, 0x0096); cit_model3_Packet1(gspca_dev, 0x009f, 0x000a); - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */ @@ -2134,7 +2134,7 @@ static int cit_start_model3(struct gspca_dev *gspca_dev) like with the IBM netcam pro). */ cit_write_reg(gspca_dev, clock_div, 0x0111); /* Clock Divider */ - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: cit_model3_Packet1(gspca_dev, 0x001f, 0x0000); /* Same */ cit_model3_Packet1(gspca_dev, 0x0039, 0x001f); /* Same */ @@ -2211,7 +2211,7 @@ static int cit_start_model4(struct gspca_dev *gspca_dev) cit_write_reg(gspca_dev, 0xfffa, 0x0124); cit_model4_Packet1(gspca_dev, 0x0034, 0x0000); - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 128: /* 128x96 */ cit_write_reg(gspca_dev, 0x0070, 0x0119); cit_write_reg(gspca_dev, 0x00d0, 0x0111); @@ -2531,7 +2531,7 @@ static int cit_start_ibm_netcam_pro(struct gspca_dev *gspca_dev) cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */ cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */ - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: /* 160x120 */ cit_write_reg(gspca_dev, 0x0024, 0x010b); cit_write_reg(gspca_dev, 0x0089, 0x0119); @@ -2635,7 +2635,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev) struct usb_host_interface *alt; int max_packet_size; - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: max_packet_size = 450; break; @@ -2659,7 +2659,7 @@ static int sd_isoc_nego(struct gspca_dev *gspca_dev) int ret, packet_size, min_packet_size; struct usb_host_interface *alt; - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: min_packet_size = 200; break; @@ -2780,7 +2780,7 @@ static u8 *cit_find_sof(struct gspca_dev *gspca_dev, u8 *data, int len) case CIT_MODEL1: case CIT_MODEL3: case CIT_IBM_NETCAM_PRO: - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: /* 160x120 */ byte3 = 0x02; byte4 = 0x0a; @@ -2864,20 +2864,16 @@ static u8 *cit_find_sof(struct gspca_dev *gspca_dev, u8 *data, int len) if (data[i] == 0xff) { if (i >= 4) PDEBUG(D_FRAM, - "header found at offset: %d: %02x %02x 00 %02x %02x %02x\n", + "header found at offset: %d: %02x %02x 00 %3ph\n", i - 1, data[i - 4], data[i - 3], - data[i], - data[i + 1], - data[i + 2]); + &data[i]); else PDEBUG(D_FRAM, - "header found at offset: %d: 00 %02x %02x %02x\n", + "header found at offset: %d: 00 %3ph\n", i - 1, - data[i], - data[i + 1], - data[i + 2]); + &data[i]); return data + i + (sd->sof_len - 1); } break; diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c index cbfc2f921427..7b95d8e88a20 100644 --- a/drivers/media/usb/gspca/zc3xx.c +++ b/drivers/media/usb/gspca/zc3xx.c @@ -6700,7 +6700,8 @@ static int sd_start(struct gspca_dev *gspca_dev) }; /* create the JPEG header */ - jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, + jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width, 0x21); /* JPEG 422 */ mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c 
b/drivers/media/usb/hdpvr/hdpvr-core.c index 6e5070774dc2..2f0c89cbac76 100644 --- a/drivers/media/usb/hdpvr/hdpvr-core.c +++ b/drivers/media/usb/hdpvr/hdpvr-core.c @@ -78,7 +78,8 @@ void hdpvr_delete(struct hdpvr_device *dev) static void challenge(u8 *bytes) { - u64 *i64P, tmp64; + __le64 *i64P; + u64 tmp64; uint i, idx; for (idx = 0; idx < 32; ++idx) { @@ -106,10 +107,10 @@ static void challenge(u8 *bytes) for (i = 0; i < 3; i++) bytes[1] *= bytes[6] + 1; for (i = 0; i < 3; i++) { - i64P = (u64 *)bytes; + i64P = (__le64 *)bytes; tmp64 = le64_to_cpup(i64P); - tmp64 <<= bytes[7] & 0x0f; - *i64P += cpu_to_le64(tmp64); + tmp64 = tmp64 + (tmp64 << (bytes[7] & 0x0f)); + *i64P = cpu_to_le64(tmp64); } break; } @@ -301,8 +302,6 @@ static int hdpvr_probe(struct usb_interface *interface, goto error; } - dev->workqueue = 0; - /* init video transfer queues first of all */ /* to prevent oops in hdpvr_delete() on error paths */ INIT_LIST_HEAD(&dev->free_buff_list); diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c index c4d51d78f837..ea05f678b559 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c @@ -2868,7 +2868,7 @@ static void pvr2_subdev_set_control(struct pvr2_hdw *hdw, int id, pvr2_subdev_set_control(hdw, id, #lab, (hdw)->lab##_val); \ } -v4l2_std_id pvr2_hdw_get_detected_std(struct pvr2_hdw *hdw) +static v4l2_std_id pvr2_hdw_get_detected_std(struct pvr2_hdw *hdw) { v4l2_std_id std; std = (v4l2_std_id)hdw->std_mask_avail; diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c index 03761c6f472f..05bd91a60c09 100644 --- a/drivers/media/usb/siano/smsusb.c +++ b/drivers/media/usb/siano/smsusb.c @@ -209,8 +209,10 @@ static int smsusb_sendrequest(void *context, void *buffer, size_t size) struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer; int dummy; - if (dev->state != SMSUSB_ACTIVE) + if (dev->state != SMSUSB_ACTIVE) { + sms_debug("Device not active yet"); return -ENOENT; + } sms_debug("sending %s(%d) size: %d", smscore_translate_msg(phdr->msg_type), phdr->msg_type, @@ -243,6 +245,9 @@ static int smsusb1_load_firmware(struct usb_device *udev, int id, int board_id) int rc, dummy; char *fw_filename; + if (id < 0) + id = sms_get_board(board_id)->default_mode; + if (id < DEVICE_MODE_DVBT || id > DEVICE_MODE_DVBT_BDA) { sms_err("invalid firmware id specified %d", id); return -EINVAL; @@ -445,14 +450,15 @@ static int smsusb_probe(struct usb_interface *intf, char devpath[32]; int i, rc; - sms_info("interface number %d", + sms_info("board id=%lu, interface number %d", + id->driver_info, intf->cur_altsetting->desc.bInterfaceNumber); if (sms_get_board(id->driver_info)->intf_num != intf->cur_altsetting->desc.bInterfaceNumber) { - sms_err("interface number is %d expecting %d", - sms_get_board(id->driver_info)->intf_num, - intf->cur_altsetting->desc.bInterfaceNumber); + sms_debug("interface %d won't be used. 
Expecting interface %d to popup", + intf->cur_altsetting->desc.bInterfaceNumber, + sms_get_board(id->driver_info)->intf_num); return -ENODEV; } @@ -483,22 +489,32 @@ static int smsusb_probe(struct usb_interface *intf, } if ((udev->actconfig->desc.bNumInterfaces == 2) && (intf->cur_altsetting->desc.bInterfaceNumber == 0)) { - sms_err("rom interface 0 is not used"); + sms_debug("rom interface 0 is not used"); return -ENODEV; } if (id->driver_info == SMS1XXX_BOARD_SIANO_STELLAR_ROM) { - sms_info("stellar device was found."); + /* Detected a Siano Stellar uninitialized */ + snprintf(devpath, sizeof(devpath), "usb\\%d-%s", udev->bus->busnum, udev->devpath); - sms_info("stellar device was found."); - return smsusb1_load_firmware( + sms_info("stellar device in cold state was found at %s.", devpath); + rc = smsusb1_load_firmware( udev, smscore_registry_getmode(devpath), id->driver_info); + + /* This device will reset and gain another USB ID */ + if (!rc) + sms_info("stellar device now in warm state"); + else + sms_err("Failed to put stellar in warm state. Error: %d", rc); + + return rc; + } else { + rc = smsusb_init_device(intf, id->driver_info); } - rc = smsusb_init_device(intf, id->driver_info); - sms_info("rc %d", rc); + sms_info("Device initialized with return code %d", rc); sms_board_load_modules(id->driver_info); return rc; } @@ -550,10 +566,13 @@ static int smsusb_resume(struct usb_interface *intf) } static const struct usb_device_id smsusb_id_table[] = { + /* This device is only present before firmware load */ { USB_DEVICE(0x187f, 0x0010), - .driver_info = SMS1XXX_BOARD_SIANO_STELLAR }, + .driver_info = SMS1XXX_BOARD_SIANO_STELLAR_ROM }, + /* This device pops up after firmware load */ { USB_DEVICE(0x187f, 0x0100), .driver_info = SMS1XXX_BOARD_SIANO_STELLAR }, + { USB_DEVICE(0x187f, 0x0200), .driver_info = SMS1XXX_BOARD_SIANO_NOVA_A }, { USB_DEVICE(0x187f, 0x0201), diff --git a/drivers/media/usb/tlg2300/pd-main.c b/drivers/media/usb/tlg2300/pd-main.c index 95f94e5aa66d..3316caa4733b 100644 --- a/drivers/media/usb/tlg2300/pd-main.c +++ b/drivers/media/usb/tlg2300/pd-main.c @@ -232,7 +232,7 @@ static int firmware_download(struct usb_device *udev) goto out; } - max_packet_size = udev->ep_out[0x1]->desc.wMaxPacketSize; + max_packet_size = le16_to_cpu(udev->ep_out[0x1]->desc.wMaxPacketSize); log("\t\t download size : %d", (int)max_packet_size); for (offset = 0; offset < fwlength; offset += max_packet_size) { diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c index e52c3b97f304..29724af9b9ab 100644 --- a/drivers/media/usb/ttusb-dec/ttusb_dec.c +++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c @@ -366,7 +366,7 @@ static int ttusb_dec_get_stb_state (struct ttusb_dec *dec, unsigned int *mode, } return 0; } else { - return -1; + return -ENOENT; } } @@ -1241,6 +1241,8 @@ static void ttusb_dec_init_v_pes(struct ttusb_dec *dec) static int ttusb_dec_init_usb(struct ttusb_dec *dec) { + int result; + dprintk("%s\n", __func__); mutex_init(&dec->usb_mutex); @@ -1258,7 +1260,7 @@ static int ttusb_dec_init_usb(struct ttusb_dec *dec) return -ENOMEM; } dec->irq_buffer = usb_alloc_coherent(dec->udev,IRQ_PACKET_SIZE, - GFP_ATOMIC, &dec->irq_dma_handle); + GFP_KERNEL, &dec->irq_dma_handle); if(!dec->irq_buffer) { usb_free_urb(dec->irq_urb); return -ENOMEM; @@ -1270,7 +1272,13 @@ static int ttusb_dec_init_usb(struct ttusb_dec *dec) dec->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; } - return ttusb_dec_alloc_iso_urbs(dec); + result = ttusb_dec_alloc_iso_urbs(dec); + if 
(result) { + usb_free_urb(dec->irq_urb); + usb_free_coherent(dec->udev, IRQ_PACKET_SIZE, + dec->irq_buffer, dec->irq_dma_handle); + } + return result; } static int ttusb_dec_boot_dsp(struct ttusb_dec *dec) @@ -1293,10 +1301,11 @@ static int ttusb_dec_boot_dsp(struct ttusb_dec *dec) dprintk("%s\n", __func__); - if (request_firmware(&fw_entry, dec->firmware_name, &dec->udev->dev)) { + result = request_firmware(&fw_entry, dec->firmware_name, &dec->udev->dev); + if (result) { printk(KERN_ERR "%s: Firmware (%s) unavailable.\n", __func__, dec->firmware_name); - return 1; + return result; } firmware = fw_entry->data; @@ -1306,7 +1315,7 @@ static int ttusb_dec_boot_dsp(struct ttusb_dec *dec) printk("%s: firmware size too small for DSP code (%zu < 60).\n", __func__, firmware_size); release_firmware(fw_entry); - return -1; + return -ENOENT; } /* a 32 bit checksum over the first 56 bytes of the DSP Code is stored @@ -1320,7 +1329,7 @@ static int ttusb_dec_boot_dsp(struct ttusb_dec *dec) "0x%08x != 0x%08x in file), file invalid.\n", __func__, crc32_csum, crc32_check); release_firmware(fw_entry); - return -1; + return -ENOENT; } memcpy(idstring, &firmware[36], 20); idstring[20] = '\0'; @@ -1389,55 +1398,48 @@ static int ttusb_dec_init_stb(struct ttusb_dec *dec) dprintk("%s\n", __func__); result = ttusb_dec_get_stb_state(dec, &mode, &model, &version); + if (result) + return result; - if (!result) { - if (!mode) { - if (version == 0xABCDEFAB) - printk(KERN_INFO "ttusb_dec: no version " - "info in Firmware\n"); - else - printk(KERN_INFO "ttusb_dec: Firmware " - "%x.%02x%c%c\n", - version >> 24, (version >> 16) & 0xff, - (version >> 8) & 0xff, version & 0xff); - - result = ttusb_dec_boot_dsp(dec); - if (result) - return result; - else - return 1; - } else { - /* We can't trust the USB IDs that some firmwares - give the box */ - switch (model) { - case 0x00070001: - case 0x00070008: - case 0x0007000c: - ttusb_dec_set_model(dec, TTUSB_DEC3000S); - break; - case 0x00070009: - case 0x00070013: - ttusb_dec_set_model(dec, TTUSB_DEC2000T); - break; - case 0x00070011: - ttusb_dec_set_model(dec, TTUSB_DEC2540T); - break; - default: - printk(KERN_ERR "%s: unknown model returned " - "by firmware (%08x) - please report\n", - __func__, model); - return -1; - break; - } + if (!mode) { + if (version == 0xABCDEFAB) + printk(KERN_INFO "ttusb_dec: no version " + "info in Firmware\n"); + else + printk(KERN_INFO "ttusb_dec: Firmware " + "%x.%02x%c%c\n", + version >> 24, (version >> 16) & 0xff, + (version >> 8) & 0xff, version & 0xff); + result = ttusb_dec_boot_dsp(dec); + if (result) + return result; + } else { + /* We can't trust the USB IDs that some firmwares + give the box */ + switch (model) { + case 0x00070001: + case 0x00070008: + case 0x0007000c: + ttusb_dec_set_model(dec, TTUSB_DEC3000S); + break; + case 0x00070009: + case 0x00070013: + ttusb_dec_set_model(dec, TTUSB_DEC2000T); + break; + case 0x00070011: + ttusb_dec_set_model(dec, TTUSB_DEC2540T); + break; + default: + printk(KERN_ERR "%s: unknown model returned " + "by firmware (%08x) - please report\n", + __func__, model); + return -ENOENT; + } if (version >= 0x01770000) dec->can_playback = 1; - - return 0; - } } - else - return result; + return 0; } static int ttusb_dec_init_dvb(struct ttusb_dec *dec) @@ -1539,19 +1541,7 @@ static void ttusb_dec_exit_dvb(struct ttusb_dec *dec) static void ttusb_dec_exit_rc(struct ttusb_dec *dec) { - dprintk("%s\n", __func__); - /* we have to check whether the irq URB is already submitted. 
- * As the irq is submitted after the interface is changed, - * this is the best method i figured out. - * Any others?*/ - if (dec->interface == TTUSB_DEC_INTERFACE_IN) - usb_kill_urb(dec->irq_urb); - - usb_free_urb(dec->irq_urb); - - usb_free_coherent(dec->udev,IRQ_PACKET_SIZE, - dec->irq_buffer, dec->irq_dma_handle); if (dec->rc_input_dev) { input_unregister_device(dec->rc_input_dev); @@ -1566,6 +1556,20 @@ static void ttusb_dec_exit_usb(struct ttusb_dec *dec) dprintk("%s\n", __func__); + if (enable_rc) { + /* we have to check whether the irq URB is already submitted. + * As the irq is submitted after the interface is changed, + * this is the best method i figured out. + * Any others?*/ + if (dec->interface == TTUSB_DEC_INTERFACE_IN) + usb_kill_urb(dec->irq_urb); + + usb_free_urb(dec->irq_urb); + + usb_free_coherent(dec->udev, IRQ_PACKET_SIZE, + dec->irq_buffer, dec->irq_dma_handle); + } + dec->iso_stream_count = 0; for (i = 0; i < ISO_BUF_COUNT; i++) @@ -1623,6 +1627,7 @@ static int ttusb_dec_probe(struct usb_interface *intf, { struct usb_device *udev; struct ttusb_dec *dec; + int result; dprintk("%s\n", __func__); @@ -1651,13 +1656,15 @@ static int ttusb_dec_probe(struct usb_interface *intf, dec->udev = udev; - if (ttusb_dec_init_usb(dec)) - return 0; - if (ttusb_dec_init_stb(dec)) { - ttusb_dec_exit_usb(dec); - return 0; - } - ttusb_dec_init_dvb(dec); + result = ttusb_dec_init_usb(dec); + if (result) + goto err_usb; + result = ttusb_dec_init_stb(dec); + if (result) + goto err_stb; + result = ttusb_dec_init_dvb(dec); + if (result) + goto err_stb; dec->adapter.priv = dec; switch (id->idProduct) { @@ -1696,6 +1703,11 @@ static int ttusb_dec_probe(struct usb_interface *intf, ttusb_init_rc(dec); return 0; +err_stb: + ttusb_dec_exit_usb(dec); +err_usb: + kfree(dec); + return result; } static void ttusb_dec_disconnect(struct usb_interface *intf) diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c index a2f4501c23ca..0eb82106d2ff 100644 --- a/drivers/media/usb/uvc/uvc_ctrl.c +++ b/drivers/media/usb/uvc/uvc_ctrl.c @@ -664,7 +664,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = { .size = 32, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, - .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, + .data_type = UVC_CTRL_DATA_TYPE_SIGNED, }, { .id = V4L2_CID_TILT_ABSOLUTE, @@ -674,7 +674,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = { .size = 32, .offset = 32, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, - .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, + .data_type = UVC_CTRL_DATA_TYPE_SIGNED, }, { .id = V4L2_CID_PRIVACY, diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index 3394c3432011..899cb6d1c4a4 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -680,7 +680,8 @@ void uvc_video_clock_update(struct uvc_streaming *stream, stream->dev->name, sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536), y, ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC, - v4l2_buf->timestamp.tv_sec, v4l2_buf->timestamp.tv_usec, + v4l2_buf->timestamp.tv_sec, + (unsigned long)v4l2_buf->timestamp.tv_usec, x1, first->host_sof, first->dev_sof, x2, last->host_sof, last->dev_sof, y1, y2); diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c index ddc9379eb276..20c09229a08e 100644 --- a/drivers/media/v4l2-core/tuner-core.c +++ b/drivers/media/v4l2-core/tuner-core.c @@ -43,7 +43,7 @@ #define UNSET (-1U) -#define PREFIX (t->i2c->driver->driver.name) +#define PREFIX (t->i2c->dev.driver->name) 
/* * Driver modprobe parameters @@ -247,7 +247,7 @@ static const struct analog_demod_ops tuner_analog_ops = { /** * set_type - Sets the tuner type for a given device * - * @c: i2c_client descriptoy + * @c: i2c_client descriptor * @type: type of the tuner (e. g. tuner number) * @new_mode_mask: Indicates if tuner supports TV and/or Radio * @new_config: an optional parameter used by a few tuners to adjust @@ -452,7 +452,7 @@ static void set_type(struct i2c_client *c, unsigned int type, } tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n", - c->adapter->name, c->driver->driver.name, c->addr << 1, type, + c->adapter->name, c->dev.driver->name, c->addr << 1, type, t->mode_mask); return; @@ -556,7 +556,7 @@ static void tuner_lookup(struct i2c_adapter *adap, int mode_mask; if (pos->i2c->adapter != adap || - strcmp(pos->i2c->driver->driver.name, "tuner")) + strcmp(pos->i2c->dev.driver->name, "tuner")) continue; mode_mask = pos->mode_mask; diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c index c85d69da35bd..85a6a34128a8 100644 --- a/drivers/media/v4l2-core/v4l2-async.c +++ b/drivers/media/v4l2-core/v4l2-async.c @@ -189,30 +189,53 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier) struct v4l2_subdev *sd, *tmp; unsigned int notif_n_subdev = notifier->num_subdevs; unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS); - struct device *dev[n_subdev]; + struct device **dev; int i = 0; if (!notifier->v4l2_dev) return; + dev = kmalloc(n_subdev * sizeof(*dev), GFP_KERNEL); + if (!dev) { + dev_err(notifier->v4l2_dev->dev, + "Failed to allocate device cache!\n"); + } + mutex_lock(&list_lock); list_del(&notifier->list); list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) { - dev[i] = get_device(sd->dev); + struct device *d; + + d = get_device(sd->dev); v4l2_async_cleanup(sd); /* If we handled USB devices, we'd have to lock the parent too */ - device_release_driver(dev[i++]); + device_release_driver(d); if (notifier->unbind) notifier->unbind(notifier, sd, sd->asd); + + /* + * Store device at the device cache, in order to call + * put_device() on the final step + */ + if (dev) + dev[i++] = d; + else + put_device(d); } mutex_unlock(&list_lock); + /* + * Call device_attach() to reprobe devices + * + * NOTE: If dev allocation fails, i is 0, and the whole loop won't be
+ */ while (i--) { struct device *d = dev[i]; @@ -228,6 +251,7 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier) } put_device(d); } + kfree(dev); notifier->v4l2_dev = NULL; diff --git a/drivers/media/v4l2-core/v4l2-clk.c b/drivers/media/v4l2-core/v4l2-clk.c index b67de8642b5a..e18cc0469cf8 100644 --- a/drivers/media/v4l2-core/v4l2-clk.c +++ b/drivers/media/v4l2-core/v4l2-clk.c @@ -240,3 +240,42 @@ void v4l2_clk_unregister(struct v4l2_clk *clk) kfree(clk); } EXPORT_SYMBOL(v4l2_clk_unregister); + +struct v4l2_clk_fixed { + unsigned long rate; + struct v4l2_clk_ops ops; +}; + +static unsigned long fixed_get_rate(struct v4l2_clk *clk) +{ + struct v4l2_clk_fixed *priv = clk->priv; + return priv->rate; +} + +struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id, + const char *id, unsigned long rate, struct module *owner) +{ + struct v4l2_clk *clk; + struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL); + + if (!priv) + return ERR_PTR(-ENOMEM); + + priv->rate = rate; + priv->ops.get_rate = fixed_get_rate; + priv->ops.owner = owner; + + clk = v4l2_clk_register(&priv->ops, dev_id, id, priv); + if (IS_ERR(clk)) + kfree(priv); + + return clk; +} +EXPORT_SYMBOL(__v4l2_clk_register_fixed); + +void v4l2_clk_unregister_fixed(struct v4l2_clk *clk) +{ + kfree(clk->priv); + v4l2_clk_unregister(clk); +} +EXPORT_SYMBOL(v4l2_clk_unregister_fixed); diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c index 037d7a55aa8c..433d6d77942e 100644 --- a/drivers/media/v4l2-core/v4l2-common.c +++ b/drivers/media/v4l2-core/v4l2-common.c @@ -236,14 +236,14 @@ void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client, v4l2_subdev_init(sd, ops); sd->flags |= V4L2_SUBDEV_FL_IS_I2C; /* the owner is the same as the i2c_client's driver owner */ - sd->owner = client->driver->driver.owner; + sd->owner = client->dev.driver->owner; sd->dev = &client->dev; /* i2c_client and v4l2_subdev point to one another */ v4l2_set_subdevdata(sd, client); i2c_set_clientdata(client, sd); /* initialize name */ snprintf(sd->name, sizeof(sd->name), "%s %d-%04x", - client->driver->driver.name, i2c_adapter_id(client->adapter), + client->dev.driver->name, i2c_adapter_id(client->adapter), client->addr); } EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init); @@ -274,11 +274,11 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, loaded. This delay-load mechanism doesn't work if other drivers want to use the i2c device, so explicitly loading the module is the best alternative. */ - if (client == NULL || client->driver == NULL) + if (client == NULL || client->dev.driver == NULL) goto error; /* Lock the module so we can safely get the v4l2_subdev pointer */ - if (!try_module_get(client->driver->driver.owner)) + if (!try_module_get(client->dev.driver->owner)) goto error; sd = i2c_get_clientdata(client); @@ -287,7 +287,7 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, if (v4l2_device_register_subdev(v4l2_dev, sd)) sd = NULL; /* Decrease the module use count to match the first try_module_get. 
*/ - module_put(client->driver->driver.owner); + module_put(client->dev.driver->owner); error: /* If we have a client but no subdev, then something went wrong and diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index c3f080388684..60dcc0f3b32e 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -565,13 +565,13 @@ EXPORT_SYMBOL(v4l2_ctrl_get_menu); * Returns NULL or an s64 type array containing the menu for given * control ID. The total number of the menu items is returned in @len. */ -const s64 const *v4l2_ctrl_get_int_menu(u32 id, u32 *len) +const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len) { - static const s64 const qmenu_int_vpx_num_partitions[] = { + static const s64 qmenu_int_vpx_num_partitions[] = { 1, 2, 4, 8, }; - static const s64 const qmenu_int_vpx_num_ref_frames[] = { + static const s64 qmenu_int_vpx_num_ref_frames[] = { 1, 2, 3, }; @@ -583,7 +583,7 @@ const s64 const *v4l2_ctrl_get_int_menu(u32 id, u32 *len) default: *len = 0; return NULL; - }; + } } EXPORT_SYMBOL(v4l2_ctrl_get_int_menu); diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c index 7c4371288215..73035ee0f4de 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -41,6 +41,8 @@ module_param(debug, bool, 0644); #define TRANS_QUEUED (1 << 0) /* Instance is currently running in hardware */ #define TRANS_RUNNING (1 << 1) +/* Instance is currently aborting */ +#define TRANS_ABORT (1 << 2) /* Offset base for buffers on the destination queue - used to distinguish @@ -221,6 +223,14 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) } spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); + + /* If the context is aborted then don't schedule it */ + if (m2m_ctx->job_flags & TRANS_ABORT) { + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); + dprintk("Aborted context\n"); + return; + } + if (m2m_ctx->job_flags & TRANS_QUEUED) { spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); dprintk("On job queue already\n"); @@ -280,6 +290,8 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx) m2m_dev = m2m_ctx->m2m_dev; spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + + m2m_ctx->job_flags |= TRANS_ABORT; if (m2m_ctx->job_flags & TRANS_RUNNING) { spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); m2m_dev->m2m_ops->job_abort(m2m_ctx->priv); @@ -480,13 +492,15 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, m2m_dev = m2m_ctx->m2m_dev; spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); /* We should not be scheduled anymore, since we're dropping a queue. */ - INIT_LIST_HEAD(&m2m_ctx->queue); + if (m2m_ctx->job_flags & TRANS_QUEUED) + list_del(&m2m_ctx->queue); m2m_ctx->job_flags = 0; spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); /* Drop queue, since streamoff returns device to the same state as after * calling reqbufs. 
*/ INIT_LIST_HEAD(&q_ctx->rdy_queue); + q_ctx->num_rdy = 0; spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); if (m2m_dev->curr_ctx == m2m_ctx) { diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index de0e87f0b2c3..b19b306c8f7f 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -241,7 +241,8 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory, q->bufs[q->num_buffers + buffer] = vb; } - __setup_offsets(q, buffer); + if (memory == V4L2_MEMORY_MMAP) + __setup_offsets(q, buffer); dprintk(1, "Allocated %d buffers, %d plane(s) each\n", buffer, num_planes); @@ -1015,6 +1016,10 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b) /* Check if the provided plane buffer is large enough */ if (planes[plane].length < q->plane_sizes[plane]) { + dprintk(1, "qbuf: provided buffer size %u is less than " + "setup size %u for plane %d\n", + planes[plane].length, + q->plane_sizes[plane], plane); ret = -EINVAL; goto err; } @@ -1205,8 +1210,11 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b) int ret; ret = __verify_length(vb, b); - if (ret < 0) + if (ret < 0) { + dprintk(1, "%s(): plane parameters verification failed: %d\n", + __func__, ret); return ret; + } switch (q->memory) { case V4L2_MEMORY_MMAP: @@ -2469,10 +2477,11 @@ size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, } EXPORT_SYMBOL_GPL(vb2_read); -size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count, +size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, loff_t *ppos, int nonblocking) { - return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 0); + return __vb2_perform_fileio(q, (char __user *) data, count, + ppos, nonblocking, 0); } EXPORT_SYMBOL_GPL(vb2_write); @@ -2633,7 +2642,7 @@ int vb2_fop_release(struct file *file) } EXPORT_SYMBOL_GPL(vb2_fop_release); -ssize_t vb2_fop_write(struct file *file, char __user *buf, +ssize_t vb2_fop_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct video_device *vdev = video_devdata(file); diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c index 16ae3dcc7e29..2f860543912c 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c @@ -35,17 +35,61 @@ struct vb2_dma_sg_buf { struct page **pages; int write; int offset; - struct vb2_dma_sg_desc sg_desc; + struct sg_table sg_table; + size_t size; + unsigned int num_pages; atomic_t refcount; struct vb2_vmarea_handler handler; }; static void vb2_dma_sg_put(void *buf_priv); +static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf, + gfp_t gfp_flags) +{ + unsigned int last_page = 0; + int size = buf->size; + + while (size > 0) { + struct page *pages; + int order; + int i; + + order = get_order(size); + /* Dont over allocate*/ + if ((PAGE_SIZE << order) > size) + order--; + + pages = NULL; + while (!pages) { + pages = alloc_pages(GFP_KERNEL | __GFP_ZERO | + __GFP_NOWARN | gfp_flags, order); + if (pages) + break; + + if (order == 0) { + while (last_page--) + __free_page(buf->pages[last_page]); + return -ENOMEM; + } + order--; + } + + split_page(pages, order); + for (i = 0; i < (1 << order); i++) + buf->pages[last_page++] = &pages[i]; + + size -= PAGE_SIZE << order; + } + + return 0; +} + static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags) { struct 
vb2_dma_sg_buf *buf; - int i; + int ret; + int num_pages; buf = kzalloc(sizeof *buf, GFP_KERNEL); if (!buf) @@ -54,29 +98,23 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla buf->vaddr = NULL; buf->write = 0; buf->offset = 0; - buf->sg_desc.size = size; + buf->size = size; /* size is already page aligned */ - buf->sg_desc.num_pages = size >> PAGE_SHIFT; + buf->num_pages = size >> PAGE_SHIFT; - buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages * - sizeof(*buf->sg_desc.sglist)); - if (!buf->sg_desc.sglist) - goto fail_sglist_alloc; - sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages); - - buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *), + buf->pages = kzalloc(buf->num_pages * sizeof(struct page *), GFP_KERNEL); if (!buf->pages) goto fail_pages_array_alloc; - for (i = 0; i < buf->sg_desc.num_pages; ++i) { - buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO | - __GFP_NOWARN | gfp_flags); - if (NULL == buf->pages[i]) - goto fail_pages_alloc; - sg_set_page(&buf->sg_desc.sglist[i], - buf->pages[i], PAGE_SIZE, 0); - } + ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags); + if (ret) + goto fail_pages_alloc; + + ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages, + buf->num_pages, 0, size, gfp_flags); + if (ret) + goto fail_table_alloc; buf->handler.refcount = &buf->refcount; buf->handler.put = vb2_dma_sg_put; @@ -85,18 +123,16 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla atomic_inc(&buf->refcount); dprintk(1, "%s: Allocated buffer of %d pages\n", - __func__, buf->sg_desc.num_pages); + __func__, buf->num_pages); return buf; +fail_table_alloc: + num_pages = buf->num_pages; + while (num_pages--) + __free_page(buf->pages[num_pages]); fail_pages_alloc: - while (--i >= 0) - __free_page(buf->pages[i]); kfree(buf->pages); - fail_pages_array_alloc: - vfree(buf->sg_desc.sglist); - -fail_sglist_alloc: kfree(buf); return NULL; } @@ -104,14 +140,14 @@ fail_sglist_alloc: static void vb2_dma_sg_put(void *buf_priv) { struct vb2_dma_sg_buf *buf = buf_priv; - int i = buf->sg_desc.num_pages; + int i = buf->num_pages; if (atomic_dec_and_test(&buf->refcount)) { dprintk(1, "%s: Freeing buffer of %d pages\n", __func__, - buf->sg_desc.num_pages); + buf->num_pages); if (buf->vaddr) - vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages); - vfree(buf->sg_desc.sglist); + vm_unmap_ram(buf->vaddr, buf->num_pages); + sg_free_table(&buf->sg_table); while (--i >= 0) __free_page(buf->pages[i]); kfree(buf->pages); @@ -124,7 +160,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, { struct vb2_dma_sg_buf *buf; unsigned long first, last; - int num_pages_from_user, i; + int num_pages_from_user; buf = kzalloc(sizeof *buf, GFP_KERNEL); if (!buf) @@ -133,56 +169,41 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, buf->vaddr = NULL; buf->write = write; buf->offset = vaddr & ~PAGE_MASK; - buf->sg_desc.size = size; + buf->size = size; first = (vaddr & PAGE_MASK) >> PAGE_SHIFT; last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT; - buf->sg_desc.num_pages = last - first + 1; - - buf->sg_desc.sglist = vzalloc( - buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist)); - if (!buf->sg_desc.sglist) - goto userptr_fail_sglist_alloc; - - sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages); + buf->num_pages = last - first + 1; - buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *), + buf->pages = kzalloc(buf->num_pages * sizeof(struct page *), GFP_KERNEL); if 
(!buf->pages) - goto userptr_fail_pages_array_alloc; + return NULL; num_pages_from_user = get_user_pages(current, current->mm, vaddr & PAGE_MASK, - buf->sg_desc.num_pages, + buf->num_pages, write, 1, /* force */ buf->pages, NULL); - if (num_pages_from_user != buf->sg_desc.num_pages) + if (num_pages_from_user != buf->num_pages) goto userptr_fail_get_user_pages; - sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0], - PAGE_SIZE - buf->offset, buf->offset); - size -= PAGE_SIZE - buf->offset; - for (i = 1; i < buf->sg_desc.num_pages; ++i) { - sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i], - min_t(size_t, PAGE_SIZE, size), 0); - size -= min_t(size_t, PAGE_SIZE, size); - } + if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages, + buf->num_pages, buf->offset, size, 0)) + goto userptr_fail_alloc_table_from_pages; + return buf; +userptr_fail_alloc_table_from_pages: userptr_fail_get_user_pages: dprintk(1, "get_user_pages requested/got: %d/%d]\n", - num_pages_from_user, buf->sg_desc.num_pages); + num_pages_from_user, buf->num_pages); while (--num_pages_from_user >= 0) put_page(buf->pages[num_pages_from_user]); kfree(buf->pages); - -userptr_fail_pages_array_alloc: - vfree(buf->sg_desc.sglist); - -userptr_fail_sglist_alloc: kfree(buf); return NULL; } @@ -194,18 +215,18 @@ userptr_fail_sglist_alloc: static void vb2_dma_sg_put_userptr(void *buf_priv) { struct vb2_dma_sg_buf *buf = buf_priv; - int i = buf->sg_desc.num_pages; + int i = buf->num_pages; dprintk(1, "%s: Releasing userspace buffer of %d pages\n", - __func__, buf->sg_desc.num_pages); + __func__, buf->num_pages); if (buf->vaddr) - vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages); + vm_unmap_ram(buf->vaddr, buf->num_pages); + sg_free_table(&buf->sg_table); while (--i >= 0) { if (buf->write) set_page_dirty_lock(buf->pages[i]); put_page(buf->pages[i]); } - vfree(buf->sg_desc.sglist); kfree(buf->pages); kfree(buf); } @@ -218,7 +239,7 @@ static void *vb2_dma_sg_vaddr(void *buf_priv) if (!buf->vaddr) buf->vaddr = vm_map_ram(buf->pages, - buf->sg_desc.num_pages, + buf->num_pages, -1, PAGE_KERNEL); @@ -274,7 +295,7 @@ static void *vb2_dma_sg_cookie(void *buf_priv) { struct vb2_dma_sg_buf *buf = buf_priv; - return &buf->sg_desc; + return &buf->sg_table; } const struct vb2_mem_ops vb2_dma_sg_memops = { diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c index 7ebe9ef1eba6..c9b1f6422941 100644 --- a/drivers/mfd/88pm860x-core.c +++ b/drivers/mfd/88pm860x-core.c @@ -1247,7 +1247,7 @@ static struct i2c_driver pm860x_driver = { .name = "88PM860x", .owner = THIS_MODULE, .pm = &pm860x_pm_ops, - .of_match_table = of_match_ptr(pm860x_dt_ids), + .of_match_table = pm860x_dt_ids, }, .probe = pm860x_probe, .remove = pm860x_remove, diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 914c3d142f78..62a60caa5d1f 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -27,6 +27,18 @@ config MFD_AS3711 help Support for the AS3711 PMIC from AMS +config MFD_AS3722 + bool "ams AS3722 Power Management IC" + select MFD_CORE + select REGMAP_I2C + select REGMAP_IRQ + depends on I2C && OF + help + The ams AS3722 is a compact system PMU suitable for mobile phones, + tablets etc. It has 4 DC/DC step-down regulators, 3 DC/DC step-down + controllers, 11 LDOs, RTC, automatic battery, temperature and + over current monitoring, GPIOs, ADC and a watchdog. 
+ config PMIC_ADP5520 bool "Analog Devices ADP5520/01 MFD PMIC Core Support" depends on I2C=y @@ -664,14 +676,14 @@ menu "STMicroelectronics STMPE Interface Drivers" depends on MFD_STMPE config STMPE_I2C - bool "STMicroelectronics STMPE I2C Inteface" + bool "STMicroelectronics STMPE I2C Interface" depends on I2C=y default y help This is used to enable I2C interface of STMPE config STMPE_SPI - bool "STMicroelectronics STMPE SPI Inteface" + bool "STMicroelectronics STMPE SPI Interface" depends on SPI_MASTER help This is used to enable SPI interface of STMPE @@ -1151,6 +1163,16 @@ config MFD_WM8994 core support for the WM8994, in order to use the actual functionaltiy of the device other drivers must be enabled. +config MFD_STW481X + bool "Support for ST Microelectronics STw481x" + depends on I2C && ARCH_NOMADIK + select REGMAP_I2C + select MFD_CORE + help + Select this option to enable the STw481x chip driver used + in various ST Microelectronics and ST-Ericsson embedded + Nomadik series. + endmenu endif diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 15b905c6553c..8a28dc90fe78 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -162,3 +162,5 @@ obj-$(CONFIG_MFD_LM3533) += lm3533-core.o lm3533-ctrlbank.o obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o vexpress-sysreg.o obj-$(CONFIG_MFD_RETU) += retu-mfd.o obj-$(CONFIG_MFD_AS3711) += as3711.o +obj-$(CONFIG_MFD_AS3722) += as3722.o +obj-$(CONFIG_MFD_STW481X) += stw481x.o diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c index 6f68472e0ca6..14d9542a4eed 100644 --- a/drivers/mfd/aat2870-core.c +++ b/drivers/mfd/aat2870-core.c @@ -293,7 +293,7 @@ static ssize_t aat2870_reg_write_file(struct file *file, unsigned long addr, val; int ret; - buf_size = min(count, (sizeof(buf)-1)); + buf_size = min(count, (size_t)(sizeof(buf)-1)); if (copy_from_user(buf, user_buf, buf_size)) { dev_err(aat2870->dev, "Failed to copy from user\n"); return -EFAULT; diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index 022b1863d36c..75e180ceecf3 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -540,7 +540,7 @@ static int arizona_of_get_core_pdata(struct arizona *arizona) for (i = 0; i < ARRAY_SIZE(arizona->pdata.gpio_defaults); i++) { if (arizona->pdata.gpio_defaults[i] > 0xffff) arizona->pdata.gpio_defaults[i] = 0; - if (arizona->pdata.gpio_defaults[i] == 0) + else if (arizona->pdata.gpio_defaults[i] == 0) arizona->pdata.gpio_defaults[i] = 0x10000; } } else { @@ -633,11 +633,11 @@ int arizona_dev_init(struct arizona *arizona) dev_set_drvdata(arizona->dev, arizona); mutex_init(&arizona->clk_lock); - arizona_of_get_core_pdata(arizona); - if (dev_get_platdata(arizona->dev)) memcpy(&arizona->pdata, dev_get_platdata(arizona->dev), sizeof(arizona->pdata)); + else + arizona_of_get_core_pdata(arizona); regcache_cache_only(arizona->regmap, true); diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c index 51dbabf7c021..beccb790c9ba 100644 --- a/drivers/mfd/arizona-i2c.c +++ b/drivers/mfd/arizona-i2c.c @@ -17,6 +17,7 @@ #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> +#include <linux/of.h> #include <linux/mfd/arizona/core.h> diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c index 47be7b35b5c5..1ca554b18bef 100644 --- a/drivers/mfd/arizona-spi.c +++ b/drivers/mfd/arizona-spi.c @@ -17,6 +17,7 @@ #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/spi/spi.h> +#include <linux/of.h> #include 
<linux/mfd/arizona/core.h> diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c index abd3ab7c0908..ec684fcedb42 100644 --- a/drivers/mfd/as3711.c +++ b/drivers/mfd/as3711.c @@ -17,6 +17,7 @@ #include <linux/mfd/as3711.h> #include <linux/mfd/core.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/regmap.h> #include <linux/slab.h> diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c new file mode 100644 index 000000000000..f161f2e00df7 --- /dev/null +++ b/drivers/mfd/as3722.c @@ -0,0 +1,449 @@ +/* + * Core driver for ams AS3722 PMICs + * + * Copyright (C) 2013 AMS AG + * Copyright (c) 2013, NVIDIA Corporation. All rights reserved. + * + * Author: Florian Lobmaier <florian.lobmaier@ams.com> + * Author: Laxman Dewangan <ldewangan@nvidia.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mfd/core.h> +#include <linux/mfd/as3722.h> +#include <linux/of.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +#define AS3722_DEVICE_ID 0x0C + +static const struct resource as3722_rtc_resource[] = { + { + .name = "as3722-rtc-alarm", + .start = AS3722_IRQ_RTC_ALARM, + .end = AS3722_IRQ_RTC_ALARM, + .flags = IORESOURCE_IRQ, + }, +}; + +static const struct resource as3722_adc_resource[] = { + { + .name = "as3722-adc", + .start = AS3722_IRQ_ADC, + .end = AS3722_IRQ_ADC, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct mfd_cell as3722_devs[] = { + { + .name = "as3722-pinctrl", + }, + { + .name = "as3722-regulator", + }, + { + .name = "as3722-rtc", + .num_resources = ARRAY_SIZE(as3722_rtc_resource), + .resources = as3722_rtc_resource, + }, + { + .name = "as3722-adc", + .num_resources = ARRAY_SIZE(as3722_adc_resource), + .resources = as3722_adc_resource, + }, + { + .name = "as3722-power-off", + }, +}; + +static const struct regmap_irq as3722_irqs[] = { + /* INT1 IRQs */ + [AS3722_IRQ_LID] = { + .mask = AS3722_INTERRUPT_MASK1_LID, + }, + [AS3722_IRQ_ACOK] = { + .mask = AS3722_INTERRUPT_MASK1_ACOK, + }, + [AS3722_IRQ_ENABLE1] = { + .mask = AS3722_INTERRUPT_MASK1_ENABLE1, + }, + [AS3722_IRQ_OCCUR_ALARM_SD0] = { + .mask = AS3722_INTERRUPT_MASK1_OCURR_ALARM_SD0, + }, + [AS3722_IRQ_ONKEY_LONG_PRESS] = { + .mask = AS3722_INTERRUPT_MASK1_ONKEY_LONG, + }, + [AS3722_IRQ_ONKEY] = { + .mask = AS3722_INTERRUPT_MASK1_ONKEY, + }, + [AS3722_IRQ_OVTMP] = { + .mask = AS3722_INTERRUPT_MASK1_OVTMP, + }, + [AS3722_IRQ_LOWBAT] = { + .mask = AS3722_INTERRUPT_MASK1_LOWBAT, + }, + + /* INT2 IRQs */ + [AS3722_IRQ_SD0_LV] = { + .mask = AS3722_INTERRUPT_MASK2_SD0_LV, + .reg_offset = 1, + }, + [AS3722_IRQ_SD1_LV] = { + .mask = AS3722_INTERRUPT_MASK2_SD1_LV, + .reg_offset = 1, + }, + [AS3722_IRQ_SD2_LV] = { + .mask = AS3722_INTERRUPT_MASK2_SD2345_LV, + .reg_offset 
= 1, + }, + [AS3722_IRQ_PWM1_OV_PROT] = { + .mask = AS3722_INTERRUPT_MASK2_PWM1_OV_PROT, + .reg_offset = 1, + }, + [AS3722_IRQ_PWM2_OV_PROT] = { + .mask = AS3722_INTERRUPT_MASK2_PWM2_OV_PROT, + .reg_offset = 1, + }, + [AS3722_IRQ_ENABLE2] = { + .mask = AS3722_INTERRUPT_MASK2_ENABLE2, + .reg_offset = 1, + }, + [AS3722_IRQ_SD6_LV] = { + .mask = AS3722_INTERRUPT_MASK2_SD6_LV, + .reg_offset = 1, + }, + [AS3722_IRQ_RTC_REP] = { + .mask = AS3722_INTERRUPT_MASK2_RTC_REP, + .reg_offset = 1, + }, + + /* INT3 IRQs */ + [AS3722_IRQ_RTC_ALARM] = { + .mask = AS3722_INTERRUPT_MASK3_RTC_ALARM, + .reg_offset = 2, + }, + [AS3722_IRQ_GPIO1] = { + .mask = AS3722_INTERRUPT_MASK3_GPIO1, + .reg_offset = 2, + }, + [AS3722_IRQ_GPIO2] = { + .mask = AS3722_INTERRUPT_MASK3_GPIO2, + .reg_offset = 2, + }, + [AS3722_IRQ_GPIO3] = { + .mask = AS3722_INTERRUPT_MASK3_GPIO3, + .reg_offset = 2, + }, + [AS3722_IRQ_GPIO4] = { + .mask = AS3722_INTERRUPT_MASK3_GPIO4, + .reg_offset = 2, + }, + [AS3722_IRQ_GPIO5] = { + .mask = AS3722_INTERRUPT_MASK3_GPIO5, + .reg_offset = 2, + }, + [AS3722_IRQ_WATCHDOG] = { + .mask = AS3722_INTERRUPT_MASK3_WATCHDOG, + .reg_offset = 2, + }, + [AS3722_IRQ_ENABLE3] = { + .mask = AS3722_INTERRUPT_MASK3_ENABLE3, + .reg_offset = 2, + }, + + /* INT4 IRQs */ + [AS3722_IRQ_TEMP_SD0_SHUTDOWN] = { + .mask = AS3722_INTERRUPT_MASK4_TEMP_SD0_SHUTDOWN, + .reg_offset = 3, + }, + [AS3722_IRQ_TEMP_SD1_SHUTDOWN] = { + .mask = AS3722_INTERRUPT_MASK4_TEMP_SD1_SHUTDOWN, + .reg_offset = 3, + }, + [AS3722_IRQ_TEMP_SD2_SHUTDOWN] = { + .mask = AS3722_INTERRUPT_MASK4_TEMP_SD6_SHUTDOWN, + .reg_offset = 3, + }, + [AS3722_IRQ_TEMP_SD0_ALARM] = { + .mask = AS3722_INTERRUPT_MASK4_TEMP_SD0_ALARM, + .reg_offset = 3, + }, + [AS3722_IRQ_TEMP_SD1_ALARM] = { + .mask = AS3722_INTERRUPT_MASK4_TEMP_SD1_ALARM, + .reg_offset = 3, + }, + [AS3722_IRQ_TEMP_SD6_ALARM] = { + .mask = AS3722_INTERRUPT_MASK4_TEMP_SD6_ALARM, + .reg_offset = 3, + }, + [AS3722_IRQ_OCCUR_ALARM_SD6] = { + .mask = AS3722_INTERRUPT_MASK4_OCCUR_ALARM_SD6, + .reg_offset = 3, + }, + [AS3722_IRQ_ADC] = { + .mask = AS3722_INTERRUPT_MASK4_ADC, + .reg_offset = 3, + }, +}; + +static const struct regmap_irq_chip as3722_irq_chip = { + .name = "as3722", + .irqs = as3722_irqs, + .num_irqs = ARRAY_SIZE(as3722_irqs), + .num_regs = 4, + .status_base = AS3722_INTERRUPT_STATUS1_REG, + .mask_base = AS3722_INTERRUPT_MASK1_REG, +}; + +static int as3722_check_device_id(struct as3722 *as3722) +{ + u32 val; + int ret; + + /* Check that this is actually a AS3722 */ + ret = as3722_read(as3722, AS3722_ASIC_ID1_REG, &val); + if (ret < 0) { + dev_err(as3722->dev, "ASIC_ID1 read failed: %d\n", ret); + return ret; + } + + if (val != AS3722_DEVICE_ID) { + dev_err(as3722->dev, "Device is not AS3722, ID is 0x%x\n", val); + return -ENODEV; + } + + ret = as3722_read(as3722, AS3722_ASIC_ID2_REG, &val); + if (ret < 0) { + dev_err(as3722->dev, "ASIC_ID2 read failed: %d\n", ret); + return ret; + } + + dev_info(as3722->dev, "AS3722 with revision 0x%x found\n", val); + return 0; +} + +static int as3722_configure_pullups(struct as3722 *as3722) +{ + int ret; + u32 val = 0; + + if (as3722->en_intern_int_pullup) + val |= AS3722_INT_PULL_UP; + if (as3722->en_intern_i2c_pullup) + val |= AS3722_I2C_PULL_UP; + + ret = as3722_update_bits(as3722, AS3722_IOVOLTAGE_REG, + AS3722_INT_PULL_UP | AS3722_I2C_PULL_UP, val); + if (ret < 0) + dev_err(as3722->dev, "IOVOLTAGE_REG update failed: %d\n", ret); + return ret; +} + +static const struct regmap_range as3722_readable_ranges[] = { + regmap_reg_range(AS3722_SD0_VOLTAGE_REG, 
AS3722_SD6_VOLTAGE_REG), + regmap_reg_range(AS3722_GPIO0_CONTROL_REG, AS3722_LDO7_VOLTAGE_REG), + regmap_reg_range(AS3722_LDO9_VOLTAGE_REG, AS3722_REG_SEQU_MOD3_REG), + regmap_reg_range(AS3722_SD_PHSW_CTRL_REG, AS3722_PWM_CONTROL_H_REG), + regmap_reg_range(AS3722_WATCHDOG_TIMER_REG, AS3722_WATCHDOG_TIMER_REG), + regmap_reg_range(AS3722_WATCHDOG_SOFTWARE_SIGNAL_REG, + AS3722_BATTERY_VOLTAGE_MONITOR2_REG), + regmap_reg_range(AS3722_SD_CONTROL_REG, AS3722_PWM_VCONTROL4_REG), + regmap_reg_range(AS3722_BB_CHARGER_REG, AS3722_SRAM_REG), + regmap_reg_range(AS3722_RTC_ACCESS_REG, AS3722_RTC_ACCESS_REG), + regmap_reg_range(AS3722_RTC_STATUS_REG, AS3722_TEMP_STATUS_REG), + regmap_reg_range(AS3722_ADC0_CONTROL_REG, AS3722_ADC_CONFIGURATION_REG), + regmap_reg_range(AS3722_ASIC_ID1_REG, AS3722_ASIC_ID2_REG), + regmap_reg_range(AS3722_LOCK_REG, AS3722_LOCK_REG), +}; + +static const struct regmap_access_table as3722_readable_table = { + .yes_ranges = as3722_readable_ranges, + .n_yes_ranges = ARRAY_SIZE(as3722_readable_ranges), +}; + +static const struct regmap_range as3722_writable_ranges[] = { + regmap_reg_range(AS3722_SD0_VOLTAGE_REG, AS3722_SD6_VOLTAGE_REG), + regmap_reg_range(AS3722_GPIO0_CONTROL_REG, AS3722_LDO7_VOLTAGE_REG), + regmap_reg_range(AS3722_LDO9_VOLTAGE_REG, AS3722_GPIO_SIGNAL_OUT_REG), + regmap_reg_range(AS3722_REG_SEQU_MOD1_REG, AS3722_REG_SEQU_MOD3_REG), + regmap_reg_range(AS3722_SD_PHSW_CTRL_REG, AS3722_PWM_CONTROL_H_REG), + regmap_reg_range(AS3722_WATCHDOG_TIMER_REG, AS3722_WATCHDOG_TIMER_REG), + regmap_reg_range(AS3722_WATCHDOG_SOFTWARE_SIGNAL_REG, + AS3722_BATTERY_VOLTAGE_MONITOR2_REG), + regmap_reg_range(AS3722_SD_CONTROL_REG, AS3722_PWM_VCONTROL4_REG), + regmap_reg_range(AS3722_BB_CHARGER_REG, AS3722_SRAM_REG), + regmap_reg_range(AS3722_INTERRUPT_MASK1_REG, AS3722_TEMP_STATUS_REG), + regmap_reg_range(AS3722_ADC0_CONTROL_REG, AS3722_ADC1_CONTROL_REG), + regmap_reg_range(AS3722_ADC1_THRESHOLD_HI_MSB_REG, + AS3722_ADC_CONFIGURATION_REG), + regmap_reg_range(AS3722_LOCK_REG, AS3722_LOCK_REG), +}; + +static const struct regmap_access_table as3722_writable_table = { + .yes_ranges = as3722_writable_ranges, + .n_yes_ranges = ARRAY_SIZE(as3722_writable_ranges), +}; + +static const struct regmap_range as3722_cacheable_ranges[] = { + regmap_reg_range(AS3722_SD0_VOLTAGE_REG, AS3722_LDO11_VOLTAGE_REG), + regmap_reg_range(AS3722_SD_CONTROL_REG, AS3722_LDOCONTROL1_REG), +}; + +static const struct regmap_access_table as3722_volatile_table = { + .no_ranges = as3722_cacheable_ranges, + .n_no_ranges = ARRAY_SIZE(as3722_cacheable_ranges), +}; + +static const struct regmap_config as3722_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = AS3722_MAX_REGISTER, + .cache_type = REGCACHE_RBTREE, + .rd_table = &as3722_readable_table, + .wr_table = &as3722_writable_table, + .volatile_table = &as3722_volatile_table, +}; + +static int as3722_i2c_of_probe(struct i2c_client *i2c, + struct as3722 *as3722) +{ + struct device_node *np = i2c->dev.of_node; + struct irq_data *irq_data; + + if (!np) { + dev_err(&i2c->dev, "Device Tree not found\n"); + return -EINVAL; + } + + irq_data = irq_get_irq_data(i2c->irq); + if (!irq_data) { + dev_err(&i2c->dev, "Invalid IRQ: %d\n", i2c->irq); + return -EINVAL; + } + + as3722->en_intern_int_pullup = of_property_read_bool(np, + "ams,enable-internal-int-pullup"); + as3722->en_intern_i2c_pullup = of_property_read_bool(np, + "ams,enable-internal-i2c-pullup"); + as3722->irq_flags = irqd_get_trigger_type(irq_data); + dev_dbg(&i2c->dev, "IRQ flags are 0x%08lx\n", 
as3722->irq_flags); + return 0; +} + +static int as3722_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + struct as3722 *as3722; + unsigned long irq_flags; + int ret; + + as3722 = devm_kzalloc(&i2c->dev, sizeof(struct as3722), GFP_KERNEL); + if (!as3722) + return -ENOMEM; + + as3722->dev = &i2c->dev; + as3722->chip_irq = i2c->irq; + i2c_set_clientdata(i2c, as3722); + + ret = as3722_i2c_of_probe(i2c, as3722); + if (ret < 0) + return ret; + + as3722->regmap = devm_regmap_init_i2c(i2c, &as3722_regmap_config); + if (IS_ERR(as3722->regmap)) { + ret = PTR_ERR(as3722->regmap); + dev_err(&i2c->dev, "regmap init failed: %d\n", ret); + return ret; + } + + ret = as3722_check_device_id(as3722); + if (ret < 0) + return ret; + + irq_flags = as3722->irq_flags | IRQF_ONESHOT; + ret = regmap_add_irq_chip(as3722->regmap, as3722->chip_irq, + irq_flags, -1, &as3722_irq_chip, + &as3722->irq_data); + if (ret < 0) { + dev_err(as3722->dev, "Failed to add regmap irq: %d\n", ret); + return ret; + } + + ret = as3722_configure_pullups(as3722); + if (ret < 0) + goto scrub; + + ret = mfd_add_devices(&i2c->dev, -1, as3722_devs, + ARRAY_SIZE(as3722_devs), NULL, 0, + regmap_irq_get_domain(as3722->irq_data)); + if (ret) { + dev_err(as3722->dev, "Failed to add MFD devices: %d\n", ret); + goto scrub; + } + + dev_dbg(as3722->dev, "AS3722 core driver initialized successfully\n"); + return 0; + +scrub: + regmap_del_irq_chip(as3722->chip_irq, as3722->irq_data); + return ret; +} + +static int as3722_i2c_remove(struct i2c_client *i2c) +{ + struct as3722 *as3722 = i2c_get_clientdata(i2c); + + mfd_remove_devices(as3722->dev); + regmap_del_irq_chip(as3722->chip_irq, as3722->irq_data); + return 0; +} + +static const struct of_device_id as3722_of_match[] = { + { .compatible = "ams,as3722", }, + {}, +}; +MODULE_DEVICE_TABLE(of, as3722_of_match); + +static const struct i2c_device_id as3722_i2c_id[] = { + { "as3722", 0 }, + {}, +}; +MODULE_DEVICE_TABLE(i2c, as3722_i2c_id); + +static struct i2c_driver as3722_i2c_driver = { + .driver = { + .name = "as3722", + .owner = THIS_MODULE, + .of_match_table = as3722_of_match, + }, + .probe = as3722_i2c_probe, + .remove = as3722_i2c_remove, + .id_table = as3722_i2c_id, +}; + +module_i2c_driver(as3722_i2c_driver); + +MODULE_DESCRIPTION("I2C support for AS3722 PMICs"); +MODULE_AUTHOR("Florian Lobmaier <florian.lobmaier@ams.com>"); +MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c index 6a9fec40d018..c319c4ef5d49 100644 --- a/drivers/mfd/da9052-i2c.c +++ b/drivers/mfd/da9052-i2c.c @@ -86,7 +86,11 @@ static int da9052_i2c_fix(struct da9052 *da9052, unsigned char reg) return 0; } -static int da9052_i2c_enable_multiwrite(struct da9052 *da9052) +/* + * According to errata item 24, multiwrite mode should be avoided + * in order to prevent register data corruption after power-down. 
+ */ +static int da9052_i2c_disable_multiwrite(struct da9052 *da9052) { int reg_val, ret; @@ -94,8 +98,8 @@ static int da9052_i2c_enable_multiwrite(struct da9052 *da9052) if (ret < 0) return ret; - if (reg_val & DA9052_CONTROL_B_WRITEMODE) { - reg_val &= ~DA9052_CONTROL_B_WRITEMODE; + if (!(reg_val & DA9052_CONTROL_B_WRITEMODE)) { + reg_val |= DA9052_CONTROL_B_WRITEMODE; ret = regmap_write(da9052->regmap, DA9052_CONTROL_B_REG, reg_val); if (ret < 0) @@ -154,7 +158,7 @@ static int da9052_i2c_probe(struct i2c_client *client, return ret; } - ret = da9052_i2c_enable_multiwrite(da9052); + ret = da9052_i2c_disable_multiwrite(da9052); if (ret < 0) return ret; diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c index 7245b0c5b794..2ed774e7d342 100644 --- a/drivers/mfd/ezx-pcap.c +++ b/drivers/mfd/ezx-pcap.c @@ -394,16 +394,12 @@ static int pcap_add_subdev(struct pcap_chip *pcap, static int ezx_pcap_remove(struct spi_device *spi) { struct pcap_chip *pcap = spi_get_drvdata(spi); - struct pcap_platform_data *pdata = dev_get_platdata(&spi->dev); - int i, adc_irq; + int i; /* remove all registered subdevs */ device_for_each_child(&spi->dev, NULL, pcap_remove_subdev); /* cleanup ADC */ - adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ? - PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE); - devm_free_irq(&spi->dev, adc_irq, pcap); mutex_lock(&pcap->adc_mutex); for (i = 0; i < PCAP_ADC_MAXQ; i++) kfree(pcap->adc_queue[i]); @@ -509,8 +505,6 @@ static int ezx_pcap_probe(struct spi_device *spi) remove_subdevs: device_for_each_child(&spi->dev, NULL, pcap_remove_subdev); -/* free_adc: */ - devm_free_irq(&spi->dev, adc_irq, pcap); free_irqchip: for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) irq_set_chip_and_handler(i, NULL, NULL); diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c index 9483bc8472a5..da1c6566d93d 100644 --- a/drivers/mfd/lpc_ich.c +++ b/drivers/mfd/lpc_ich.c @@ -53,6 +53,7 @@ * document number TBD : Wellsburg * document number TBD : Avoton SoC * document number TBD : Coleto Creek + * document number TBD : Wildcat Point-LP */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -211,6 +212,7 @@ enum lpc_chipsets { LPC_WBG, /* Wellsburg */ LPC_AVN, /* Avoton SoC */ LPC_COLETO, /* Coleto Creek */ + LPC_WPT_LP, /* Wildcat Point-LP */ }; static struct lpc_ich_info lpc_chipset_info[] = { @@ -503,6 +505,10 @@ static struct lpc_ich_info lpc_chipset_info[] = { .name = "Coleto Creek", .iTCO_version = 2, }, + [LPC_WPT_LP] = { + .name = "Lynx Point_LP", + .iTCO_version = 2, + }, }; /* @@ -721,6 +727,13 @@ static DEFINE_PCI_DEVICE_TABLE(lpc_ich_ids) = { { PCI_VDEVICE(INTEL, 0x1f3a), LPC_AVN}, { PCI_VDEVICE(INTEL, 0x1f3b), LPC_AVN}, { PCI_VDEVICE(INTEL, 0x2390), LPC_COLETO}, + { PCI_VDEVICE(INTEL, 0x9cc1), LPC_WPT_LP}, + { PCI_VDEVICE(INTEL, 0x9cc2), LPC_WPT_LP}, + { PCI_VDEVICE(INTEL, 0x9cc3), LPC_WPT_LP}, + { PCI_VDEVICE(INTEL, 0x9cc5), LPC_WPT_LP}, + { PCI_VDEVICE(INTEL, 0x9cc6), LPC_WPT_LP}, + { PCI_VDEVICE(INTEL, 0x9cc7), LPC_WPT_LP}, + { PCI_VDEVICE(INTEL, 0x9cc9), LPC_WPT_LP}, { 0, }, /* End of list */ }; MODULE_DEVICE_TABLE(pci, lpc_ich_ids); @@ -969,7 +982,6 @@ static int lpc_ich_probe(struct pci_dev *dev, if (!cell_added) { dev_warn(&dev->dev, "No MFD cells added\n"); lpc_ich_restore_config_space(dev); - pci_set_drvdata(dev, NULL); return -ENODEV; } @@ -980,7 +992,6 @@ static void lpc_ich_remove(struct pci_dev *dev) { mfd_remove_devices(&dev->dev); lpc_ich_restore_config_space(dev); - pci_set_drvdata(dev, NULL); } static struct pci_driver lpc_ich_driver = { diff 
--git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c index 8cc6aac27cb2..fbfbf0b7f97a 100644 --- a/drivers/mfd/lpc_sch.c +++ b/drivers/mfd/lpc_sch.c @@ -59,18 +59,21 @@ static struct mfd_cell isch_smbus_cell = { .name = "isch_smbus", .num_resources = 1, .resources = &smbus_sch_resource, + .ignore_resource_conflicts = true, }; static struct mfd_cell sch_gpio_cell = { .name = "sch_gpio", .num_resources = 1, .resources = &gpio_sch_resource, + .ignore_resource_conflicts = true, }; static struct mfd_cell wdt_sch_cell = { .name = "ie6xx_wdt", .num_resources = 1, .resources = &wdt_sch_resource, + .ignore_resource_conflicts = true, }; static DEFINE_PCI_DEVICE_TABLE(lpc_sch_ids) = { diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c index 522be67b2e68..34520cbe8afb 100644 --- a/drivers/mfd/max77686.c +++ b/drivers/mfd/max77686.c @@ -31,6 +31,7 @@ #include <linux/mfd/max77686.h> #include <linux/mfd/max77686-private.h> #include <linux/err.h> +#include <linux/of.h> #define I2C_ADDR_RTC (0x0C >> 1) diff --git a/drivers/mfd/max77693-irq.c b/drivers/mfd/max77693-irq.c index 1029d018c739..66b58fe77094 100644 --- a/drivers/mfd/max77693-irq.c +++ b/drivers/mfd/max77693-irq.c @@ -128,7 +128,8 @@ static void max77693_irq_sync_unlock(struct irq_data *data) static const inline struct max77693_irq_data * irq_to_max77693_irq(struct max77693_dev *max77693, int irq) { - return &max77693_irqs[irq]; + struct irq_data *data = irq_get_irq_data(irq); + return &max77693_irqs[data->hwirq]; } static void max77693_irq_mask(struct irq_data *data) diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c index c04723efc707..9f92463f4f7e 100644 --- a/drivers/mfd/max77693.c +++ b/drivers/mfd/max77693.c @@ -28,6 +28,7 @@ #include <linux/i2c.h> #include <linux/err.h> #include <linux/interrupt.h> +#include <linux/of.h> #include <linux/pm_runtime.h> #include <linux/mutex.h> #include <linux/mfd/core.h> @@ -110,15 +111,9 @@ static int max77693_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct max77693_dev *max77693; - struct max77693_platform_data *pdata = dev_get_platdata(&i2c->dev); u8 reg_data; int ret = 0; - if (!pdata) { - dev_err(&i2c->dev, "No platform data found.\n"); - return -EINVAL; - } - max77693 = devm_kzalloc(&i2c->dev, sizeof(struct max77693_dev), GFP_KERNEL); if (max77693 == NULL) @@ -138,8 +133,6 @@ static int max77693_i2c_probe(struct i2c_client *i2c, return ret; } - max77693->wakeup = pdata->wakeup; - ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_PMIC_ID2, ®_data); if (ret < 0) { @@ -179,8 +172,6 @@ static int max77693_i2c_probe(struct i2c_client *i2c, if (ret < 0) goto err_mfd; - device_init_wakeup(max77693->dev, pdata->wakeup); - return ret; err_mfd: @@ -235,11 +226,19 @@ static const struct dev_pm_ops max77693_pm = { .resume = max77693_resume, }; +#ifdef CONFIG_OF +static struct of_device_id max77693_dt_match[] = { + { .compatible = "maxim,max77693" }, + {}, +}; +#endif + static struct i2c_driver max77693_i2c_driver = { .driver = { .name = "max77693", .owner = THIS_MODULE, .pm = &max77693_pm, + .of_match_table = of_match_ptr(max77693_dt_match), }, .probe = max77693_i2c_probe, .remove = max77693_i2c_remove, diff --git a/drivers/mfd/max8907.c b/drivers/mfd/max8907.c index e9b1c93a3ade..3bbfedc07f41 100644 --- a/drivers/mfd/max8907.c +++ b/drivers/mfd/max8907.c @@ -17,6 +17,7 @@ #include <linux/mfd/core.h> #include <linux/mfd/max8907.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/of_device.h> #include <linux/regmap.h> #include <linux/slab.h> 
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c index de7fb80a6052..176aa26fc787 100644 --- a/drivers/mfd/max8925-i2c.c +++ b/drivers/mfd/max8925-i2c.c @@ -238,7 +238,7 @@ static struct i2c_driver max8925_driver = { .name = "max8925", .owner = THIS_MODULE, .pm = &max8925_pm_ops, - .of_match_table = of_match_ptr(max8925_dt_ids), + .of_match_table = max8925_dt_ids, }, .probe = max8925_probe, .remove = max8925_remove, diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c index cee098c0dae3..791aea3e96ce 100644 --- a/drivers/mfd/max8997.c +++ b/drivers/mfd/max8997.c @@ -24,6 +24,7 @@ #include <linux/err.h> #include <linux/slab.h> #include <linux/i2c.h> +#include <linux/of.h> #include <linux/of_irq.h> #include <linux/interrupt.h> #include <linux/pm_runtime.h> diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c index f745e27ee874..898bd335cd8e 100644 --- a/drivers/mfd/mc13xxx-i2c.c +++ b/drivers/mfd/mc13xxx-i2c.c @@ -78,7 +78,6 @@ static int mc13xxx_i2c_probe(struct i2c_client *client, ret = PTR_ERR(mc13xxx->regmap); dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n", ret); - dev_set_drvdata(&client->dev, NULL); return ret; } diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index adc8ea36e7c4..267649244737 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -64,7 +64,8 @@ int mfd_cell_disable(struct platform_device *pdev) EXPORT_SYMBOL(mfd_cell_disable); static int mfd_platform_add_cell(struct platform_device *pdev, - const struct mfd_cell *cell) + const struct mfd_cell *cell, + atomic_t *usage_count) { if (!cell) return 0; @@ -73,11 +74,12 @@ static int mfd_platform_add_cell(struct platform_device *pdev, if (!pdev->mfd_cell) return -ENOMEM; + pdev->mfd_cell->usage_count = usage_count; return 0; } static int mfd_add_device(struct device *parent, int id, - const struct mfd_cell *cell, + const struct mfd_cell *cell, atomic_t *usage_count, struct resource *mem_base, int irq_base, struct irq_domain *domain) { @@ -123,7 +125,7 @@ static int mfd_add_device(struct device *parent, int id, goto fail_alias; } - ret = mfd_platform_add_cell(pdev, cell); + ret = mfd_platform_add_cell(pdev, cell, usage_count); if (ret) goto fail_alias; @@ -192,12 +194,12 @@ fail_alloc: } int mfd_add_devices(struct device *parent, int id, - struct mfd_cell *cells, int n_devs, + const struct mfd_cell *cells, int n_devs, struct resource *mem_base, int irq_base, struct irq_domain *domain) { int i; - int ret = 0; + int ret; atomic_t *cnts; /* initialize reference counting for all cells */ @@ -207,16 +209,19 @@ int mfd_add_devices(struct device *parent, int id, for (i = 0; i < n_devs; i++) { atomic_set(&cnts[i], 0); - cells[i].usage_count = &cnts[i]; - ret = mfd_add_device(parent, id, cells + i, mem_base, + ret = mfd_add_device(parent, id, cells + i, cnts + i, mem_base, irq_base, domain); if (ret) - break; + goto fail; } - if (ret) - mfd_remove_devices(parent); + return 0; +fail: + if (i) + mfd_remove_devices(parent); + else + kfree(cnts); return ret; } EXPORT_SYMBOL(mfd_add_devices); @@ -271,8 +276,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones) for (i = 0; i < n_clones; i++) { cell_entry.name = clones[i]; /* don't give up if a single call fails; just report error */ - if (mfd_add_device(pdev->dev.parent, -1, &cell_entry, NULL, 0, - NULL)) + if (mfd_add_device(pdev->dev.parent, -1, &cell_entry, + cell_entry.usage_count, NULL, 0, NULL)) dev_err(dev, "failed to create platform device '%s'\n", clones[i]); } diff --git 
a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 29ee54d68512..142650fdc058 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -328,13 +328,13 @@ static int usbhs_runtime_resume(struct device *dev) omap_tll_enable(pdata); if (!IS_ERR(omap->ehci_logic_fck)) - clk_enable(omap->ehci_logic_fck); + clk_prepare_enable(omap->ehci_logic_fck); for (i = 0; i < omap->nports; i++) { switch (pdata->port_mode[i]) { case OMAP_EHCI_PORT_MODE_HSIC: if (!IS_ERR(omap->hsic60m_clk[i])) { - r = clk_enable(omap->hsic60m_clk[i]); + r = clk_prepare_enable(omap->hsic60m_clk[i]); if (r) { dev_err(dev, "Can't enable port %d hsic60m clk:%d\n", @@ -343,7 +343,7 @@ static int usbhs_runtime_resume(struct device *dev) } if (!IS_ERR(omap->hsic480m_clk[i])) { - r = clk_enable(omap->hsic480m_clk[i]); + r = clk_prepare_enable(omap->hsic480m_clk[i]); if (r) { dev_err(dev, "Can't enable port %d hsic480m clk:%d\n", @@ -354,7 +354,7 @@ static int usbhs_runtime_resume(struct device *dev) case OMAP_EHCI_PORT_MODE_TLL: if (!IS_ERR(omap->utmi_clk[i])) { - r = clk_enable(omap->utmi_clk[i]); + r = clk_prepare_enable(omap->utmi_clk[i]); if (r) { dev_err(dev, "Can't enable port %d clk : %d\n", @@ -382,15 +382,15 @@ static int usbhs_runtime_suspend(struct device *dev) switch (pdata->port_mode[i]) { case OMAP_EHCI_PORT_MODE_HSIC: if (!IS_ERR(omap->hsic60m_clk[i])) - clk_disable(omap->hsic60m_clk[i]); + clk_disable_unprepare(omap->hsic60m_clk[i]); if (!IS_ERR(omap->hsic480m_clk[i])) - clk_disable(omap->hsic480m_clk[i]); + clk_disable_unprepare(omap->hsic480m_clk[i]); /* Fall through as utmi_clks were used in HSIC mode */ case OMAP_EHCI_PORT_MODE_TLL: if (!IS_ERR(omap->utmi_clk[i])) - clk_disable(omap->utmi_clk[i]); + clk_disable_unprepare(omap->utmi_clk[i]); break; default: break; @@ -398,7 +398,7 @@ static int usbhs_runtime_suspend(struct device *dev) } if (!IS_ERR(omap->ehci_logic_fck)) - clk_disable(omap->ehci_logic_fck); + clk_disable_unprepare(omap->ehci_logic_fck); omap_tll_disable(pdata); @@ -893,7 +893,7 @@ static struct platform_driver usbhs_omap_driver = { .name = (char *)usbhs_driver_name, .owner = THIS_MODULE, .pm = &usbhsomap_dev_pm_ops, - .of_match_table = of_match_ptr(usbhs_omap_dt_ids), + .of_match_table = usbhs_omap_dt_ids, }, .remove = usbhs_omap_remove, }; diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c index e59ac4cbac96..0d946ae14453 100644 --- a/drivers/mfd/omap-usb-tll.c +++ b/drivers/mfd/omap-usb-tll.c @@ -320,7 +320,7 @@ static struct platform_driver usbtll_omap_driver = { .driver = { .name = (char *)usbtll_driver_name, .owner = THIS_MODULE, - .of_match_table = of_match_ptr(usbtll_omap_dt_ids), + .of_match_table = usbtll_omap_dt_ids, }, .probe = usbtll_omap_probe, .remove = usbtll_omap_remove, @@ -429,7 +429,7 @@ int omap_tll_enable(struct usbhs_omap_platform_data *pdata) if (IS_ERR(tll->ch_clk[i])) continue; - r = clk_enable(tll->ch_clk[i]); + r = clk_prepare_enable(tll->ch_clk[i]); if (r) { dev_err(tll_dev, "Error enabling ch %d clock: %d\n", i, r); @@ -460,7 +460,7 @@ int omap_tll_disable(struct usbhs_omap_platform_data *pdata) for (i = 0; i < tll->nch; i++) { if (omap_usb_mode_needs_tll(pdata->port_mode[i])) { if (!IS_ERR(tll->ch_clk[i])) - clk_disable(tll->ch_clk[i]); + clk_disable_unprepare(tll->ch_clk[i]); } } diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c index 135afabe4ae2..d280d789e55a 100644 --- a/drivers/mfd/palmas.c +++ b/drivers/mfd/palmas.c @@ -368,6 +368,7 @@ static const struct of_device_id of_palmas_match_tbl[] = { }, { }, 
}; +MODULE_DEVICE_TABLE(of, of_palmas_match_tbl); static int palmas_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) @@ -402,7 +403,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c, palmas->dev = &i2c->dev; palmas->irq = i2c->irq; - match = of_match_device(of_match_ptr(of_palmas_match_tbl), &i2c->dev); + match = of_match_device(of_palmas_match_tbl, &i2c->dev); if (!match) return -ENODATA; @@ -421,7 +422,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c, dev_err(palmas->dev, "can't attach client %d\n", i); ret = -ENOMEM; - goto err; + goto err_i2c; } palmas->i2c_clients[i]->dev.of_node = of_node_get(node); } @@ -432,7 +433,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c, dev_err(palmas->dev, "Failed to allocate regmap %d, err: %d\n", i, ret); - goto err; + goto err_i2c; } } @@ -451,7 +452,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c, reg); if (ret < 0) { dev_err(palmas->dev, "POLARITY_CTRL updat failed: %d\n", ret); - goto err; + goto err_i2c; } /* Change IRQ into clear on read mode for efficiency */ @@ -465,7 +466,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c, IRQF_ONESHOT | pdata->irq_flags, 0, &palmas_irq_chip, &palmas->irq_data); if (ret < 0) - goto err; + goto err_i2c; no_irq: slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE); @@ -551,7 +552,6 @@ no_irq: } else if (pdata->pm_off && !pm_power_off) { palmas_dev = palmas; pm_power_off = palmas_power_off; - return ret; } } @@ -559,17 +559,31 @@ no_irq: err_irq: regmap_del_irq_chip(palmas->irq, palmas->irq_data); -err: +err_i2c: + for (i = 1; i < PALMAS_NUM_CLIENTS; i++) { + if (palmas->i2c_clients[i]) + i2c_unregister_device(palmas->i2c_clients[i]); + } return ret; } static int palmas_i2c_remove(struct i2c_client *i2c) { struct palmas *palmas = i2c_get_clientdata(i2c); + int i; - mfd_remove_devices(palmas->dev); regmap_del_irq_chip(palmas->irq, palmas->irq_data); + for (i = 1; i < PALMAS_NUM_CLIENTS; i++) { + if (palmas->i2c_clients[i]) + i2c_unregister_device(palmas->i2c_clients[i]); + } + + if (palmas == palmas_dev) { + pm_power_off = NULL; + palmas_dev = NULL; + } + return 0; } diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c index a6841f77aa5e..484fe66e6c88 100644 --- a/drivers/mfd/pm8921-core.c +++ b/drivers/mfd/pm8921-core.c @@ -171,11 +171,12 @@ static int pm8921_remove(struct platform_device *pdev) drvdata = platform_get_drvdata(pdev); if (drvdata) pmic = drvdata->pm_chip_data; - if (pmic) + if (pmic) { mfd_remove_devices(pmic->dev); - if (pmic->irq_chip) { - pm8xxx_irq_exit(pmic->irq_chip); - pmic->irq_chip = NULL; + if (pmic->irq_chip) { + pm8xxx_irq_exit(pmic->irq_chip); + pmic->irq_chip = NULL; + } } return 0; diff --git a/drivers/mfd/rts5249.c b/drivers/mfd/rts5249.c index 3b835f593e35..573de7bfcced 100644 --- a/drivers/mfd/rts5249.c +++ b/drivers/mfd/rts5249.c @@ -130,13 +130,57 @@ static int rts5249_optimize_phy(struct rtsx_pcr *pcr) { int err; - err = rtsx_pci_write_phy_register(pcr, PHY_REG_REV, 0xFE46); + err = rtsx_pci_write_phy_register(pcr, PHY_REG_REV, + PHY_REG_REV_RESV | PHY_REG_REV_RXIDLE_LATCHED | + PHY_REG_REV_P1_EN | PHY_REG_REV_RXIDLE_EN | + PHY_REG_REV_RX_PWST | PHY_REG_REV_CLKREQ_DLY_TIMER_1_0 | + PHY_REG_REV_STOP_CLKRD | PHY_REG_REV_STOP_CLKWR); if (err < 0) return err; msleep(1); - return rtsx_pci_write_phy_register(pcr, PHY_BPCR, 0x05C0); + err = rtsx_pci_write_phy_register(pcr, PHY_BPCR, + PHY_BPCR_IBRXSEL | PHY_BPCR_IBTXSEL | + PHY_BPCR_IB_FILTER | PHY_BPCR_CMIRROR_EN); + if (err < 0) + return err; + err = 
rtsx_pci_write_phy_register(pcr, PHY_PCR, + PHY_PCR_FORCE_CODE | PHY_PCR_OOBS_CALI_50 | + PHY_PCR_OOBS_VCM_08 | PHY_PCR_OOBS_SEN_90 | + PHY_PCR_RSSI_EN); + if (err < 0) + return err; + err = rtsx_pci_write_phy_register(pcr, PHY_RCR2, + PHY_RCR2_EMPHASE_EN | PHY_RCR2_NADJR | + PHY_RCR2_CDR_CP_10 | PHY_RCR2_CDR_SR_2 | + PHY_RCR2_FREQSEL_12 | PHY_RCR2_CPADJEN | + PHY_RCR2_CDR_SC_8 | PHY_RCR2_CALIB_LATE); + if (err < 0) + return err; + err = rtsx_pci_write_phy_register(pcr, PHY_FLD4, + PHY_FLD4_FLDEN_SEL | PHY_FLD4_REQ_REF | + PHY_FLD4_RXAMP_OFF | PHY_FLD4_REQ_ADDA | + PHY_FLD4_BER_COUNT | PHY_FLD4_BER_TIMER | + PHY_FLD4_BER_CHK_EN); + if (err < 0) + return err; + err = rtsx_pci_write_phy_register(pcr, PHY_RDR, PHY_RDR_RXDSEL_1_9); + if (err < 0) + return err; + err = rtsx_pci_write_phy_register(pcr, PHY_RCR1, + PHY_RCR1_ADP_TIME | PHY_RCR1_VCO_COARSE); + if (err < 0) + return err; + err = rtsx_pci_write_phy_register(pcr, PHY_FLD3, + PHY_FLD3_TIMER_4 | PHY_FLD3_TIMER_6 | + PHY_FLD3_RXDELINK); + if (err < 0) + return err; + return rtsx_pci_write_phy_register(pcr, PHY_TUNE, + PHY_TUNE_TUNEREF_1_0 | PHY_TUNE_VBGSEL_1252 | + PHY_TUNE_SDBUS_33 | PHY_TUNE_TUNED18 | + PHY_TUNE_TUNED12); } static int rts5249_turn_on_led(struct rtsx_pcr *pcr) diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c index e6ae7720f9e1..11e20afbdcac 100644 --- a/drivers/mfd/rtsx_pcr.c +++ b/drivers/mfd/rtsx_pcr.c @@ -1149,7 +1149,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev, pcr->remap_addr = ioremap_nocache(base, len); if (!pcr->remap_addr) { ret = -ENOMEM; - goto free_host; + goto free_handle; } pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev), @@ -1209,8 +1209,6 @@ disable_msi: pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr); unmap: iounmap(pcr->remap_addr); -free_host: - dev_set_drvdata(&pcidev->dev, NULL); free_handle: kfree(handle); free_pcr: @@ -1242,7 +1240,6 @@ static void rtsx_pci_remove(struct pci_dev *pcidev) pci_disable_msi(pcr->pci); iounmap(pcr->remap_addr); - dev_set_drvdata(&pcidev->dev, NULL); pci_release_regions(pcidev); pci_disable_device(pcidev); diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c index f530e4b73f19..34c18fb8c089 100644 --- a/drivers/mfd/sec-core.c +++ b/drivers/mfd/sec-core.c @@ -17,6 +17,7 @@ #include <linux/err.h> #include <linux/slab.h> #include <linux/i2c.h> +#include <linux/of.h> #include <linux/of_irq.h> #include <linux/interrupt.h> #include <linux/pm_runtime.h> diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index 33f040c558d0..c2c8c91c6c7b 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c @@ -1232,7 +1232,7 @@ static ssize_t sm501_dbg_regs(struct device *dev, } -static DEVICE_ATTR(dbg_regs, 0666, sm501_dbg_regs, NULL); +static DEVICE_ATTR(dbg_regs, 0444, sm501_dbg_regs, NULL); /* sm501_init_reg * @@ -1660,7 +1660,6 @@ static int sm501_pci_probe(struct pci_dev *dev, err3: pci_disable_device(dev); err2: - pci_set_drvdata(dev, NULL); kfree(sm); err1: return err; @@ -1695,7 +1694,6 @@ static void sm501_pci_remove(struct pci_dev *dev) release_resource(sm->regs_claim); kfree(sm->regs_claim); - pci_set_drvdata(dev, NULL); pci_disable_device(dev); } diff --git a/drivers/mfd/stw481x.c b/drivers/mfd/stw481x.c new file mode 100644 index 000000000000..1243d5c6a448 --- /dev/null +++ b/drivers/mfd/stw481x.c @@ -0,0 +1,250 @@ +/* + * Core driver for STw4810/STw4811 + * + * Copyright (C) 2013 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * + * Author: Linus Walleij <linus.walleij@linaro.org> + * + * License terms: GNU General 
Public License (GPL) version 2 + */ + +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/mfd/core.h> +#include <linux/mfd/stw481x.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/spinlock.h> +#include <linux/slab.h> + +/* + * This driver can only access the non-USB portions of STw4811, the register + * range 0x00-0x10 dealing with USB is bound to the two special I2C pins used + * for USB control. + */ + +/* Registers inside the power control address space */ +#define STW_PC_VCORE_SEL 0x05U +#define STW_PC_VAUX_SEL 0x06U +#define STW_PC_VPLL_SEL 0x07U + +/** + * stw481x_get_pctl_reg() - get a power control register + * @stw481x: handle to the stw481x chip + * @reg: power control register to fetch + * + * The power control registers is a set of one-time-programmable registers + * in its own register space, accessed by writing addess bits to these + * two registers: bits 7,6,5 of PCTL_REG_LO corresponds to the 3 LSBs of + * the address and bits 8,9 of PCTL_REG_HI corresponds to the 2 MSBs of + * the address, forming an address space of 5 bits, i.e. 32 registers + * 0x00 ... 0x1f can be obtained. + */ +static int stw481x_get_pctl_reg(struct stw481x *stw481x, u8 reg) +{ + u8 msb = (reg >> 3) & 0x03; + u8 lsb = (reg << 5) & 0xe0; + unsigned int val; + u8 vrfy; + int ret; + + ret = regmap_write(stw481x->map, STW_PCTL_REG_HI, msb); + if (ret) + return ret; + ret = regmap_write(stw481x->map, STW_PCTL_REG_LO, lsb); + if (ret) + return ret; + ret = regmap_read(stw481x->map, STW_PCTL_REG_HI, &val); + if (ret) + return ret; + vrfy = (val & 0x03) << 3; + ret = regmap_read(stw481x->map, STW_PCTL_REG_LO, &val); + if (ret) + return ret; + vrfy |= ((val >> 5) & 0x07); + if (vrfy != reg) + return -EIO; + return (val >> 1) & 0x0f; +} + +static int stw481x_startup(struct stw481x *stw481x) +{ + /* Voltages multiplied by 100 */ + u8 vcore_val[] = { 100, 105, 110, 115, 120, 122, 124, 126, 128, + 130, 132, 134, 136, 138, 140, 145 }; + u8 vpll_val[] = { 105, 120, 130, 180 }; + u8 vaux_val[] = { 15, 18, 25, 28 }; + u8 vcore; + u8 vcore_slp; + u8 vpll; + u8 vaux; + bool vaux_en; + bool it_warn; + int ret; + unsigned int val; + + ret = regmap_read(stw481x->map, STW_CONF1, &val); + if (ret) + return ret; + vaux_en = !!(val & STW_CONF1_PDN_VAUX); + it_warn = !!(val & STW_CONF1_IT_WARN); + + dev_info(&stw481x->client->dev, "voltages %s\n", + (val & STW_CONF1_V_MONITORING) ? "OK" : "LOW"); + dev_info(&stw481x->client->dev, "MMC level shifter %s\n", + (val & STW_CONF1_MMC_LS_STATUS) ? "high impedance" : "ON"); + dev_info(&stw481x->client->dev, "VMMC: %s\n", + (val & STW_CONF1_PDN_VMMC) ? "ON" : "disabled"); + + dev_info(&stw481x->client->dev, "STw481x power control registers:\n"); + + ret = stw481x_get_pctl_reg(stw481x, STW_PC_VCORE_SEL); + if (ret < 0) + return ret; + vcore = ret & 0x0f; + + ret = stw481x_get_pctl_reg(stw481x, STW_PC_VAUX_SEL); + if (ret < 0) + return ret; + vaux = (ret >> 2) & 3; + vpll = (ret >> 4) & 1; /* Save bit 4 */ + + ret = stw481x_get_pctl_reg(stw481x, STW_PC_VPLL_SEL); + if (ret < 0) + return ret; + vpll |= (ret >> 1) & 2; + + dev_info(&stw481x->client->dev, "VCORE: %u.%uV %s\n", + vcore_val[vcore] / 100, vcore_val[vcore] % 100, + (ret & 4) ? "ON" : "OFF"); + + dev_info(&stw481x->client->dev, "VPLL: %u.%uV %s\n", + vpll_val[vpll] / 100, vpll_val[vpll] % 100, + (ret & 0x10) ? "ON" : "OFF"); + + dev_info(&stw481x->client->dev, "VAUX: %u.%uV %s\n", + vaux_val[vaux] / 10, vaux_val[vaux] % 10, + vaux_en ? 
"ON" : "OFF"); + + ret = regmap_read(stw481x->map, STW_CONF2, &val); + if (ret) + return ret; + + dev_info(&stw481x->client->dev, "TWARN: %s threshold, %s\n", + it_warn ? "below" : "above", + (val & STW_CONF2_MASK_TWARN) ? + "enabled" : "mask through VDDOK"); + dev_info(&stw481x->client->dev, "VMMC: %s\n", + (val & STW_CONF2_VMMC_EXT) ? "internal" : "external"); + dev_info(&stw481x->client->dev, "IT WAKE UP: %s\n", + (val & STW_CONF2_MASK_IT_WAKE_UP) ? "enabled" : "masked"); + dev_info(&stw481x->client->dev, "GPO1: %s\n", + (val & STW_CONF2_GPO1) ? "low" : "high impedance"); + dev_info(&stw481x->client->dev, "GPO2: %s\n", + (val & STW_CONF2_GPO2) ? "low" : "high impedance"); + + ret = regmap_read(stw481x->map, STW_VCORE_SLEEP, &val); + if (ret) + return ret; + vcore_slp = val & 0x0f; + dev_info(&stw481x->client->dev, "VCORE SLEEP: %u.%uV\n", + vcore_val[vcore_slp] / 100, vcore_val[vcore_slp] % 100); + + return 0; +} + +/* + * MFD cells - we have one cell which is selected operation + * mode, and we always have a GPIO cell. + */ +static struct mfd_cell stw481x_cells[] = { + { + .of_compatible = "st,stw481x-vmmc", + .name = "stw481x-vmmc-regulator", + .id = -1, + }, +}; + +const struct regmap_config stw481x_regmap_config = { + .reg_bits = 8, + .val_bits = 8, +}; + +static int stw481x_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct stw481x *stw481x; + int ret; + int i; + + stw481x = devm_kzalloc(&client->dev, sizeof(*stw481x), GFP_KERNEL); + if (!stw481x) + return -ENOMEM; + + i2c_set_clientdata(client, stw481x); + stw481x->client = client; + stw481x->map = devm_regmap_init_i2c(client, &stw481x_regmap_config); + + ret = stw481x_startup(stw481x); + if (ret) { + dev_err(&client->dev, "chip initialization failed\n"); + return ret; + } + + /* Set up and register the platform devices. */ + for (i = 0; i < ARRAY_SIZE(stw481x_cells); i++) { + /* One state holder for all drivers, this is simple */ + stw481x_cells[i].platform_data = stw481x; + stw481x_cells[i].pdata_size = sizeof(*stw481x); + } + + ret = mfd_add_devices(&client->dev, 0, stw481x_cells, + ARRAY_SIZE(stw481x_cells), NULL, 0, NULL); + if (ret) + return ret; + + dev_info(&client->dev, "initialized STw481x device\n"); + + return ret; +} + +static int stw481x_remove(struct i2c_client *client) +{ + mfd_remove_devices(&client->dev); + return 0; +} + +/* + * This ID table is completely unused, as this is a pure + * device-tree probed driver, but it has to be here due to + * the structure of the I2C core. 
+ */ +static const struct i2c_device_id stw481x_id[] = { + { "stw481x", 0 }, + { }, +}; + +static const struct of_device_id stw481x_match[] = { + { .compatible = "st,stw4810", }, + { .compatible = "st,stw4811", }, + { }, +}; +MODULE_DEVICE_TABLE(of, stw481x_match); + +static struct i2c_driver stw481x_driver = { + .driver = { + .name = "stw481x", + .of_match_table = stw481x_match, + }, + .probe = stw481x_probe, + .remove = stw481x_remove, + .id_table = stw481x_id, +}; + +module_i2c_driver(stw481x_driver); + +MODULE_AUTHOR("Linus Walleij"); +MODULE_DESCRIPTION("STw481x PMIC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c index 70f4909fee13..87ea51dc6234 100644 --- a/drivers/mfd/tc3589x.c +++ b/drivers/mfd/tc3589x.c @@ -16,6 +16,19 @@ #include <linux/mfd/core.h> #include <linux/mfd/tc3589x.h> +/** + * enum tc3589x_version - indicates the TC3589x version + */ +enum tc3589x_version { + TC3589X_TC35890, + TC3589X_TC35892, + TC3589X_TC35893, + TC3589X_TC35894, + TC3589X_TC35895, + TC3589X_TC35896, + TC3589X_UNKNOWN, +}; + #define TC3589x_CLKMODE_MODCTL_SLEEP 0x0 #define TC3589x_CLKMODE_MODCTL_OPERATION (1 << 0) @@ -361,7 +374,21 @@ static int tc3589x_probe(struct i2c_client *i2c, tc3589x->i2c = i2c; tc3589x->pdata = pdata; tc3589x->irq_base = pdata->irq_base; - tc3589x->num_gpio = id->driver_data; + + switch (id->driver_data) { + case TC3589X_TC35893: + case TC3589X_TC35895: + case TC3589X_TC35896: + tc3589x->num_gpio = 20; + break; + case TC3589X_TC35890: + case TC3589X_TC35892: + case TC3589X_TC35894: + case TC3589X_UNKNOWN: + default: + tc3589x->num_gpio = 24; + break; + } i2c_set_clientdata(i2c, tc3589x); @@ -432,7 +459,13 @@ static int tc3589x_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(tc3589x_dev_pm_ops, tc3589x_suspend, tc3589x_resume); static const struct i2c_device_id tc3589x_id[] = { - { "tc3589x", 24 }, + { "tc35890", TC3589X_TC35890 }, + { "tc35892", TC3589X_TC35892 }, + { "tc35893", TC3589X_TC35893 }, + { "tc35894", TC3589X_TC35894 }, + { "tc35895", TC3589X_TC35895 }, + { "tc35896", TC3589X_TC35896 }, + { "tc3589x", TC3589X_UNKNOWN }, { } }; MODULE_DEVICE_TABLE(i2c, tc3589x_id); diff --git a/drivers/mfd/ti-ssp.c b/drivers/mfd/ti-ssp.c index 1c2b994e1f6c..71e3e0c5bf73 100644 --- a/drivers/mfd/ti-ssp.c +++ b/drivers/mfd/ti-ssp.c @@ -445,7 +445,6 @@ static int ti_ssp_remove(struct platform_device *pdev) iounmap(ssp->regs); release_mem_region(ssp->res->start, resource_size(ssp->res)); kfree(ssp); - dev_set_drvdata(dev, NULL); return 0; } diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c index baaf5a8123bb..88718abfb9ba 100644 --- a/drivers/mfd/ti_am335x_tscadc.c +++ b/drivers/mfd/ti_am335x_tscadc.c @@ -56,21 +56,25 @@ EXPORT_SYMBOL_GPL(am335x_tsc_se_update); void am335x_tsc_se_set(struct ti_tscadc_dev *tsadc, u32 val) { - spin_lock(&tsadc->reg_lock); + unsigned long flags; + + spin_lock_irqsave(&tsadc->reg_lock, flags); tsadc->reg_se_cache = tscadc_readl(tsadc, REG_SE); tsadc->reg_se_cache |= val; am335x_tsc_se_update(tsadc); - spin_unlock(&tsadc->reg_lock); + spin_unlock_irqrestore(&tsadc->reg_lock, flags); } EXPORT_SYMBOL_GPL(am335x_tsc_se_set); void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val) { - spin_lock(&tsadc->reg_lock); + unsigned long flags; + + spin_lock_irqsave(&tsadc->reg_lock, flags); tsadc->reg_se_cache = tscadc_readl(tsadc, REG_SE); tsadc->reg_se_cache &= ~val; am335x_tsc_se_update(tsadc); - spin_unlock(&tsadc->reg_lock); + spin_unlock_irqrestore(&tsadc->reg_lock, flags); } 
EXPORT_SYMBOL_GPL(am335x_tsc_se_clr); @@ -95,7 +99,7 @@ static int ti_tscadc_probe(struct platform_device *pdev) const __be32 *cur; u32 val; int err, ctrl; - int clk_value, clock_rate; + int clock_rate; int tsc_wires = 0, adc_channels = 0, total_channels; int readouts = 0; @@ -196,11 +200,11 @@ static int ti_tscadc_probe(struct platform_device *pdev) } clock_rate = clk_get_rate(clk); clk_put(clk); - clk_value = clock_rate / ADC_CLK; + tscadc->clk_div = clock_rate / ADC_CLK; /* TSCADC_CLKDIV needs to be configured to the value minus 1 */ - clk_value = clk_value - 1; - tscadc_writel(tscadc, REG_CLKDIV, clk_value); + tscadc->clk_div--; + tscadc_writel(tscadc, REG_CLKDIV, tscadc->clk_div); /* Set the control register bits */ ctrl = CNTRLREG_STEPCONFIGWRT | @@ -303,6 +307,8 @@ static int tscadc_resume(struct device *dev) tscadc_writel(tscadc_dev, REG_CTRL, (restore | CNTRLREG_TSCSSENB)); + tscadc_writel(tscadc_dev, REG_CLKDIV, tscadc_dev->clk_div); + return 0; } @@ -326,7 +332,7 @@ static struct platform_driver ti_tscadc_driver = { .name = "ti_am3359-tscadc", .owner = THIS_MODULE, .pm = TSCADC_PM_OPS, - .of_match_table = of_match_ptr(ti_tscadc_dt_ids), + .of_match_table = ti_tscadc_dt_ids, }, .probe = ti_tscadc_probe, .remove = ti_tscadc_remove, diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c index a6755ec7bd6a..dbb34f94e5e3 100644 --- a/drivers/mfd/timberdale.c +++ b/drivers/mfd/timberdale.c @@ -678,7 +678,7 @@ static int timb_probe(struct pci_dev *dev, priv->ctl_mapbase = mapbase + CHIPCTLOFFSET; if (!request_mem_region(priv->ctl_mapbase, CHIPCTLSIZE, "timb-ctl")) { dev_err(&dev->dev, "Failed to request ctl mem\n"); - goto err_request; + goto err_start; } priv->ctl_membase = ioremap(priv->ctl_mapbase, CHIPCTLSIZE); @@ -828,13 +828,10 @@ err_config: iounmap(priv->ctl_membase); err_ioremap: release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE); -err_request: - pci_set_drvdata(dev, NULL); err_start: pci_disable_device(dev); err_enable: kfree(priv); - pci_set_drvdata(dev, NULL); return -ENODEV; } @@ -851,7 +848,6 @@ static void timb_remove(struct pci_dev *dev) pci_disable_msix(dev); pci_disable_device(dev); - pci_set_drvdata(dev, NULL); kfree(priv); } diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c index 5ad4b772b097..a081b925d10b 100644 --- a/drivers/mfd/tps6507x.c +++ b/drivers/mfd/tps6507x.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> +#include <linux/of.h> #include <linux/of_device.h> #include <linux/mfd/core.h> #include <linux/mfd/tps6507x.h> diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c index b8f48647661e..b7be0b295575 100644 --- a/drivers/mfd/tps65217.c +++ b/drivers/mfd/tps65217.c @@ -245,7 +245,7 @@ static struct i2c_driver tps65217_driver = { .driver = { .name = "tps65217", .owner = THIS_MODULE, - .of_match_table = of_match_ptr(tps65217_of_match), + .of_match_table = tps65217_of_match, }, .id_table = tps65217_id_table, .probe = tps65217_probe, diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c index f54fe4d4f77b..ee61fd7c198d 100644 --- a/drivers/mfd/tps6586x.c +++ b/drivers/mfd/tps6586x.c @@ -26,6 +26,7 @@ #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/regmap.h> +#include <linux/of.h> #include <linux/mfd/core.h> #include <linux/mfd/tps6586x.h> @@ -124,6 +125,7 @@ struct tps6586x { struct i2c_client *client; struct regmap *regmap; + int irq; struct irq_chip irq_chip; struct mutex irq_lock; int irq_base; @@ -261,12 +263,23 @@ static void 
tps6586x_irq_sync_unlock(struct irq_data *data) mutex_unlock(&tps6586x->irq_lock); } +#ifdef CONFIG_PM_SLEEP +static int tps6586x_irq_set_wake(struct irq_data *irq_data, unsigned int on) +{ + struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data); + return irq_set_irq_wake(tps6586x->irq, on); +} +#else +#define tps6586x_irq_set_wake NULL +#endif + static struct irq_chip tps6586x_irq_chip = { .name = "tps6586x", .irq_bus_lock = tps6586x_irq_lock, .irq_bus_sync_unlock = tps6586x_irq_sync_unlock, .irq_disable = tps6586x_irq_disable, .irq_enable = tps6586x_irq_enable, + .irq_set_wake = tps6586x_irq_set_wake, }; static int tps6586x_irq_map(struct irq_domain *h, unsigned int virq, @@ -331,6 +344,8 @@ static int tps6586x_irq_init(struct tps6586x *tps6586x, int irq, int new_irq_base; int irq_num = ARRAY_SIZE(tps6586x_irqs); + tps6586x->irq = irq; + mutex_init(&tps6586x->irq_lock); for (i = 0; i < 5; i++) { tps6586x->mask_reg[i] = 0xff; @@ -360,10 +375,8 @@ static int tps6586x_irq_init(struct tps6586x *tps6586x, int irq, ret = request_threaded_irq(irq, NULL, tps6586x_irq, IRQF_ONESHOT, "tps6586x", tps6586x); - if (!ret) { + if (!ret) device_init_wakeup(tps6586x->dev, 1); - enable_irq_wake(irq); - } return ret; } diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c index d79277204835..c0f608e3ca9e 100644 --- a/drivers/mfd/tps65910.c +++ b/drivers/mfd/tps65910.c @@ -25,6 +25,7 @@ #include <linux/mfd/core.h> #include <linux/regmap.h> #include <linux/mfd/tps65910.h> +#include <linux/of.h> #include <linux/of_device.h> static struct resource rtc_resources[] = { @@ -410,14 +411,10 @@ static struct tps65910_board *tps65910_parse_dt(struct i2c_client *client, ret = of_property_read_u32(np, "ti,vmbch-threshold", &prop); if (!ret) board_info->vmbch_threshold = prop; - else if (*chip_id == TPS65911) - dev_warn(&client->dev, "VMBCH-Threshold not specified"); ret = of_property_read_u32(np, "ti,vmbch2-threshold", &prop); if (!ret) board_info->vmbch2_threshold = prop; - else if (*chip_id == TPS65911) - dev_warn(&client->dev, "VMBCH2-Threshold not specified"); prop = of_property_read_bool(np, "ti,en-ck32k-xtal"); board_info->en_ck32k_xtal = prop; diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c index daf66942071c..0779d5ab9ab1 100644 --- a/drivers/mfd/twl6040.c +++ b/drivers/mfd/twl6040.c @@ -565,13 +565,13 @@ static int twl6040_probe(struct i2c_client *client, twl6040->supplies); if (ret != 0) { dev_err(&client->dev, "Failed to get supplies: %d\n", ret); - goto regulator_get_err; + return ret; } ret = regulator_bulk_enable(TWL6040_NUM_SUPPLIES, twl6040->supplies); if (ret != 0) { dev_err(&client->dev, "Failed to enable supplies: %d\n", ret); - goto regulator_get_err; + return ret; } twl6040->dev = &client->dev; @@ -619,7 +619,7 @@ static int twl6040_probe(struct i2c_client *client, "twl6040_irq_th", twl6040); if (ret) { dev_err(twl6040->dev, "Thermal IRQ request failed: %d\n", ret); - goto thirq_err; + goto readyirq_err; } /* dual-access registers controlled by I2C only */ @@ -659,21 +659,14 @@ static int twl6040_probe(struct i2c_client *client, ret = mfd_add_devices(&client->dev, -1, twl6040->cells, children, NULL, 0, NULL); if (ret) - goto mfd_err; + goto readyirq_err; return 0; -mfd_err: - devm_free_irq(&client->dev, twl6040->irq_th, twl6040); -thirq_err: - devm_free_irq(&client->dev, twl6040->irq_ready, twl6040); readyirq_err: regmap_del_irq_chip(twl6040->irq, twl6040->irq_data); gpio_err: regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies); -regulator_get_err: - 
i2c_set_clientdata(client, NULL); - return ret; } @@ -684,12 +677,9 @@ static int twl6040_remove(struct i2c_client *client) if (twl6040->power_count) twl6040_power(twl6040, 0); - devm_free_irq(&client->dev, twl6040->irq_ready, twl6040); - devm_free_irq(&client->dev, twl6040->irq_th, twl6040); regmap_del_irq_chip(twl6040->irq, twl6040->irq_data); mfd_remove_devices(&client->dev); - i2c_set_clientdata(client, NULL); regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies); diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c index d5966e6b5a7d..0313f839e8fa 100644 --- a/drivers/mfd/ucb1x00-core.c +++ b/drivers/mfd/ucb1x00-core.c @@ -553,6 +553,7 @@ static int ucb1x00_probe(struct mcp *mcp) if (ucb->irq_base < 0) { dev_err(&ucb->dev, "unable to allocate 16 irqs: %d\n", ucb->irq_base); + ret = ucb->irq_base; goto err_irq_alloc; } diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c index 802dd3cb18cf..1e9a4b2102f9 100644 --- a/drivers/mfd/wm5102-tables.c +++ b/drivers/mfd/wm5102-tables.c @@ -903,7 +903,6 @@ static const struct reg_default wm5102_reg_default[] = { { 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */ { 0x00000D1C, 0xFFFF }, /* R3356 - IRQ2 Status 5 Mask */ { 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */ - { 0x00000D50, 0x0000 }, /* R3408 - AOD wkup and trig */ { 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask IRQ1 */ { 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */ { 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */ diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c index 3113e39b318e..bf8b3b5ad1fe 100644 --- a/drivers/mfd/wm5110-tables.c +++ b/drivers/mfd/wm5110-tables.c @@ -243,6 +243,12 @@ int wm5110_patch(struct arizona *arizona) EXPORT_SYMBOL_GPL(wm5110_patch); static const struct regmap_irq wm5110_aod_irqs[ARIZONA_NUM_IRQ] = { + [ARIZONA_IRQ_MICD_CLAMP_FALL] = { + .mask = ARIZONA_MICD_CLAMP_FALL_EINT1 + }, + [ARIZONA_IRQ_MICD_CLAMP_RISE] = { + .mask = ARIZONA_MICD_CLAMP_RISE_EINT1 + }, [ARIZONA_IRQ_GP5_FALL] = { .mask = ARIZONA_GP5_FALL_EINT1 }, [ARIZONA_IRQ_GP5_RISE] = { .mask = ARIZONA_GP5_RISE_EINT1 }, [ARIZONA_IRQ_JD_FALL] = { .mask = ARIZONA_JD1_FALL_EINT1 }, @@ -505,6 +511,7 @@ static const struct reg_default wm5110_reg_default[] = { { 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */ { 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */ { 0x0000029C, 0x0000 }, /* R668 - Headphone Detect 2 */ + { 0x000002A2, 0x0000 }, /* R674 - Micd clamp control */ { 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */ { 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */ { 0x000002A5, 0x0000 }, /* R677 - Mic Detect 3 */ @@ -592,7 +599,7 @@ static const struct reg_default wm5110_reg_default[] = { { 0x0000043E, 0x0080 }, /* R1086 - DAC Volume Limit 6R */ { 0x0000043F, 0x0800 }, /* R1087 - Noise Gate Select 6R */ { 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */ - { 0x00000458, 0x0001 }, /* R1112 - Noise Gate Control */ + { 0x00000458, 0x0000 }, /* R1112 - Noise Gate Control */ { 0x00000480, 0x0040 }, /* R1152 - Class W ANC Threshold 1 */ { 0x00000481, 0x0040 }, /* R1153 - Class W ANC Threshold 2 */ { 0x00000490, 0x0069 }, /* R1168 - PDM SPK1 CTRL 1 */ @@ -1204,7 +1211,6 @@ static const struct reg_default wm5110_reg_default[] = { { 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */ { 0x00000D1C, 0xFFFF }, /* R3356 - IRQ2 Status 5 Mask */ { 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */ - { 0x00000D50, 0x0000 }, /* R3408 - AOD wkup and trig */ { 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask 
IRQ1 */ { 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */ { 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */ @@ -1440,6 +1446,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg) case ARIZONA_ACCESSORY_DETECT_MODE_1: case ARIZONA_HEADPHONE_DETECT_1: case ARIZONA_HEADPHONE_DETECT_2: + case ARIZONA_MICD_CLAMP_CONTROL: case ARIZONA_MIC_DETECT_1: case ARIZONA_MIC_DETECT_2: case ARIZONA_MIC_DETECT_3: @@ -2291,21 +2298,37 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg) case ARIZONA_DSP1_STATUS_1: case ARIZONA_DSP1_STATUS_2: case ARIZONA_DSP1_STATUS_3: + case ARIZONA_DSP1_SCRATCH_0: + case ARIZONA_DSP1_SCRATCH_1: + case ARIZONA_DSP1_SCRATCH_2: + case ARIZONA_DSP1_SCRATCH_3: case ARIZONA_DSP2_CONTROL_1: case ARIZONA_DSP2_CLOCKING_1: case ARIZONA_DSP2_STATUS_1: case ARIZONA_DSP2_STATUS_2: case ARIZONA_DSP2_STATUS_3: + case ARIZONA_DSP2_SCRATCH_0: + case ARIZONA_DSP2_SCRATCH_1: + case ARIZONA_DSP2_SCRATCH_2: + case ARIZONA_DSP2_SCRATCH_3: case ARIZONA_DSP3_CONTROL_1: case ARIZONA_DSP3_CLOCKING_1: case ARIZONA_DSP3_STATUS_1: case ARIZONA_DSP3_STATUS_2: case ARIZONA_DSP3_STATUS_3: + case ARIZONA_DSP3_SCRATCH_0: + case ARIZONA_DSP3_SCRATCH_1: + case ARIZONA_DSP3_SCRATCH_2: + case ARIZONA_DSP3_SCRATCH_3: case ARIZONA_DSP4_CONTROL_1: case ARIZONA_DSP4_CLOCKING_1: case ARIZONA_DSP4_STATUS_1: case ARIZONA_DSP4_STATUS_2: case ARIZONA_DSP4_STATUS_3: + case ARIZONA_DSP4_SCRATCH_0: + case ARIZONA_DSP4_SCRATCH_1: + case ARIZONA_DSP4_SCRATCH_2: + case ARIZONA_DSP4_SCRATCH_3: return true; default: return false; @@ -2347,25 +2370,41 @@ static bool wm5110_volatile_register(struct device *dev, unsigned int reg) case ARIZONA_INTERRUPT_RAW_STATUS_7: case ARIZONA_INTERRUPT_RAW_STATUS_8: case ARIZONA_IRQ_PIN_STATUS: + case ARIZONA_AOD_WKUP_AND_TRIG: case ARIZONA_AOD_IRQ1: case ARIZONA_AOD_IRQ2: + case ARIZONA_AOD_IRQ_RAW_STATUS: case ARIZONA_FX_CTRL2: case ARIZONA_ASRC_STATUS: case ARIZONA_DSP_STATUS: - case ARIZONA_DSP1_CONTROL_1: - case ARIZONA_DSP1_CLOCKING_1: case ARIZONA_DSP1_STATUS_1: case ARIZONA_DSP1_STATUS_2: case ARIZONA_DSP1_STATUS_3: + case ARIZONA_DSP1_SCRATCH_0: + case ARIZONA_DSP1_SCRATCH_1: + case ARIZONA_DSP1_SCRATCH_2: + case ARIZONA_DSP1_SCRATCH_3: case ARIZONA_DSP2_STATUS_1: case ARIZONA_DSP2_STATUS_2: case ARIZONA_DSP2_STATUS_3: + case ARIZONA_DSP2_SCRATCH_0: + case ARIZONA_DSP2_SCRATCH_1: + case ARIZONA_DSP2_SCRATCH_2: + case ARIZONA_DSP2_SCRATCH_3: case ARIZONA_DSP3_STATUS_1: case ARIZONA_DSP3_STATUS_2: case ARIZONA_DSP3_STATUS_3: + case ARIZONA_DSP3_SCRATCH_0: + case ARIZONA_DSP3_SCRATCH_1: + case ARIZONA_DSP3_SCRATCH_2: + case ARIZONA_DSP3_SCRATCH_3: case ARIZONA_DSP4_STATUS_1: case ARIZONA_DSP4_STATUS_2: case ARIZONA_DSP4_STATUS_3: + case ARIZONA_DSP4_SCRATCH_0: + case ARIZONA_DSP4_SCRATCH_1: + case ARIZONA_DSP4_SCRATCH_2: + case ARIZONA_DSP4_SCRATCH_3: return true; default: return false; diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c index e1c283e6d4e5..030827511667 100644 --- a/drivers/mfd/wm8994-core.c +++ b/drivers/mfd/wm8994-core.c @@ -33,84 +33,6 @@ #include "wm8994.h" -/** - * wm8994_reg_read: Read a single WM8994 register. - * - * @wm8994: Device to read from. - * @reg: Register to read. 
- */ -int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg) -{ - unsigned int val; - int ret; - - ret = regmap_read(wm8994->regmap, reg, &val); - - if (ret < 0) - return ret; - else - return val; -} -EXPORT_SYMBOL_GPL(wm8994_reg_read); - -/** - * wm8994_bulk_read: Read multiple WM8994 registers - * - * @wm8994: Device to read from - * @reg: First register - * @count: Number of registers - * @buf: Buffer to fill. The data will be returned big endian. - */ -int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg, - int count, u16 *buf) -{ - return regmap_bulk_read(wm8994->regmap, reg, buf, count); -} - -/** - * wm8994_reg_write: Write a single WM8994 register. - * - * @wm8994: Device to write to. - * @reg: Register to write to. - * @val: Value to write. - */ -int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg, - unsigned short val) -{ - return regmap_write(wm8994->regmap, reg, val); -} -EXPORT_SYMBOL_GPL(wm8994_reg_write); - -/** - * wm8994_bulk_write: Write multiple WM8994 registers - * - * @wm8994: Device to write to - * @reg: First register - * @count: Number of registers - * @buf: Buffer to write from. Data must be big-endian formatted. - */ -int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg, - int count, const u16 *buf) -{ - return regmap_raw_write(wm8994->regmap, reg, buf, count * sizeof(u16)); -} -EXPORT_SYMBOL_GPL(wm8994_bulk_write); - -/** - * wm8994_set_bits: Set the value of a bitfield in a WM8994 register - * - * @wm8994: Device to write to. - * @reg: Register to write to. - * @mask: Mask of bits to set. - * @val: Value to set (unshifted) - */ -int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg, - unsigned short mask, unsigned short val) -{ - return regmap_update_bits(wm8994->regmap, reg, mask, val); -} -EXPORT_SYMBOL_GPL(wm8994_set_bits); - static struct mfd_cell wm8994_regulator_devs[] = { { .name = "wm8994-ldo", diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index e760715bd9cb..a3e291d0df9a 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -381,19 +381,6 @@ config HMC6352 This driver provides support for the Honeywell HMC6352 compass, providing configuration and heading data via sysfs. -config EP93XX_PWM - tristate "EP93xx PWM support" - depends on ARCH_EP93XX - help - This option enables device driver support for the PWM channels - on the Cirrus EP93xx processors. The EP9307 chip only has one - PWM channel all the others have two, the second channel is an - alternate function of the EGPIO14 pin. A sysfs interface is - provided to control the PWM channels. - - To compile this driver as a module, choose M here: the module will - be called ep93xx_pwm. 
- config DS1682 tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm" depends on I2C diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 0b7ea3ea8bb8..f45473e68bf7 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -33,7 +33,6 @@ obj-$(CONFIG_APDS9802ALS) += apds9802als.o obj-$(CONFIG_ISL29003) += isl29003.o obj-$(CONFIG_ISL29020) += isl29020.o obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o -obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o obj-$(CONFIG_DS1682) += ds1682.o obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o obj-$(CONFIG_C2PORT) += c2port/ diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c index 08b18f3f5264..9e2b985293fc 100644 --- a/drivers/misc/carma/carma-fpga.c +++ b/drivers/misc/carma/carma-fpga.c @@ -633,8 +633,7 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf) struct dma_async_tx_descriptor *tx; dma_cookie_t cookie; dma_addr_t dst, src; - unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP | - DMA_COMPL_SKIP_SRC_UNMAP; + unsigned long dma_flags = 0; dst_sg = buf->vb.sglist; dst_nents = buf->vb.sglen; diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 94b8a3324319..d87f77f790d6 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -22,7 +22,7 @@ #include <linux/jiffies.h> #include <linux/of.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> /* * I2C EEPROMs from most vendors are inexpensive and mostly interchangeable. diff --git a/drivers/misc/ep93xx_pwm.c b/drivers/misc/ep93xx_pwm.c deleted file mode 100644 index cdb67a9c1959..000000000000 --- a/drivers/misc/ep93xx_pwm.c +++ /dev/null @@ -1,286 +0,0 @@ -/* - * Simple PWM driver for EP93XX - * - * (c) Copyright 2009 Matthieu Crapet <mcrapet@gmail.com> - * (c) Copyright 2009 H Hartley Sweeten <hsweeten@visionengravers.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- * - * EP9307 has only one channel: - * - PWMOUT - * - * EP9301/02/12/15 have two channels: - * - PWMOUT - * - PWMOUT1 (alternate function for EGPIO14) - */ - -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/clk.h> -#include <linux/err.h> -#include <linux/io.h> - -#include <mach/platform.h> - -#define EP93XX_PWMx_TERM_COUNT 0x00 -#define EP93XX_PWMx_DUTY_CYCLE 0x04 -#define EP93XX_PWMx_ENABLE 0x08 -#define EP93XX_PWMx_INVERT 0x0C - -#define EP93XX_PWM_MAX_COUNT 0xFFFF - -struct ep93xx_pwm { - void __iomem *mmio_base; - struct clk *clk; - u32 duty_percent; -}; - -/* - * /sys/devices/platform/ep93xx-pwm.N - * /min_freq read-only minimum pwm output frequency - * /max_req read-only maximum pwm output frequency - * /freq read-write pwm output frequency (0 = disable output) - * /duty_percent read-write pwm duty cycle percent (1..99) - * /invert read-write invert pwm output - */ - -static ssize_t ep93xx_pwm_get_min_freq(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct platform_device *pdev = to_platform_device(dev); - struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - unsigned long rate = clk_get_rate(pwm->clk); - - return sprintf(buf, "%ld\n", rate / (EP93XX_PWM_MAX_COUNT + 1)); -} - -static ssize_t ep93xx_pwm_get_max_freq(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct platform_device *pdev = to_platform_device(dev); - struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - unsigned long rate = clk_get_rate(pwm->clk); - - return sprintf(buf, "%ld\n", rate / 2); -} - -static ssize_t ep93xx_pwm_get_freq(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct platform_device *pdev = to_platform_device(dev); - struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - - if (readl(pwm->mmio_base + EP93XX_PWMx_ENABLE) & 0x1) { - unsigned long rate = clk_get_rate(pwm->clk); - u16 term = readl(pwm->mmio_base + EP93XX_PWMx_TERM_COUNT); - - return sprintf(buf, "%ld\n", rate / (term + 1)); - } else { - return sprintf(buf, "disabled\n"); - } -} - -static ssize_t ep93xx_pwm_set_freq(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct platform_device *pdev = to_platform_device(dev); - struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - long val; - int err; - - err = kstrtol(buf, 10, &val); - if (err) - return -EINVAL; - - if (val == 0) { - writel(0x0, pwm->mmio_base + EP93XX_PWMx_ENABLE); - } else if (val <= (clk_get_rate(pwm->clk) / 2)) { - u32 term, duty; - - val = (clk_get_rate(pwm->clk) / val) - 1; - if (val > EP93XX_PWM_MAX_COUNT) - val = EP93XX_PWM_MAX_COUNT; - if (val < 1) - val = 1; - - term = readl(pwm->mmio_base + EP93XX_PWMx_TERM_COUNT); - duty = ((val + 1) * pwm->duty_percent / 100) - 1; - - /* If pwm is running, order is important */ - if (val > term) { - writel(val, pwm->mmio_base + EP93XX_PWMx_TERM_COUNT); - writel(duty, pwm->mmio_base + EP93XX_PWMx_DUTY_CYCLE); - } else { - writel(duty, pwm->mmio_base + EP93XX_PWMx_DUTY_CYCLE); - writel(val, pwm->mmio_base + EP93XX_PWMx_TERM_COUNT); - } - - if (!readl(pwm->mmio_base + EP93XX_PWMx_ENABLE) & 0x1) - writel(0x1, pwm->mmio_base + EP93XX_PWMx_ENABLE); - } else { - return -EINVAL; - } - - return count; -} - -static ssize_t ep93xx_pwm_get_duty_percent(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct platform_device *pdev = to_platform_device(dev); - struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - - return sprintf(buf, "%d\n", 
pwm->duty_percent); -} - -static ssize_t ep93xx_pwm_set_duty_percent(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct platform_device *pdev = to_platform_device(dev); - struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - long val; - int err; - - err = kstrtol(buf, 10, &val); - if (err) - return -EINVAL; - - if (val > 0 && val < 100) { - u32 term = readl(pwm->mmio_base + EP93XX_PWMx_TERM_COUNT); - u32 duty = ((term + 1) * val / 100) - 1; - - writel(duty, pwm->mmio_base + EP93XX_PWMx_DUTY_CYCLE); - pwm->duty_percent = val; - return count; - } - - return -EINVAL; -} - -static ssize_t ep93xx_pwm_get_invert(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct platform_device *pdev = to_platform_device(dev); - struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - int inverted = readl(pwm->mmio_base + EP93XX_PWMx_INVERT) & 0x1; - - return sprintf(buf, "%d\n", inverted); -} - -static ssize_t ep93xx_pwm_set_invert(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct platform_device *pdev = to_platform_device(dev); - struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - long val; - int err; - - err = kstrtol(buf, 10, &val); - if (err) - return -EINVAL; - - if (val == 0) - writel(0x0, pwm->mmio_base + EP93XX_PWMx_INVERT); - else if (val == 1) - writel(0x1, pwm->mmio_base + EP93XX_PWMx_INVERT); - else - return -EINVAL; - - return count; -} - -static DEVICE_ATTR(min_freq, S_IRUGO, ep93xx_pwm_get_min_freq, NULL); -static DEVICE_ATTR(max_freq, S_IRUGO, ep93xx_pwm_get_max_freq, NULL); -static DEVICE_ATTR(freq, S_IWUSR | S_IRUGO, - ep93xx_pwm_get_freq, ep93xx_pwm_set_freq); -static DEVICE_ATTR(duty_percent, S_IWUSR | S_IRUGO, - ep93xx_pwm_get_duty_percent, ep93xx_pwm_set_duty_percent); -static DEVICE_ATTR(invert, S_IWUSR | S_IRUGO, - ep93xx_pwm_get_invert, ep93xx_pwm_set_invert); - -static struct attribute *ep93xx_pwm_attrs[] = { - &dev_attr_min_freq.attr, - &dev_attr_max_freq.attr, - &dev_attr_freq.attr, - &dev_attr_duty_percent.attr, - &dev_attr_invert.attr, - NULL -}; - -static const struct attribute_group ep93xx_pwm_sysfs_files = { - .attrs = ep93xx_pwm_attrs, -}; - -static int ep93xx_pwm_probe(struct platform_device *pdev) -{ - struct ep93xx_pwm *pwm; - struct resource *res; - int ret; - - pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL); - if (!pwm) - return -ENOMEM; - - pwm->clk = devm_clk_get(&pdev->dev, "pwm_clk"); - if (IS_ERR(pwm->clk)) - return PTR_ERR(pwm->clk); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pwm->mmio_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(pwm->mmio_base)) - return PTR_ERR(pwm->mmio_base); - - ret = ep93xx_pwm_acquire_gpio(pdev); - if (ret) - return ret; - - ret = sysfs_create_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files); - if (ret) { - ep93xx_pwm_release_gpio(pdev); - return ret; - } - - pwm->duty_percent = 50; - - /* disable pwm at startup. Avoids zero value. 
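/*
 * Aside: the terminal-count/duty arithmetic used by the freq and
 * duty_percent attributes above, pulled out as a standalone sketch.
 * freq = clk / (term + 1), so term = clk / freq - 1, and the duty
 * register holds ((term + 1) * duty_percent / 100) - 1. The 1 MHz
 * clock in the worked numbers is an assumption, not an EP93xx fact:
 * for clk = 1 MHz and freq = 1 kHz, term = 999; at 50% duty the duty
 * register gets (1000 * 50 / 100) - 1 = 499.
 */
static void ep93xx_pwm_example_calc(unsigned long clk_rate, unsigned long freq,
                                    unsigned int duty_percent,
                                    unsigned long *term, unsigned long *duty)
{
        *term = clk_rate / freq - 1;                    /* EP93XX_PWMx_TERM_COUNT */
        *duty = ((*term + 1) * duty_percent / 100) - 1; /* EP93XX_PWMx_DUTY_CYCLE */
}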
*/ - writel(0x0, pwm->mmio_base + EP93XX_PWMx_ENABLE); - writel(EP93XX_PWM_MAX_COUNT, pwm->mmio_base + EP93XX_PWMx_TERM_COUNT); - writel(EP93XX_PWM_MAX_COUNT/2, pwm->mmio_base + EP93XX_PWMx_DUTY_CYCLE); - - clk_enable(pwm->clk); - - platform_set_drvdata(pdev, pwm); - return 0; -} - -static int ep93xx_pwm_remove(struct platform_device *pdev) -{ - struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - - writel(0x0, pwm->mmio_base + EP93XX_PWMx_ENABLE); - clk_disable(pwm->clk); - sysfs_remove_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files); - ep93xx_pwm_release_gpio(pdev); - - return 0; -} - -static struct platform_driver ep93xx_pwm_driver = { - .driver = { - .name = "ep93xx-pwm", - .owner = THIS_MODULE, - }, - .probe = ep93xx_pwm_probe, - .remove = ep93xx_pwm_remove, -}; -module_platform_driver(ep93xx_pwm_driver); - -MODULE_AUTHOR("Matthieu Crapet <mcrapet@gmail.com>, " - "H Hartley Sweeten <hsweeten@visionengravers.com>"); -MODULE_DESCRIPTION("EP93xx PWM driver"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:ep93xx-pwm"); diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 1a3163f1407e..29d5d988a51c 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -2448,7 +2448,6 @@ static int _mmc_blk_suspend(struct mmc_card *card) struct mmc_blk_data *md = mmc_get_drvdata(card); if (md) { - pm_runtime_get_sync(&card->dev); mmc_queue_suspend(&md->queue); list_for_each_entry(part_md, &md->part, part) { mmc_queue_suspend(&part_md->queue); @@ -2483,7 +2482,6 @@ static int mmc_blk_resume(struct mmc_card *card) list_for_each_entry(part_md, &md->part, part) { mmc_queue_resume(&part_md->queue); } - pm_runtime_put(&card->dev); } return 0; } diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 3e227bd91e81..64145a32b917 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -342,7 +342,7 @@ int mmc_add_card(struct mmc_card *card) break; } - if (mmc_sd_card_uhs(card) && + if (mmc_card_uhs(card) && (card->sd_bus_speed < ARRAY_SIZE(uhs_speeds))) uhs_bus_speed_mode = uhs_speeds[card->sd_bus_speed]; diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index bf18b6bfce48..57a2b403bf8e 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -23,6 +23,7 @@ #include <linux/log2.h> #include <linux/regulator/consumer.h> #include <linux/pm_runtime.h> +#include <linux/pm_wakeup.h> #include <linux/suspend.h> #include <linux/fault-inject.h> #include <linux/random.h> @@ -301,7 +302,7 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception) } err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal); + EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal, true); if (err) { pr_warn("%s: Error %d starting bkops\n", mmc_hostname(card->host), err); @@ -918,31 +919,6 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) EXPORT_SYMBOL(__mmc_claim_host); /** - * mmc_try_claim_host - try exclusively to claim a host - * @host: mmc host to claim - * - * Returns %1 if the host is claimed, %0 otherwise. 
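/*
 * Aside: with mmc_try_claim_host() being dropped here, exclusive access
 * to a host always goes through the blocking claim/release pair. A
 * minimal usage sketch; the surrounding driver context is assumed and
 * the command setup mirrors the status query used elsewhere in this
 * patch.
 */
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>

static int mmc_example_get_status(struct mmc_card *card, u32 *status)
{
        struct mmc_command cmd = {0};
        int err;

        cmd.opcode = MMC_SEND_STATUS;
        cmd.arg = card->rca << 16;              /* non-SPI addressing */
        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

        mmc_claim_host(card->host);             /* may sleep until the host is free */
        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        mmc_release_host(card->host);

        if (!err && status)
                *status = cmd.resp[0];
        return err;
}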
- */ -int mmc_try_claim_host(struct mmc_host *host) -{ - int claimed_host = 0; - unsigned long flags; - - spin_lock_irqsave(&host->lock, flags); - if (!host->claimed || host->claimer == current) { - host->claimed = 1; - host->claimer = current; - host->claim_cnt += 1; - claimed_host = 1; - } - spin_unlock_irqrestore(&host->lock, flags); - if (host->ops->enable && claimed_host && host->claim_cnt == 1) - host->ops->enable(host); - return claimed_host; -} -EXPORT_SYMBOL(mmc_try_claim_host); - -/** * mmc_release_host - release a host * @host: mmc host to release * @@ -1382,22 +1358,31 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) { int bit; - ocr &= host->ocr_avail; + /* + * Sanity check the voltages that the card claims to + * support. + */ + if (ocr & 0x7F) { + dev_warn(mmc_dev(host), + "card claims to support voltages below defined range\n"); + ocr &= ~0x7F; + } - bit = ffs(ocr); - if (bit) { - bit -= 1; + ocr &= host->ocr_avail; + if (!ocr) { + dev_warn(mmc_dev(host), "no support for card's volts\n"); + return 0; + } + if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) { + bit = ffs(ocr) - 1; ocr &= 3 << bit; - - mmc_host_clk_hold(host); - host->ios.vdd = bit; - mmc_set_ios(host); - mmc_host_clk_release(host); + mmc_power_cycle(host, ocr); } else { - pr_warning("%s: host doesn't support card's voltages\n", - mmc_hostname(host)); - ocr = 0; + bit = fls(ocr) - 1; + ocr &= 3 << bit; + if (bit != host->ios.vdd) + dev_warn(mmc_dev(host), "exceeding card's volts\n"); } return ocr; @@ -1422,7 +1407,7 @@ int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage) } -int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage) +int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr) { struct mmc_command cmd = {0}; int err = 0; @@ -1504,7 +1489,7 @@ power_cycle: if (err) { pr_debug("%s: Signal voltage switch failed, " "power cycling card\n", mmc_hostname(host)); - mmc_power_cycle(host); + mmc_power_cycle(host, ocr); } mmc_host_clk_release(host); @@ -1545,22 +1530,14 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) * If a host does all the power sequencing itself, ignore the * initial MMC_POWER_UP stage. */ -void mmc_power_up(struct mmc_host *host) +void mmc_power_up(struct mmc_host *host, u32 ocr) { - int bit; - if (host->ios.power_mode == MMC_POWER_ON) return; mmc_host_clk_hold(host); - /* If ocr is set, we use it */ - if (host->ocr) - bit = ffs(host->ocr) - 1; - else - bit = fls(host->ocr_avail) - 1; - - host->ios.vdd = bit; + host->ios.vdd = fls(ocr) - 1; if (mmc_host_is_spi(host)) host->ios.chip_select = MMC_CS_HIGH; else @@ -1604,13 +1581,6 @@ void mmc_power_off(struct mmc_host *host) host->ios.clock = 0; host->ios.vdd = 0; - - /* - * Reset ocr mask to be the highest possible voltage supported for - * this mmc host. This value will be used at next power up. 
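/*
 * Aside: a worked example of the voltage-window maths in the new
 * mmc_select_voltage() above (the OCR values are illustrative). Bits
 * 0-6 of an OCR describe voltages below the range the spec defines and
 * are masked off; ffs()/fls() then pick the lowest/highest remaining
 * voltage bit, and "ocr &= 3 << bit" keeps that window plus the one
 * above it.
 */
#include <linux/bitops.h>

static u32 example_pick_voltage_window(u32 card_ocr, u32 host_ocr_avail,
                                       bool full_pwr_cycle)
{
        u32 ocr = card_ocr & ~0x7F;     /* drop bits below the defined range */
        int bit;

        ocr &= host_ocr_avail;
        if (!ocr)
                return 0;               /* no common voltage */

        bit = full_pwr_cycle ? ffs(ocr) - 1 : fls(ocr) - 1;
        return ocr & (3 << bit);
}

/*
 * e.g. card_ocr = 0x00300000 (3.2-3.4 V) against host_ocr_avail =
 * 0x00ff8000 (2.7-3.6 V): without MMC_CAP2_FULL_PWR_CYCLE, fls() picks
 * bit 21 and the result is 0x00200000, the 3.3-3.4 V window.
 */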
- */ - host->ocr = 1 << (fls(host->ocr_avail) - 1); - if (!mmc_host_is_spi(host)) { host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; host->ios.chip_select = MMC_CS_DONTCARE; @@ -1630,12 +1600,12 @@ void mmc_power_off(struct mmc_host *host) mmc_host_clk_release(host); } -void mmc_power_cycle(struct mmc_host *host) +void mmc_power_cycle(struct mmc_host *host, u32 ocr) { mmc_power_off(host); /* Wait at least 1 ms according to SD spec */ mmc_delay(1); - mmc_power_up(host); + mmc_power_up(host, ocr); } /* @@ -1723,6 +1693,28 @@ void mmc_detach_bus(struct mmc_host *host) mmc_bus_put(host); } +static void _mmc_detect_change(struct mmc_host *host, unsigned long delay, + bool cd_irq) +{ +#ifdef CONFIG_MMC_DEBUG + unsigned long flags; + spin_lock_irqsave(&host->lock, flags); + WARN_ON(host->removed); + spin_unlock_irqrestore(&host->lock, flags); +#endif + + /* + * If the device is configured as wakeup, we prevent a new sleep for + * 5 s to give provision for user space to consume the event. + */ + if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) && + device_can_wakeup(mmc_dev(host))) + pm_wakeup_event(mmc_dev(host), 5000); + + host->detect_change = 1; + mmc_schedule_delayed_work(&host->detect, delay); +} + /** * mmc_detect_change - process change of state on a MMC socket * @host: host which changed state. @@ -1735,16 +1727,8 @@ void mmc_detach_bus(struct mmc_host *host) */ void mmc_detect_change(struct mmc_host *host, unsigned long delay) { -#ifdef CONFIG_MMC_DEBUG - unsigned long flags; - spin_lock_irqsave(&host->lock, flags); - WARN_ON(host->removed); - spin_unlock_irqrestore(&host->lock, flags); -#endif - host->detect_change = 1; - mmc_schedule_delayed_work(&host->detect, delay); + _mmc_detect_change(host, delay, true); } - EXPORT_SYMBOL(mmc_detect_change); void mmc_init_erase(struct mmc_card *card) @@ -2334,7 +2318,7 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) pr_info("%s: %s: trying to init card at %u Hz\n", mmc_hostname(host), __func__, host->f_init); #endif - mmc_power_up(host); + mmc_power_up(host, host->ocr_avail); /* * Some eMMCs (with VCCQ always on) may not be reset after power up, so @@ -2423,7 +2407,7 @@ int mmc_detect_card_removed(struct mmc_host *host) * rescan handle the card removal. */ cancel_delayed_work(&host->detect); - mmc_detect_change(host, 0); + _mmc_detect_change(host, 0, false); } } @@ -2504,8 +2488,8 @@ void mmc_start_host(struct mmc_host *host) if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP) mmc_power_off(host); else - mmc_power_up(host); - mmc_detect_change(host, 0); + mmc_power_up(host, host->ocr_avail); + _mmc_detect_change(host, 0, false); } void mmc_stop_host(struct mmc_host *host) @@ -2583,7 +2567,7 @@ int mmc_power_restore_host(struct mmc_host *host) return -EINVAL; } - mmc_power_up(host); + mmc_power_up(host, host->card->ocr); ret = host->bus_ops->power_restore(host); mmc_bus_put(host); @@ -2657,28 +2641,6 @@ EXPORT_SYMBOL(mmc_cache_ctrl); #ifdef CONFIG_PM -/** - * mmc_suspend_host - suspend a host - * @host: mmc host - */ -int mmc_suspend_host(struct mmc_host *host) -{ - /* This function is deprecated */ - return 0; -} -EXPORT_SYMBOL(mmc_suspend_host); - -/** - * mmc_resume_host - resume a previously suspended host - * @host: mmc host - */ -int mmc_resume_host(struct mmc_host *host) -{ - /* This function is deprecated */ - return 0; -} -EXPORT_SYMBOL(mmc_resume_host); - /* Do the card removal on suspend if card is assumed removeable * Do that in pm notifier while userspace isn't yet frozen, so we will be able to sync the card. 
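/*
 * Aside: how a host driver benefits from the wakeup handling added in
 * _mmc_detect_change() above. If the controller device is marked
 * wakeup-capable (and does not rely on polling), a card-detect
 * interrupt now holds the system awake for 5 s via pm_wakeup_event()
 * so userspace can consume the event. The probe/IRQ helpers below are
 * illustrative, not taken from a specific driver.
 */
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/pm_wakeup.h>
#include <linux/mmc/host.h>

static void example_host_enable_cd_wakeup(struct mmc_host *mmc)
{
        device_init_wakeup(mmc_dev(mmc), true); /* CD IRQ may wake the system */
}

static irqreturn_t example_cd_irq(int irq, void *data)
{
        struct mmc_host *mmc = data;

        /* debounce, then rescan; now treated as a wakeup event by the core */
        mmc_detect_change(mmc, msecs_to_jiffies(200));
        return IRQ_HANDLED;
}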
@@ -2724,7 +2686,7 @@ int mmc_pm_notify(struct notifier_block *notify_block, spin_lock_irqsave(&host->lock, flags); host->rescan_disable = 0; spin_unlock_irqrestore(&host->lock, flags); - mmc_detect_change(host, 0); + _mmc_detect_change(host, 0, false); } diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 5345d156493e..443a584660f0 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -42,13 +42,13 @@ void mmc_set_ungated(struct mmc_host *host); void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode); void mmc_set_bus_width(struct mmc_host *host, unsigned int width); u32 mmc_select_voltage(struct mmc_host *host, u32 ocr); -int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage); +int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr); int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage); void mmc_set_timing(struct mmc_host *host, unsigned int timing); void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type); -void mmc_power_up(struct mmc_host *host); +void mmc_power_up(struct mmc_host *host, u32 ocr); void mmc_power_off(struct mmc_host *host); -void mmc_power_cycle(struct mmc_host *host); +void mmc_power_cycle(struct mmc_host *host, u32 ocr); static inline void mmc_delay(unsigned int ms) { diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 6d02012a1d0b..f631f5a9bf79 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -13,6 +13,7 @@ #include <linux/err.h> #include <linux/slab.h> #include <linux/stat.h> +#include <linux/pm_runtime.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> @@ -934,6 +935,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, goto err; } + card->ocr = ocr; card->type = MMC_TYPE_MMC; card->rca = 1; memcpy(card->raw_cid, cid, sizeof(card->raw_cid)); @@ -1404,9 +1406,9 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type) if (notify_type == EXT_CSD_POWER_OFF_LONG) timeout = card->ext_csd.power_off_longtime; - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_POWER_OFF_NOTIFICATION, - notify_type, timeout); + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_POWER_OFF_NOTIFICATION, + notify_type, timeout, true, false); if (err) pr_err("%s: Power Off Notification timed out, %u\n", mmc_hostname(card->host), timeout); @@ -1477,6 +1479,9 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend) mmc_claim_host(host); + if (mmc_card_suspended(host->card)) + goto out; + if (mmc_card_doing_bkops(host->card)) { err = mmc_stop_bkops(host->card); if (err) @@ -1496,51 +1501,93 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend) err = mmc_deselect_cards(host); host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); - if (!err) + if (!err) { mmc_power_off(host); + mmc_card_set_suspended(host->card); + } out: mmc_release_host(host); return err; } /* - * Suspend callback from host. + * Suspend callback */ static int mmc_suspend(struct mmc_host *host) { - return _mmc_suspend(host, true); -} + int err; -/* - * Shutdown callback - */ -static int mmc_shutdown(struct mmc_host *host) -{ - return _mmc_suspend(host, false); + err = _mmc_suspend(host, true); + if (!err) { + pm_runtime_disable(&host->card->dev); + pm_runtime_set_suspended(&host->card->dev); + } + + return err; } /* - * Resume callback from host. - * * This function tries to determine if the same card is still present * and, if so, restore all state to it. 
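/*
 * Aside: the generic shape of the runtime-PM bracketing that the
 * reworked card suspend path above (and its matching resume path)
 * applies to the card device. The device pointer and callbacks below
 * are placeholders, not part of this patch.
 */
#include <linux/pm_runtime.h>

static int example_system_suspend(struct device *dev)
{
        /* ...quiesce the hardware first... */
        pm_runtime_disable(dev);        /* no runtime callbacks past this point */
        pm_runtime_set_suspended(dev);  /* record the device as suspended */
        return 0;
}

static int example_system_resume(struct device *dev)
{
        /* ...power the hardware back up (or defer, see MMC_CAP_RUNTIME_RESUME)... */
        pm_runtime_set_active(dev);
        pm_runtime_mark_last_busy(dev);
        pm_runtime_enable(dev);
        return 0;
}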
*/ -static int mmc_resume(struct mmc_host *host) +static int _mmc_resume(struct mmc_host *host) { - int err; + int err = 0; BUG_ON(!host); BUG_ON(!host->card); mmc_claim_host(host); - mmc_power_up(host); - mmc_select_voltage(host, host->ocr); - err = mmc_init_card(host, host->ocr, host->card); + + if (!mmc_card_suspended(host->card)) + goto out; + + mmc_power_up(host, host->card->ocr); + err = mmc_init_card(host, host->card->ocr, host->card); + mmc_card_clr_suspended(host->card); + +out: mmc_release_host(host); + return err; +} + +/* + * Shutdown callback + */ +static int mmc_shutdown(struct mmc_host *host) +{ + int err = 0; + + /* + * In a specific case for poweroff notify, we need to resume the card + * before we can shutdown it properly. + */ + if (mmc_can_poweroff_notify(host->card) && + !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE)) + err = _mmc_resume(host); + + if (!err) + err = _mmc_suspend(host, false); return err; } +/* + * Callback for resume. + */ +static int mmc_resume(struct mmc_host *host) +{ + int err = 0; + + if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) { + err = _mmc_resume(host); + pm_runtime_set_active(&host->card->dev); + pm_runtime_mark_last_busy(&host->card->dev); + } + pm_runtime_enable(&host->card->dev); + + return err; +} /* * Callback for runtime_suspend. @@ -1552,18 +1599,11 @@ static int mmc_runtime_suspend(struct mmc_host *host) if (!(host->caps & MMC_CAP_AGGRESSIVE_PM)) return 0; - mmc_claim_host(host); - - err = mmc_suspend(host); - if (err) { + err = _mmc_suspend(host, true); + if (err) pr_err("%s: error %d doing aggessive suspend\n", mmc_hostname(host), err); - goto out; - } - mmc_power_off(host); -out: - mmc_release_host(host); return err; } @@ -1574,18 +1614,14 @@ static int mmc_runtime_resume(struct mmc_host *host) { int err; - if (!(host->caps & MMC_CAP_AGGRESSIVE_PM)) + if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME))) return 0; - mmc_claim_host(host); - - mmc_power_up(host); - err = mmc_resume(host); + err = _mmc_resume(host); if (err) pr_err("%s: error %d doing aggessive resume\n", mmc_hostname(host), err); - mmc_release_host(host); return 0; } @@ -1595,7 +1631,7 @@ static int mmc_power_restore(struct mmc_host *host) host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); mmc_claim_host(host); - ret = mmc_init_card(host, host->ocr, host->card); + ret = mmc_init_card(host, host->card->ocr, host->card); mmc_release_host(host); return ret; @@ -1640,7 +1676,7 @@ static void mmc_attach_bus_ops(struct mmc_host *host) int mmc_attach_mmc(struct mmc_host *host) { int err; - u32 ocr; + u32 ocr, rocr; BUG_ON(!host); WARN_ON(!host->claimed); @@ -1666,23 +1702,12 @@ int mmc_attach_mmc(struct mmc_host *host) goto err; } - /* - * Sanity check the voltages that the card claims to - * support. - */ - if (ocr & 0x7F) { - pr_warning("%s: card claims to support voltages " - "below the defined range. These will be ignored.\n", - mmc_hostname(host)); - ocr &= ~0x7F; - } - - host->ocr = mmc_select_voltage(host, ocr); + rocr = mmc_select_voltage(host, ocr); /* * Can we support the voltage of the card? */ - if (!host->ocr) { + if (!rocr) { err = -EINVAL; goto err; } @@ -1690,7 +1715,7 @@ int mmc_attach_mmc(struct mmc_host *host) /* * Detect and init the card. 
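/*
 * Aside: MMC_CAP_RUNTIME_RESUME, tested in the resume path above, lets
 * the card re-initialisation be deferred from system resume to the
 * first runtime resume. A host driver opts in at probe time roughly
 * like this; pairing it with MMC_CAP_AGGRESSIVE_PM is a common choice,
 * not a requirement, and the helper name is illustrative.
 */
#include <linux/mmc/host.h>

static void example_host_set_pm_caps(struct mmc_host *mmc)
{
        mmc->caps |= MMC_CAP_AGGRESSIVE_PM;     /* runtime-suspend idle cards */
        mmc->caps |= MMC_CAP_RUNTIME_RESUME;    /* re-init lazily on runtime resume */
}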
*/ - err = mmc_init_card(host, host->ocr, NULL); + err = mmc_init_card(host, rocr, NULL); if (err) goto err; diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index ef183483d5b6..e5b5eeb548d1 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -23,6 +23,40 @@ #define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ +static inline int __mmc_send_status(struct mmc_card *card, u32 *status, + bool ignore_crc) +{ + int err; + struct mmc_command cmd = {0}; + + BUG_ON(!card); + BUG_ON(!card->host); + + cmd.opcode = MMC_SEND_STATUS; + if (!mmc_host_is_spi(card->host)) + cmd.arg = card->rca << 16; + cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; + if (ignore_crc) + cmd.flags &= ~MMC_RSP_CRC; + + err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); + if (err) + return err; + + /* NOTE: callers are required to understand the difference + * between "native" and SPI format status words! + */ + if (status) + *status = cmd.resp[0]; + + return 0; +} + +int mmc_send_status(struct mmc_card *card, u32 *status) +{ + return __mmc_send_status(card, status, false); +} + static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card) { int err; @@ -370,16 +404,18 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc) * @timeout_ms: timeout (ms) for operation performed by register write, * timeout of zero implies maximum possible timeout * @use_busy_signal: use the busy signal as response type + * @send_status: send status cmd to poll for busy * * Modifies the EXT_CSD register for selected card. */ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, - unsigned int timeout_ms, bool use_busy_signal) + unsigned int timeout_ms, bool use_busy_signal, bool send_status) { int err; struct mmc_command cmd = {0}; unsigned long timeout; - u32 status; + u32 status = 0; + bool ignore_crc = false; BUG_ON(!card); BUG_ON(!card->host); @@ -408,17 +444,37 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, if (!use_busy_signal) return 0; - /* Must check status to be sure of no errors */ + /* + * Must check status to be sure of no errors + * If CMD13 is to check the busy completion of the timing change, + * disable the check of CRC error. + */ + if (index == EXT_CSD_HS_TIMING && + !(card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)) + ignore_crc = true; + timeout = jiffies + msecs_to_jiffies(MMC_OPS_TIMEOUT_MS); do { - err = mmc_send_status(card, &status); - if (err) - return err; + if (send_status) { + err = __mmc_send_status(card, &status, ignore_crc); + if (err) + return err; + } if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) break; if (mmc_host_is_spi(card->host)) break; + /* + * We are not allowed to issue a status command and the host + * does'nt support MMC_CAP_WAIT_WHILE_BUSY, then we can only + * rely on waiting for the stated timeout to be sufficient. + */ + if (!send_status) { + mmc_delay(timeout_ms); + return 0; + } + /* Timeout if the device never leaves the program state. */ if (time_after(jiffies, timeout)) { pr_err("%s: Card stuck in programming state! 
%s\n", @@ -445,36 +501,10 @@ EXPORT_SYMBOL_GPL(__mmc_switch); int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, unsigned int timeout_ms) { - return __mmc_switch(card, set, index, value, timeout_ms, true); + return __mmc_switch(card, set, index, value, timeout_ms, true, true); } EXPORT_SYMBOL_GPL(mmc_switch); -int mmc_send_status(struct mmc_card *card, u32 *status) -{ - int err; - struct mmc_command cmd = {0}; - - BUG_ON(!card); - BUG_ON(!card->host); - - cmd.opcode = MMC_SEND_STATUS; - if (!mmc_host_is_spi(card->host)) - cmd.arg = card->rca << 16; - cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; - - err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); - if (err) - return err; - - /* NOTE: callers are required to understand the difference - * between "native" and SPI format status words! - */ - if (status) - *status = cmd.resp[0]; - - return 0; -} - static int mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode, u8 len) diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 5e8823dc3ef6..6f42050b7ccc 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -13,6 +13,7 @@ #include <linux/err.h> #include <linux/slab.h> #include <linux/stat.h> +#include <linux/pm_runtime.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> @@ -721,6 +722,7 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr) int err; u32 max_current; int retries = 10; + u32 pocr = ocr; try_again: if (!retries) { @@ -773,7 +775,8 @@ try_again: */ if (!mmc_host_is_spi(host) && rocr && ((*rocr & 0x41000000) == 0x41000000)) { - err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180); + err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, + pocr); if (err == -EAGAIN) { retries--; goto try_again; @@ -935,6 +938,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, if (IS_ERR(card)) return PTR_ERR(card); + card->ocr = ocr; card->type = MMC_TYPE_SD; memcpy(card->raw_cid, cid, sizeof(card->raw_cid)); } @@ -1064,10 +1068,7 @@ static void mmc_sd_detect(struct mmc_host *host) } } -/* - * Suspend callback from host. - */ -static int mmc_sd_suspend(struct mmc_host *host) +static int _mmc_sd_suspend(struct mmc_host *host) { int err = 0; @@ -1075,34 +1076,77 @@ static int mmc_sd_suspend(struct mmc_host *host) BUG_ON(!host->card); mmc_claim_host(host); + + if (mmc_card_suspended(host->card)) + goto out; + if (!mmc_host_is_spi(host)) err = mmc_deselect_cards(host); host->card->state &= ~MMC_STATE_HIGHSPEED; - if (!err) + if (!err) { mmc_power_off(host); + mmc_card_set_suspended(host->card); + } + +out: mmc_release_host(host); + return err; +} + +/* + * Callback for suspend + */ +static int mmc_sd_suspend(struct mmc_host *host) +{ + int err; + + err = _mmc_sd_suspend(host); + if (!err) { + pm_runtime_disable(&host->card->dev); + pm_runtime_set_suspended(&host->card->dev); + } return err; } /* - * Resume callback from host. - * * This function tries to determine if the same card is still present * and, if so, restore all state to it. 
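/*
 * Aside: the CMD13 polling that __mmc_switch() above relies on follows
 * the usual jiffies-based busy-wait shape. A stripped-down sketch of
 * that loop, assuming it lives in drivers/mmc/core where the
 * mmc_send_status() helper reworked earlier in this patch is visible.
 */
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

static int example_poll_until_ready(struct mmc_card *card,
                                    unsigned int timeout_ms)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
        u32 status;
        int err;

        do {
                err = mmc_send_status(card, &status);   /* CMD13 */
                if (err)
                        return err;
                if (time_after(jiffies, timeout))
                        return -ETIMEDOUT;      /* card stuck in programming state */
        } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);

        return 0;
}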
*/ -static int mmc_sd_resume(struct mmc_host *host) +static int _mmc_sd_resume(struct mmc_host *host) { - int err; + int err = 0; BUG_ON(!host); BUG_ON(!host->card); mmc_claim_host(host); - mmc_power_up(host); - mmc_select_voltage(host, host->ocr); - err = mmc_sd_init_card(host, host->ocr, host->card); + + if (!mmc_card_suspended(host->card)) + goto out; + + mmc_power_up(host, host->card->ocr); + err = mmc_sd_init_card(host, host->card->ocr, host->card); + mmc_card_clr_suspended(host->card); + +out: mmc_release_host(host); + return err; +} + +/* + * Callback for resume + */ +static int mmc_sd_resume(struct mmc_host *host) +{ + int err = 0; + + if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) { + err = _mmc_sd_resume(host); + pm_runtime_set_active(&host->card->dev); + pm_runtime_mark_last_busy(&host->card->dev); + } + pm_runtime_enable(&host->card->dev); return err; } @@ -1117,18 +1161,11 @@ static int mmc_sd_runtime_suspend(struct mmc_host *host) if (!(host->caps & MMC_CAP_AGGRESSIVE_PM)) return 0; - mmc_claim_host(host); - - err = mmc_sd_suspend(host); - if (err) { + err = _mmc_sd_suspend(host); + if (err) pr_err("%s: error %d doing aggessive suspend\n", mmc_hostname(host), err); - goto out; - } - mmc_power_off(host); -out: - mmc_release_host(host); return err; } @@ -1139,18 +1176,14 @@ static int mmc_sd_runtime_resume(struct mmc_host *host) { int err; - if (!(host->caps & MMC_CAP_AGGRESSIVE_PM)) + if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME))) return 0; - mmc_claim_host(host); - - mmc_power_up(host); - err = mmc_sd_resume(host); + err = _mmc_sd_resume(host); if (err) pr_err("%s: error %d doing aggessive resume\n", mmc_hostname(host), err); - mmc_release_host(host); return 0; } @@ -1160,7 +1193,7 @@ static int mmc_sd_power_restore(struct mmc_host *host) host->card->state &= ~MMC_STATE_HIGHSPEED; mmc_claim_host(host); - ret = mmc_sd_init_card(host, host->ocr, host->card); + ret = mmc_sd_init_card(host, host->card->ocr, host->card); mmc_release_host(host); return ret; @@ -1205,7 +1238,7 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host) int mmc_attach_sd(struct mmc_host *host) { int err; - u32 ocr; + u32 ocr, rocr; BUG_ON(!host); WARN_ON(!host->claimed); @@ -1229,31 +1262,12 @@ int mmc_attach_sd(struct mmc_host *host) goto err; } - /* - * Sanity check the voltages that the card claims to - * support. - */ - if (ocr & 0x7F) { - pr_warning("%s: card claims to support voltages " - "below the defined range. These will be ignored.\n", - mmc_hostname(host)); - ocr &= ~0x7F; - } - - if ((ocr & MMC_VDD_165_195) && - !(host->ocr_avail_sd & MMC_VDD_165_195)) { - pr_warning("%s: SD card claims to support the " - "incompletely defined 'low voltage range'. This " - "will be ignored.\n", mmc_hostname(host)); - ocr &= ~MMC_VDD_165_195; - } - - host->ocr = mmc_select_voltage(host, ocr); + rocr = mmc_select_voltage(host, ocr); /* * Can we support the voltage(s) of the card(s)? */ - if (!host->ocr) { + if (!rocr) { err = -EINVAL; goto err; } @@ -1261,7 +1275,7 @@ int mmc_attach_sd(struct mmc_host *host) /* * Detect and init the card. 
*/ - err = mmc_sd_init_card(host, host->ocr, NULL); + err = mmc_sd_init_card(host, rocr, NULL); if (err) goto err; diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 80d89cff7306..4d721c6e2af0 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -593,23 +593,28 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, struct mmc_card *card; int err; int retries = 10; + u32 rocr = 0; + u32 ocr_card = ocr; BUG_ON(!host); WARN_ON(!host->claimed); + /* to query card if 1.8V signalling is supported */ + if (mmc_host_uhs(host)) + ocr |= R4_18V_PRESENT; + try_again: if (!retries) { pr_warning("%s: Skipping voltage switch\n", mmc_hostname(host)); ocr &= ~R4_18V_PRESENT; - host->ocr &= ~R4_18V_PRESENT; } /* * Inform the card of the voltage */ if (!powered_resume) { - err = mmc_send_io_op_cond(host, host->ocr, &ocr); + err = mmc_send_io_op_cond(host, ocr, &rocr); if (err) goto err; } @@ -632,8 +637,8 @@ try_again: goto err; } - if ((ocr & R4_MEMORY_PRESENT) && - mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid, NULL) == 0) { + if ((rocr & R4_MEMORY_PRESENT) && + mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) { card->type = MMC_TYPE_SD_COMBO; if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || @@ -663,8 +668,9 @@ try_again: * systems that claim 1.8v signalling in fact do not support * it. */ - if (!powered_resume && (ocr & R4_18V_PRESENT) && mmc_host_uhs(host)) { - err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180); + if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) { + err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, + ocr); if (err == -EAGAIN) { sdio_reset(host); mmc_go_idle(host); @@ -674,12 +680,10 @@ try_again: goto try_again; } else if (err) { ocr &= ~R4_18V_PRESENT; - host->ocr &= ~R4_18V_PRESENT; } err = 0; } else { ocr &= ~R4_18V_PRESENT; - host->ocr &= ~R4_18V_PRESENT; } /* @@ -759,6 +763,7 @@ try_again: card = oldcard; } + card->ocr = ocr_card; mmc_fixup_device(card, NULL); if (card->type == MMC_TYPE_SD_COMBO) { @@ -981,8 +986,7 @@ static int mmc_sdio_resume(struct mmc_host *host) /* Restore power if needed */ if (!mmc_card_keep_power(host)) { - mmc_power_up(host); - mmc_select_voltage(host, host->ocr); + mmc_power_up(host, host->card->ocr); /* * Tell runtime PM core we just powered up the card, * since it still believes the card is powered off. @@ -1000,7 +1004,7 @@ static int mmc_sdio_resume(struct mmc_host *host) if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) { sdio_reset(host); mmc_go_idle(host); - err = mmc_sdio_init_card(host, host->ocr, host->card, + err = mmc_sdio_init_card(host, host->card->ocr, host->card, mmc_card_keep_power(host)); } else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) { /* We may have switched to 1-bit mode during suspend */ @@ -1040,7 +1044,6 @@ static int mmc_sdio_resume(struct mmc_host *host) static int mmc_sdio_power_restore(struct mmc_host *host) { int ret; - u32 ocr; BUG_ON(!host); BUG_ON(!host->card); @@ -1062,32 +1065,17 @@ static int mmc_sdio_power_restore(struct mmc_host *host) * for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and * harmless in other situations. * - * With these steps taken, mmc_select_voltage() is also required to - * restore the correct voltage setting of the card. 
*/ sdio_reset(host); mmc_go_idle(host); mmc_send_if_cond(host, host->ocr_avail); - ret = mmc_send_io_op_cond(host, 0, &ocr); + ret = mmc_send_io_op_cond(host, 0, NULL); if (ret) goto out; - if (host->ocr_avail_sdio) - host->ocr_avail = host->ocr_avail_sdio; - - host->ocr = mmc_select_voltage(host, ocr & ~0x7F); - if (!host->ocr) { - ret = -EINVAL; - goto out; - } - - if (mmc_host_uhs(host)) - /* to query card if 1.8V signalling is supported */ - host->ocr |= R4_18V_PRESENT; - - ret = mmc_sdio_init_card(host, host->ocr, host->card, + ret = mmc_sdio_init_card(host, host->card->ocr, host->card, mmc_card_keep_power(host)); if (!ret && host->sdio_irqs) mmc_signal_sdio_irq(host); @@ -1108,7 +1096,7 @@ static int mmc_sdio_runtime_suspend(struct mmc_host *host) static int mmc_sdio_runtime_resume(struct mmc_host *host) { /* Restore power and re-initialize. */ - mmc_power_up(host); + mmc_power_up(host, host->card->ocr); return mmc_sdio_power_restore(host); } @@ -1131,7 +1119,7 @@ static const struct mmc_bus_ops mmc_sdio_ops = { int mmc_attach_sdio(struct mmc_host *host) { int err, i, funcs; - u32 ocr; + u32 ocr, rocr; struct mmc_card *card; BUG_ON(!host); @@ -1145,23 +1133,13 @@ int mmc_attach_sdio(struct mmc_host *host) if (host->ocr_avail_sdio) host->ocr_avail = host->ocr_avail_sdio; - /* - * Sanity check the voltages that the card claims to - * support. - */ - if (ocr & 0x7F) { - pr_warning("%s: card claims to support voltages " - "below the defined range. These will be ignored.\n", - mmc_hostname(host)); - ocr &= ~0x7F; - } - host->ocr = mmc_select_voltage(host, ocr); + rocr = mmc_select_voltage(host, ocr); /* * Can we support the voltage(s) of the card(s)? */ - if (!host->ocr) { + if (!rocr) { err = -EINVAL; goto err; } @@ -1169,22 +1147,10 @@ int mmc_attach_sdio(struct mmc_host *host) /* * Detect and init the card. */ - if (mmc_host_uhs(host)) - /* to query card if 1.8V signalling is supported */ - host->ocr |= R4_18V_PRESENT; + err = mmc_sdio_init_card(host, rocr, NULL, 0); + if (err) + goto err; - err = mmc_sdio_init_card(host, host->ocr, NULL, 0); - if (err) { - if (err == -EAGAIN) { - /* - * Retry initialization with S18R set to 0. - */ - host->ocr &= ~R4_18V_PRESENT; - err = mmc_sdio_init_card(host, host->ocr, NULL, 0); - } - if (err) - goto err; - } card = host->card; /* diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index ef8956568c3a..157b570ba343 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -308,8 +308,7 @@ static void sdio_acpi_set_handle(struct sdio_func *func) struct mmc_host *host = func->card->host; u64 addr = (host->slotno << 16) | func->num; - ACPI_HANDLE_SET(&func->dev, - acpi_get_child(ACPI_HANDLE(host->parent), addr)); + acpi_preset_companion(&func->dev, ACPI_HANDLE(host->parent), addr); } #else static inline void sdio_acpi_set_handle(struct sdio_func *func) {} diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 69e438ee043e..2cbb4516d353 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -255,7 +255,6 @@ struct atmel_mci_slot { #define ATMCI_CARD_PRESENT 0 #define ATMCI_CARD_NEED_INIT 1 #define ATMCI_SHUTDOWN 2 -#define ATMCI_SUSPENDED 3 int detect_pin; int wp_pin; @@ -589,6 +588,13 @@ static void atmci_timeout_timer(unsigned long data) if (host->mrq->cmd->data) { host->mrq->cmd->data->error = -ETIMEDOUT; host->data = NULL; + /* + * With some SDIO modules, sometimes DMA transfer hangs. 
If + * stop_transfer() is not called then the DMA request is not + * removed, following ones are queued and never computed. + */ + if (host->state == STATE_DATA_XFER) + host->stop_transfer(host); } else { host->mrq->cmd->error = -ETIMEDOUT; host->cmd = NULL; @@ -1803,12 +1809,14 @@ static void atmci_tasklet_func(unsigned long priv) if (unlikely(status)) { host->stop_transfer(host); host->data = NULL; - if (status & ATMCI_DTOE) { - data->error = -ETIMEDOUT; - } else if (status & ATMCI_DCRCE) { - data->error = -EILSEQ; - } else { - data->error = -EIO; + if (data) { + if (status & ATMCI_DTOE) { + data->error = -ETIMEDOUT; + } else if (status & ATMCI_DCRCE) { + data->error = -EILSEQ; + } else { + data->error = -EIO; + } } } @@ -2520,70 +2528,10 @@ static int __exit atmci_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int atmci_suspend(struct device *dev) -{ - struct atmel_mci *host = dev_get_drvdata(dev); - int i; - - for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { - struct atmel_mci_slot *slot = host->slot[i]; - int ret; - - if (!slot) - continue; - ret = mmc_suspend_host(slot->mmc); - if (ret < 0) { - while (--i >= 0) { - slot = host->slot[i]; - if (slot - && test_bit(ATMCI_SUSPENDED, &slot->flags)) { - mmc_resume_host(host->slot[i]->mmc); - clear_bit(ATMCI_SUSPENDED, &slot->flags); - } - } - return ret; - } else { - set_bit(ATMCI_SUSPENDED, &slot->flags); - } - } - - return 0; -} - -static int atmci_resume(struct device *dev) -{ - struct atmel_mci *host = dev_get_drvdata(dev); - int i; - int ret = 0; - - for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { - struct atmel_mci_slot *slot = host->slot[i]; - int err; - - slot = host->slot[i]; - if (!slot) - continue; - if (!test_bit(ATMCI_SUSPENDED, &slot->flags)) - continue; - err = mmc_resume_host(slot->mmc); - if (err < 0) - ret = err; - else - clear_bit(ATMCI_SUSPENDED, &slot->flags); - } - - return ret; -} -#endif - -static SIMPLE_DEV_PM_OPS(atmci_pm, atmci_suspend, atmci_resume); - static struct platform_driver atmci_driver = { .remove = __exit_p(atmci_remove), .driver = { .name = "atmel_mci", - .pm = &atmci_pm, .of_match_table = of_match_ptr(atmci_dt_ids), }, }; diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index df9becdd2e99..f5443a6c4915 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c @@ -1157,11 +1157,6 @@ static int au1xmmc_remove(struct platform_device *pdev) static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state) { struct au1xmmc_host *host = platform_get_drvdata(pdev); - int ret; - - ret = mmc_suspend_host(host->mmc); - if (ret) - return ret; au_writel(0, HOST_CONFIG2(host)); au_writel(0, HOST_CONFIG(host)); @@ -1178,7 +1173,7 @@ static int au1xmmc_resume(struct platform_device *pdev) au1xmmc_reset_controller(host); - return mmc_resume_host(host->mmc); + return 0; } #else #define au1xmmc_suspend NULL diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c index 94fae2f1baaf..2b7f37e82ca9 100644 --- a/drivers/mmc/host/bfin_sdh.c +++ b/drivers/mmc/host/bfin_sdh.c @@ -391,6 +391,7 @@ static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) /* Disable 4 bit SDIO */ cfg &= ~SD4E; } + bfin_write_SDH_CFG(cfg); host->power_mode = ios->power_mode; #ifndef RSI_BLKSZ @@ -415,7 +416,6 @@ static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) cfg &= ~SD_CMD_OD; # endif - if (ios->power_mode != MMC_POWER_OFF) cfg |= PWR_ON; else @@ -433,7 +433,6 @@ static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) clk_ctl 
|= CLK_E; host->clk_div = clk_div; bfin_write_SDH_CLK_CTL(clk_ctl); - } else sdh_stop_clock(host); @@ -640,21 +639,15 @@ static int sdh_remove(struct platform_device *pdev) #ifdef CONFIG_PM static int sdh_suspend(struct platform_device *dev, pm_message_t state) { - struct mmc_host *mmc = platform_get_drvdata(dev); struct bfin_sd_host *drv_data = get_sdh_data(dev); - int ret = 0; - - if (mmc) - ret = mmc_suspend_host(mmc); peripheral_free_list(drv_data->pin_req); - return ret; + return 0; } static int sdh_resume(struct platform_device *dev) { - struct mmc_host *mmc = platform_get_drvdata(dev); struct bfin_sd_host *drv_data = get_sdh_data(dev); int ret = 0; @@ -665,10 +658,6 @@ static int sdh_resume(struct platform_device *dev) } sdh_reset(); - - if (mmc) - ret = mmc_resume_host(mmc); - return ret; } #else diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c index 9d6e2b844404..1087b4c79cd6 100644 --- a/drivers/mmc/host/cb710-mmc.c +++ b/drivers/mmc/host/cb710-mmc.c @@ -667,12 +667,6 @@ static const struct mmc_host_ops cb710_mmc_host = { static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state) { struct cb710_slot *slot = cb710_pdev_to_slot(pdev); - struct mmc_host *mmc = cb710_slot_to_mmc(slot); - int err; - - err = mmc_suspend_host(mmc); - if (err) - return err; cb710_mmc_enable_irq(slot, 0, ~0); return 0; @@ -681,11 +675,9 @@ static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state) static int cb710_mmc_resume(struct platform_device *pdev) { struct cb710_slot *slot = cb710_pdev_to_slot(pdev); - struct mmc_host *mmc = cb710_slot_to_mmc(slot); cb710_mmc_enable_irq(slot, 0, ~0); - - return mmc_resume_host(mmc); + return 0; } #endif /* CONFIG_PM */ diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index e9fa87df909c..d6153740b77f 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -193,7 +193,6 @@ struct mmc_davinci_host { #define DAVINCI_MMC_DATADIR_READ 1 #define DAVINCI_MMC_DATADIR_WRITE 2 unsigned char data_dir; - unsigned char suspended; /* buffer is used during PIO of one scatterlist segment, and * is updated along with buffer_bytes_left. 
bytes_left applies @@ -1435,38 +1434,23 @@ static int davinci_mmcsd_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct mmc_davinci_host *host = platform_get_drvdata(pdev); - int ret; - ret = mmc_suspend_host(host->mmc); - if (!ret) { - writel(0, host->base + DAVINCI_MMCIM); - mmc_davinci_reset_ctrl(host, 1); - clk_disable(host->clk); - host->suspended = 1; - } else { - host->suspended = 0; - } + writel(0, host->base + DAVINCI_MMCIM); + mmc_davinci_reset_ctrl(host, 1); + clk_disable(host->clk); - return ret; + return 0; } static int davinci_mmcsd_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct mmc_davinci_host *host = platform_get_drvdata(pdev); - int ret; - - if (!host->suspended) - return 0; clk_enable(host->clk); - mmc_davinci_reset_ctrl(host, 0); - ret = mmc_resume_host(host->mmc); - if (!ret) - host->suspended = 0; - return ret; + return 0; } static const struct dev_pm_ops davinci_mmcsd_pm = { diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c index 6a1fa2110a05..3423c5ed50c7 100644 --- a/drivers/mmc/host/dw_mmc-exynos.c +++ b/drivers/mmc/host/dw_mmc-exynos.c @@ -14,8 +14,10 @@ #include <linux/clk.h> #include <linux/mmc/host.h> #include <linux/mmc/dw_mmc.h> +#include <linux/mmc/mmc.h> #include <linux/of.h> #include <linux/of_gpio.h> +#include <linux/slab.h> #include "dw_mmc.h" #include "dw_mmc-pltfm.h" @@ -30,16 +32,39 @@ #define SDMMC_CLKSEL_TIMING(x, y, z) (SDMMC_CLKSEL_CCLK_SAMPLE(x) | \ SDMMC_CLKSEL_CCLK_DRIVE(y) | \ SDMMC_CLKSEL_CCLK_DIVIDER(z)) +#define SDMMC_CLKSEL_WAKEUP_INT BIT(11) #define EXYNOS4210_FIXED_CIU_CLK_DIV 2 #define EXYNOS4412_FIXED_CIU_CLK_DIV 4 +/* Block number in eMMC */ +#define DWMCI_BLOCK_NUM 0xFFFFFFFF + +#define SDMMC_EMMCP_BASE 0x1000 +#define SDMMC_MPSECURITY (SDMMC_EMMCP_BASE + 0x0010) +#define SDMMC_MPSBEGIN0 (SDMMC_EMMCP_BASE + 0x0200) +#define SDMMC_MPSEND0 (SDMMC_EMMCP_BASE + 0x0204) +#define SDMMC_MPSCTRL0 (SDMMC_EMMCP_BASE + 0x020C) + +/* SMU control bits */ +#define DWMCI_MPSCTRL_SECURE_READ_BIT BIT(7) +#define DWMCI_MPSCTRL_SECURE_WRITE_BIT BIT(6) +#define DWMCI_MPSCTRL_NON_SECURE_READ_BIT BIT(5) +#define DWMCI_MPSCTRL_NON_SECURE_WRITE_BIT BIT(4) +#define DWMCI_MPSCTRL_USE_FUSE_KEY BIT(3) +#define DWMCI_MPSCTRL_ECB_MODE BIT(2) +#define DWMCI_MPSCTRL_ENCRYPTION BIT(1) +#define DWMCI_MPSCTRL_VALID BIT(0) + +#define EXYNOS_CCLKIN_MIN 50000000 /* unit: HZ */ + /* Variations in Exynos specific dw-mshc controller */ enum dw_mci_exynos_type { DW_MCI_TYPE_EXYNOS4210, DW_MCI_TYPE_EXYNOS4412, DW_MCI_TYPE_EXYNOS5250, DW_MCI_TYPE_EXYNOS5420, + DW_MCI_TYPE_EXYNOS5420_SMU, }; /* Exynos implementation specific driver private data */ @@ -48,6 +73,7 @@ struct dw_mci_exynos_priv_data { u8 ciu_div; u32 sdr_timing; u32 ddr_timing; + u32 cur_speed; }; static struct dw_mci_exynos_compatible { @@ -66,44 +92,80 @@ static struct dw_mci_exynos_compatible { }, { .compatible = "samsung,exynos5420-dw-mshc", .ctrl_type = DW_MCI_TYPE_EXYNOS5420, + }, { + .compatible = "samsung,exynos5420-dw-mshc-smu", + .ctrl_type = DW_MCI_TYPE_EXYNOS5420_SMU, }, }; static int dw_mci_exynos_priv_init(struct dw_mci *host) { - struct dw_mci_exynos_priv_data *priv; - int idx; - - priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) { - dev_err(host->dev, "mem alloc failed for private data\n"); - return -ENOMEM; - } + struct dw_mci_exynos_priv_data *priv = host->priv; - for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) { - if 
(of_device_is_compatible(host->dev->of_node, - exynos_compat[idx].compatible)) - priv->ctrl_type = exynos_compat[idx].ctrl_type; + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU) { + mci_writel(host, MPSBEGIN0, 0); + mci_writel(host, MPSEND0, DWMCI_BLOCK_NUM); + mci_writel(host, MPSCTRL0, DWMCI_MPSCTRL_SECURE_WRITE_BIT | + DWMCI_MPSCTRL_NON_SECURE_READ_BIT | + DWMCI_MPSCTRL_VALID | + DWMCI_MPSCTRL_NON_SECURE_WRITE_BIT); } - host->priv = priv; return 0; } static int dw_mci_exynos_setup_clock(struct dw_mci *host) { struct dw_mci_exynos_priv_data *priv = host->priv; + unsigned long rate = clk_get_rate(host->ciu_clk); - if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5250 || - priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420) - host->bus_hz /= (priv->ciu_div + 1); - else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412) - host->bus_hz /= EXYNOS4412_FIXED_CIU_CLK_DIV; - else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210) - host->bus_hz /= EXYNOS4210_FIXED_CIU_CLK_DIV; + host->bus_hz = rate / (priv->ciu_div + 1); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int dw_mci_exynos_suspend(struct device *dev) +{ + struct dw_mci *host = dev_get_drvdata(dev); + + return dw_mci_suspend(host); +} + +static int dw_mci_exynos_resume(struct device *dev) +{ + struct dw_mci *host = dev_get_drvdata(dev); + + dw_mci_exynos_priv_init(host); + return dw_mci_resume(host); +} + +/** + * dw_mci_exynos_resume_noirq - Exynos-specific resume code + * + * On exynos5420 there is a silicon errata that will sometimes leave the + * WAKEUP_INT bit in the CLKSEL register asserted. This bit is 1 to indicate + * that it fired and we can clear it by writing a 1 back. Clear it to prevent + * interrupts from going off constantly. + * + * We run this code on all exynos variants because it doesn't hurt. 
+ */ + +static int dw_mci_exynos_resume_noirq(struct device *dev) +{ + struct dw_mci *host = dev_get_drvdata(dev); + u32 clksel; + + clksel = mci_readl(host, CLKSEL); + if (clksel & SDMMC_CLKSEL_WAKEUP_INT) + mci_writel(host, CLKSEL, clksel); return 0; } +#else +#define dw_mci_exynos_suspend NULL +#define dw_mci_exynos_resume NULL +#define dw_mci_exynos_resume_noirq NULL +#endif /* CONFIG_PM_SLEEP */ static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr) { @@ -121,23 +183,68 @@ static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr) static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios) { struct dw_mci_exynos_priv_data *priv = host->priv; + unsigned int wanted = ios->clock; + unsigned long actual; + u8 div = priv->ciu_div + 1; - if (ios->timing == MMC_TIMING_UHS_DDR50) + if (ios->timing == MMC_TIMING_UHS_DDR50) { mci_writel(host, CLKSEL, priv->ddr_timing); - else + /* Should be double rate for DDR mode */ + if (ios->bus_width == MMC_BUS_WIDTH_8) + wanted <<= 1; + } else { mci_writel(host, CLKSEL, priv->sdr_timing); + } + + /* Don't care if wanted clock is zero */ + if (!wanted) + return; + + /* Guaranteed minimum frequency for cclkin */ + if (wanted < EXYNOS_CCLKIN_MIN) + wanted = EXYNOS_CCLKIN_MIN; + + if (wanted != priv->cur_speed) { + int ret = clk_set_rate(host->ciu_clk, wanted * div); + if (ret) + dev_warn(host->dev, + "failed to set clk-rate %u error: %d\n", + wanted * div, ret); + actual = clk_get_rate(host->ciu_clk); + host->bus_hz = actual / div; + priv->cur_speed = wanted; + host->current_speed = 0; + } } static int dw_mci_exynos_parse_dt(struct dw_mci *host) { - struct dw_mci_exynos_priv_data *priv = host->priv; + struct dw_mci_exynos_priv_data *priv; struct device_node *np = host->dev->of_node; u32 timing[2]; u32 div = 0; + int idx; int ret; - of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div); - priv->ciu_div = div; + priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(host->dev, "mem alloc failed for private data\n"); + return -ENOMEM; + } + + for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) { + if (of_device_is_compatible(np, exynos_compat[idx].compatible)) + priv->ctrl_type = exynos_compat[idx].ctrl_type; + } + + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412) + priv->ciu_div = EXYNOS4412_FIXED_CIU_CLK_DIV - 1; + else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210) + priv->ciu_div = EXYNOS4210_FIXED_CIU_CLK_DIV - 1; + else { + of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div); + priv->ciu_div = div; + } ret = of_property_read_u32_array(np, "samsung,dw-mshc-sdr-timing", timing, 2); @@ -152,9 +259,131 @@ static int dw_mci_exynos_parse_dt(struct dw_mci *host) return ret; priv->ddr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div); + host->priv = priv; return 0; } +static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host) +{ + return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL)); +} + +static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample) +{ + u32 clksel; + clksel = mci_readl(host, CLKSEL); + clksel = (clksel & ~0x7) | SDMMC_CLKSEL_CCLK_SAMPLE(sample); + mci_writel(host, CLKSEL, clksel); +} + +static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host) +{ + u32 clksel; + u8 sample; + + clksel = mci_readl(host, CLKSEL); + sample = (clksel + 1) & 0x7; + clksel = (clksel & ~0x7) | sample; + mci_writel(host, CLKSEL, clksel); + return sample; +} + +static s8 dw_mci_exynos_get_best_clksmpl(u8 candiates) +{ + const u8 iter = 8; + 
u8 __c; + s8 i, loc = -1; + + for (i = 0; i < iter; i++) { + __c = ror8(candiates, i); + if ((__c & 0xc7) == 0xc7) { + loc = i; + goto out; + } + } + + for (i = 0; i < iter; i++) { + __c = ror8(candiates, i); + if ((__c & 0x83) == 0x83) { + loc = i; + goto out; + } + } + +out: + return loc; +} + +static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode, + struct dw_mci_tuning_data *tuning_data) +{ + struct dw_mci *host = slot->host; + struct mmc_host *mmc = slot->mmc; + const u8 *blk_pattern = tuning_data->blk_pattern; + u8 *blk_test; + unsigned int blksz = tuning_data->blksz; + u8 start_smpl, smpl, candiates = 0; + s8 found = -1; + int ret = 0; + + blk_test = kmalloc(blksz, GFP_KERNEL); + if (!blk_test) + return -ENOMEM; + + start_smpl = dw_mci_exynos_get_clksmpl(host); + + do { + struct mmc_request mrq = {NULL}; + struct mmc_command cmd = {0}; + struct mmc_command stop = {0}; + struct mmc_data data = {0}; + struct scatterlist sg; + + cmd.opcode = opcode; + cmd.arg = 0; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + + stop.opcode = MMC_STOP_TRANSMISSION; + stop.arg = 0; + stop.flags = MMC_RSP_R1B | MMC_CMD_AC; + + data.blksz = blksz; + data.blocks = 1; + data.flags = MMC_DATA_READ; + data.sg = &sg; + data.sg_len = 1; + + sg_init_one(&sg, blk_test, blksz); + mrq.cmd = &cmd; + mrq.stop = &stop; + mrq.data = &data; + host->mrq = &mrq; + + mci_writel(host, TMOUT, ~0); + smpl = dw_mci_exynos_move_next_clksmpl(host); + + mmc_wait_for_req(mmc, &mrq); + + if (!cmd.error && !data.error) { + if (!memcmp(blk_pattern, blk_test, blksz)) + candiates |= (1 << smpl); + } else { + dev_dbg(host->dev, + "Tuning error: cmd.error:%d, data.error:%d\n", + cmd.error, data.error); + } + } while (start_smpl != smpl); + + found = dw_mci_exynos_get_best_clksmpl(candiates); + if (found >= 0) + dw_mci_exynos_set_clksmpl(host, found); + else + ret = -EIO; + + kfree(blk_test); + return ret; +} + /* Common capabilities of Exynos4/Exynos5 SoC */ static unsigned long exynos_dwmmc_caps[4] = { MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR | @@ -171,6 +400,7 @@ static const struct dw_mci_drv_data exynos_drv_data = { .prepare_command = dw_mci_exynos_prepare_command, .set_ios = dw_mci_exynos_set_ios, .parse_dt = dw_mci_exynos_parse_dt, + .execute_tuning = dw_mci_exynos_execute_tuning, }; static const struct of_device_id dw_mci_exynos_match[] = { @@ -180,6 +410,8 @@ static const struct of_device_id dw_mci_exynos_match[] = { .data = &exynos_drv_data, }, { .compatible = "samsung,exynos5420-dw-mshc", .data = &exynos_drv_data, }, + { .compatible = "samsung,exynos5420-dw-mshc-smu", + .data = &exynos_drv_data, }, {}, }; MODULE_DEVICE_TABLE(of, dw_mci_exynos_match); @@ -194,13 +426,20 @@ static int dw_mci_exynos_probe(struct platform_device *pdev) return dw_mci_pltfm_register(pdev, drv_data); } +const struct dev_pm_ops dw_mci_exynos_pmops = { + SET_SYSTEM_SLEEP_PM_OPS(dw_mci_exynos_suspend, dw_mci_exynos_resume) + .resume_noirq = dw_mci_exynos_resume_noirq, + .thaw_noirq = dw_mci_exynos_resume_noirq, + .restore_noirq = dw_mci_exynos_resume_noirq, +}; + static struct platform_driver dw_mci_exynos_pltfm_driver = { .probe = dw_mci_exynos_probe, .remove = __exit_p(dw_mci_pltfm_remove), .driver = { .name = "dwmmc_exynos", .of_match_table = dw_mci_exynos_match, - .pm = &dw_mci_pltfm_pmops, + .pm = &dw_mci_exynos_pmops, }, }; diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c index 20897529ea5e..5c4965655297 100644 --- a/drivers/mmc/host/dw_mmc-pltfm.c +++ b/drivers/mmc/host/dw_mmc-pltfm.c @@ -39,7 +39,6 @@ 
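/*
 * Aside: how dw_mci_exynos_get_best_clksmpl() above chooses a phase.
 * "candiates" carries one bit per CLKSEL sample point that passed
 * tuning; rotating it with ror8() and matching 0xc7 (0b11000111) finds
 * a phase whose two neighbours on either side also passed, i.e. the
 * centre of a five-wide passing window, with 0x83 (0b10000011) as the
 * three-wide fallback. Worked example (bitmap value is illustrative):
 * for candiates = 0x3e, phases 1-5 pass; ror8(0x3e, 3) = 0xc7, so
 * phase 3 is selected. Condensed restatement of the search:
 */
#include <linux/bitops.h>       /* ror8() */
#include <linux/types.h>

static s8 example_best_sample(u8 candiates)
{
        s8 i;

        for (i = 0; i < 8; i++)
                if ((ror8(candiates, i) & 0xc7) == 0xc7)
                        return i;       /* centre of a 5-sample passing run */
        for (i = 0; i < 8; i++)
                if ((ror8(candiates, i) & 0x83) == 0x83)
                        return i;       /* centre of a 3-sample passing run */
        return -1;                      /* tuning failed, no usable window */
}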
int dw_mci_pltfm_register(struct platform_device *pdev, { struct dw_mci *host; struct resource *regs; - int ret; host = devm_kzalloc(&pdev->dev, sizeof(struct dw_mci), GFP_KERNEL); if (!host) @@ -59,12 +58,6 @@ int dw_mci_pltfm_register(struct platform_device *pdev, if (IS_ERR(host->regs)) return PTR_ERR(host->regs); - if (drv_data && drv_data->init) { - ret = drv_data->init(host); - if (ret) - return ret; - } - platform_set_drvdata(pdev, host); return dw_mci_probe(host); } diff --git a/drivers/mmc/host/dw_mmc-socfpga.c b/drivers/mmc/host/dw_mmc-socfpga.c index 14b5961a851c..3e8e53ae3302 100644 --- a/drivers/mmc/host/dw_mmc-socfpga.c +++ b/drivers/mmc/host/dw_mmc-socfpga.c @@ -38,21 +38,6 @@ struct dw_mci_socfpga_priv_data { static int dw_mci_socfpga_priv_init(struct dw_mci *host) { - struct dw_mci_socfpga_priv_data *priv; - - priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) { - dev_err(host->dev, "mem alloc failed for private data\n"); - return -ENOMEM; - } - - priv->sysreg = syscon_regmap_lookup_by_compatible("altr,sys-mgr"); - if (IS_ERR(priv->sysreg)) { - dev_err(host->dev, "regmap for altr,sys-mgr lookup failed.\n"); - return PTR_ERR(priv->sysreg); - } - host->priv = priv; - return 0; } @@ -79,12 +64,24 @@ static void dw_mci_socfpga_prepare_command(struct dw_mci *host, u32 *cmdr) static int dw_mci_socfpga_parse_dt(struct dw_mci *host) { - struct dw_mci_socfpga_priv_data *priv = host->priv; + struct dw_mci_socfpga_priv_data *priv; struct device_node *np = host->dev->of_node; u32 timing[2]; u32 div = 0; int ret; + priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(host->dev, "mem alloc failed for private data\n"); + return -ENOMEM; + } + + priv->sysreg = syscon_regmap_lookup_by_compatible("altr,sys-mgr"); + if (IS_ERR(priv->sysreg)) { + dev_err(host->dev, "regmap for altr,sys-mgr lookup failed.\n"); + return PTR_ERR(priv->sysreg); + } + ret = of_property_read_u32(np, "altr,dw-mshc-ciu-div", &div); if (ret) dev_info(host->dev, "No dw-mshc-ciu-div specified, assuming 1"); @@ -96,6 +93,7 @@ static int dw_mci_socfpga_parse_dt(struct dw_mci *host) return ret; priv->hs_timing = SYSMGR_SDMMC_CTRL_SET(timing[0], timing[1]); + host->priv = priv; return 0; } @@ -113,7 +111,7 @@ static const struct of_device_id dw_mci_socfpga_match[] = { }; MODULE_DEVICE_TABLE(of, dw_mci_socfpga_match); -int dw_mci_socfpga_probe(struct platform_device *pdev) +static int dw_mci_socfpga_probe(struct platform_device *pdev) { const struct dw_mci_drv_data *drv_data; const struct of_device_id *match; @@ -128,7 +126,7 @@ static struct platform_driver dw_mci_socfpga_pltfm_driver = { .remove = __exit_p(dw_mci_pltfm_remove), .driver = { .name = "dwmmc_socfpga", - .of_match_table = of_match_ptr(dw_mci_socfpga_match), + .of_match_table = dw_mci_socfpga_match, .pm = &dw_mci_pltfm_pmops, }, }; diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 018f365e5ae4..4bce0deec362 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -29,6 +29,7 @@ #include <linux/irq.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> +#include <linux/mmc/sdio.h> #include <linux/mmc/dw_mmc.h> #include <linux/bitops.h> #include <linux/regulator/consumer.h> @@ -50,6 +51,9 @@ #define DW_MCI_RECV_STATUS 2 #define DW_MCI_DMA_THRESHOLD 16 +#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */ +#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */ + #ifdef CONFIG_MMC_DW_IDMAC #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \ SDMMC_IDMAC_INT_CES | 
SDMMC_IDMAC_INT_DU | \ @@ -76,42 +80,39 @@ struct idmac_desc { }; #endif /* CONFIG_MMC_DW_IDMAC */ -/** - * struct dw_mci_slot - MMC slot state - * @mmc: The mmc_host representing this slot. - * @host: The MMC controller this slot is using. - * @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX) - * @wp_gpio: If gpio_is_valid() we'll use this to read write protect. - * @ctype: Card type for this slot. - * @mrq: mmc_request currently being processed or waiting to be - * processed, or NULL when the slot is idle. - * @queue_node: List node for placing this node in the @queue list of - * &struct dw_mci. - * @clock: Clock rate configured by set_ios(). Protected by host->lock. - * @flags: Random state bits associated with the slot. - * @id: Number of this slot. - * @last_detect_state: Most recently observed card detect state. - */ -struct dw_mci_slot { - struct mmc_host *mmc; - struct dw_mci *host; - - int quirks; - int wp_gpio; - - u32 ctype; - - struct mmc_request *mrq; - struct list_head queue_node; +static const u8 tuning_blk_pattern_4bit[] = { + 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, + 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef, + 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb, + 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef, + 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c, + 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee, + 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff, + 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde, +}; - unsigned int clock; - unsigned long flags; -#define DW_MMC_CARD_PRESENT 0 -#define DW_MMC_CARD_NEED_INIT 1 - int id; - int last_detect_state; +static const u8 tuning_blk_pattern_8bit[] = { + 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, + 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc, + 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff, + 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff, + 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd, + 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb, + 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff, + 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff, + 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, + 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, + 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, + 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, + 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, + 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, + 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, + 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, }; +static inline bool dw_mci_fifo_reset(struct dw_mci *host); +static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host); + #if defined(CONFIG_DEBUG_FS) static int dw_mci_req_show(struct seq_file *s, void *v) { @@ -249,10 +250,15 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) cmdr = cmd->opcode; - if (cmdr == MMC_STOP_TRANSMISSION) + if (cmd->opcode == MMC_STOP_TRANSMISSION || + cmd->opcode == MMC_GO_IDLE_STATE || + cmd->opcode == MMC_GO_INACTIVE_STATE || + (cmd->opcode == SD_IO_RW_DIRECT && + ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT)) cmdr |= SDMMC_CMD_STOP; else - cmdr |= SDMMC_CMD_PRV_DAT_WAIT; + if (cmd->opcode != MMC_SEND_STATUS && cmd->data) + cmdr |= SDMMC_CMD_PRV_DAT_WAIT; if (cmd->flags & MMC_RSP_PRESENT) { /* We expect a response, so set this bit */ @@ -279,6 +285,40 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) return cmdr; } +static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd) +{ + struct mmc_command *stop; + u32 cmdr; + + if (!cmd->data) + return 0; + + stop = 
&host->stop_abort; + cmdr = cmd->opcode; + memset(stop, 0, sizeof(struct mmc_command)); + + if (cmdr == MMC_READ_SINGLE_BLOCK || + cmdr == MMC_READ_MULTIPLE_BLOCK || + cmdr == MMC_WRITE_BLOCK || + cmdr == MMC_WRITE_MULTIPLE_BLOCK) { + stop->opcode = MMC_STOP_TRANSMISSION; + stop->arg = 0; + stop->flags = MMC_RSP_R1B | MMC_CMD_AC; + } else if (cmdr == SD_IO_RW_EXTENDED) { + stop->opcode = SD_IO_RW_DIRECT; + stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) | + ((cmd->arg >> 28) & 0x7); + stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC; + } else { + return 0; + } + + cmdr = stop->opcode | SDMMC_CMD_STOP | + SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP; + + return cmdr; +} + static void dw_mci_start_command(struct dw_mci *host, struct mmc_command *cmd, u32 cmd_flags) { @@ -293,9 +333,10 @@ static void dw_mci_start_command(struct dw_mci *host, mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); } -static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data) +static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) { - dw_mci_start_command(host, data->stop, host->stop_cmdr); + struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort; + dw_mci_start_command(host, stop, host->stop_cmdr); } /* DMA interface functions */ @@ -304,10 +345,10 @@ static void dw_mci_stop_dma(struct dw_mci *host) if (host->using_dma) { host->dma_ops->stop(host); host->dma_ops->cleanup(host); - } else { - /* Data transfer was stopped by the interrupt handler */ - set_bit(EVENT_XFER_COMPLETE, &host->pending_events); } + + /* Data transfer was stopped by the interrupt handler */ + set_bit(EVENT_XFER_COMPLETE, &host->pending_events); } static int dw_mci_get_dma_dir(struct mmc_data *data) @@ -331,6 +372,14 @@ static void dw_mci_dma_cleanup(struct dw_mci *host) dw_mci_get_dma_dir(data)); } +static void dw_mci_idmac_reset(struct dw_mci *host) +{ + u32 bmod = mci_readl(host, BMOD); + /* Software reset of DMA */ + bmod |= SDMMC_IDMAC_SWRESET; + mci_writel(host, BMOD, bmod); +} + static void dw_mci_idmac_stop_dma(struct dw_mci *host) { u32 temp; @@ -344,6 +393,7 @@ static void dw_mci_idmac_stop_dma(struct dw_mci *host) /* Stop the IDMAC running */ temp = mci_readl(host, BMOD); temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); + temp |= SDMMC_IDMAC_SWRESET; mci_writel(host, BMOD, temp); } @@ -435,7 +485,7 @@ static int dw_mci_idmac_init(struct dw_mci *host) p->des3 = host->sg_dma; p->des0 = IDMAC_DES0_ER; - mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET); + dw_mci_idmac_reset(host); /* Mask out interrupts - get Tx & Rx complete only */ mci_writel(host, IDSTS, IDMAC_INT_CLR); @@ -532,6 +582,78 @@ static void dw_mci_post_req(struct mmc_host *mmc, data->host_cookie = 0; } +static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data) +{ +#ifdef CONFIG_MMC_DW_IDMAC + unsigned int blksz = data->blksz; + const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; + u32 fifo_width = 1 << host->data_shift; + u32 blksz_depth = blksz / fifo_width, fifoth_val; + u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers; + int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1; + + tx_wmark = (host->fifo_depth) / 2; + tx_wmark_invers = host->fifo_depth - tx_wmark; + + /* + * MSIZE is '1', + * if blksz is not a multiple of the FIFO width + */ + if (blksz % fifo_width) { + msize = 0; + rx_wmark = 1; + goto done; + } + + do { + if (!((blksz_depth % mszs[idx]) || + (tx_wmark_invers % mszs[idx]))) { + msize = idx; + rx_wmark = mszs[idx] - 1; + break; + } + } while (--idx > 0); + /* + * If idx is '0', 
it won't be tried + * Thus, initial values are used + */ +done: + fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark); + mci_writel(host, FIFOTH, fifoth_val); +#endif +} + +static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data) +{ + unsigned int blksz = data->blksz; + u32 blksz_depth, fifo_depth; + u16 thld_size; + + WARN_ON(!(data->flags & MMC_DATA_READ)); + + if (host->timing != MMC_TIMING_MMC_HS200 && + host->timing != MMC_TIMING_UHS_SDR104) + goto disable; + + blksz_depth = blksz / (1 << host->data_shift); + fifo_depth = host->fifo_depth; + + if (blksz_depth > fifo_depth) + goto disable; + + /* + * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz' + * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz + * Currently just choose blksz. + */ + thld_size = blksz; + mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1)); + return; + +disable: + mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0)); +} + static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) { int sg_len; @@ -556,6 +678,14 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, sg_len); + /* + * Decide the MSIZE and RX/TX Watermark. + * If current block size is same with previous size, + * no need to update fifoth. + */ + if (host->prev_blksz != data->blksz) + dw_mci_adjust_fifoth(host, data); + /* Enable the DMA interface */ temp = mci_readl(host, CTRL); temp |= SDMMC_CTRL_DMA_ENABLE; @@ -581,10 +711,12 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) host->sg = NULL; host->data = data; - if (data->flags & MMC_DATA_READ) + if (data->flags & MMC_DATA_READ) { host->dir_status = DW_MCI_RECV_STATUS; - else + dw_mci_ctrl_rd_thld(host, data); + } else { host->dir_status = DW_MCI_SEND_STATUS; + } if (dw_mci_submit_data_dma(host, data)) { int flags = SG_MITER_ATOMIC; @@ -606,6 +738,21 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) temp = mci_readl(host, CTRL); temp &= ~SDMMC_CTRL_DMA_ENABLE; mci_writel(host, CTRL, temp); + + /* + * Use the initial fifoth_val for PIO mode. + * If next issued data may be transferred by DMA mode, + * prev_blksz should be invalidated. + */ + mci_writel(host, FIFOTH, host->fifoth_val); + host->prev_blksz = 0; + } else { + /* + * Keep the current block size. + * It will be used to decide whether to update + * fifoth register next time. + */ + host->prev_blksz = data->blksz; } } @@ -632,24 +779,31 @@ static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg) static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) { struct dw_mci *host = slot->host; + unsigned int clock = slot->clock; u32 div; u32 clk_en_a; - if (slot->clock != host->current_speed || force_clkinit) { - div = host->bus_hz / slot->clock; - if (host->bus_hz % slot->clock && host->bus_hz > slot->clock) + if (!clock) { + mci_writel(host, CLKENA, 0); + mci_send_cmd(slot, + SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); + } else if (clock != host->current_speed || force_clkinit) { + div = host->bus_hz / clock; + if (host->bus_hz % clock && host->bus_hz > clock) /* * move the + 1 after the divide to prevent * over-clocking the card. */ div += 1; - div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0; + div = (host->bus_hz != clock) ?
DIV_ROUND_UP(div, 2) : 0; - dev_info(&slot->mmc->class_dev, - "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ" - " div = %d)\n", slot->id, host->bus_hz, slot->clock, - div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div); + if ((clock << div) != slot->__clk_old || force_clkinit) + dev_info(&slot->mmc->class_dev, + "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n", + slot->id, host->bus_hz, clock, + div ? ((host->bus_hz / div) >> 1) : + host->bus_hz, div); /* disable clock */ mci_writel(host, CLKENA, 0); @@ -676,9 +830,12 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); - host->current_speed = slot->clock; + /* keep track of the clock, reflecting the clock divider */ + slot->__clk_old = clock << div; } + host->current_speed = clock; + /* Set the current slot bus width */ mci_writel(host, CTYPE, (slot->ctype << slot->id)); } @@ -700,7 +857,9 @@ static void __dw_mci_start_request(struct dw_mci *host, host->pending_events = 0; host->completed_events = 0; + host->cmd_status = 0; host->data_status = 0; + host->dir_status = 0; data = cmd->data; if (data) { @@ -724,6 +883,8 @@ static void __dw_mci_start_request(struct dw_mci *host, if (mrq->stop) host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); + else + host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd); } static void dw_mci_start_request(struct dw_mci *host, @@ -806,14 +967,13 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) regs &= ~((0x1 << slot->id) << 16); mci_writel(slot->host, UHS_REG, regs); + slot->host->timing = ios->timing; - if (ios->clock) { - /* - * Use mirror of ios->clock to prevent race with mmc - * core ios update when finding the minimum. - */ - slot->clock = ios->clock; - } + /* + * Use mirror of ios->clock to prevent race with mmc + * core ios update when finding the minimum.
+ */ + slot->clock = ios->clock; if (drv_data && drv_data->set_ios) drv_data->set_ios(slot->host, ios); @@ -939,6 +1099,38 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) } } +static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + struct dw_mci *host = slot->host; + const struct dw_mci_drv_data *drv_data = host->drv_data; + struct dw_mci_tuning_data tuning_data; + int err = -ENOSYS; + + if (opcode == MMC_SEND_TUNING_BLOCK_HS200) { + if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) { + tuning_data.blk_pattern = tuning_blk_pattern_8bit; + tuning_data.blksz = sizeof(tuning_blk_pattern_8bit); + } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) { + tuning_data.blk_pattern = tuning_blk_pattern_4bit; + tuning_data.blksz = sizeof(tuning_blk_pattern_4bit); + } else { + return -EINVAL; + } + } else if (opcode == MMC_SEND_TUNING_BLOCK) { + tuning_data.blk_pattern = tuning_blk_pattern_4bit; + tuning_data.blksz = sizeof(tuning_blk_pattern_4bit); + } else { + dev_err(host->dev, + "Undefined command(%d) for tuning\n", opcode); + return -EINVAL; + } + + if (drv_data && drv_data->execute_tuning) + err = drv_data->execute_tuning(slot, opcode, &tuning_data); + return err; +} + static const struct mmc_host_ops dw_mci_ops = { .request = dw_mci_request, .pre_req = dw_mci_pre_req, @@ -947,6 +1139,7 @@ static const struct mmc_host_ops dw_mci_ops = { .get_ro = dw_mci_get_ro, .get_cd = dw_mci_get_cd, .enable_sdio_irq = dw_mci_enable_sdio_irq, + .execute_tuning = dw_mci_execute_tuning, }; static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) @@ -978,7 +1171,7 @@ static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) spin_lock(&host->lock); } -static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) +static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) { u32 status = host->cmd_status; @@ -1012,12 +1205,52 @@ static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd /* newer ip versions need a delay between retries */ if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY) mdelay(20); + } - if (cmd->data) { - dw_mci_stop_dma(host); - host->data = NULL; + return cmd->error; +} + +static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data) +{ + u32 status = host->data_status; + + if (status & DW_MCI_DATA_ERROR_FLAGS) { + if (status & SDMMC_INT_DRTO) { + data->error = -ETIMEDOUT; + } else if (status & SDMMC_INT_DCRC) { + data->error = -EILSEQ; + } else if (status & SDMMC_INT_EBE) { + if (host->dir_status == + DW_MCI_SEND_STATUS) { + /* + * No data CRC status was returned. + * The number of bytes transferred + * will be exaggerated in PIO mode. 
+ */ + data->bytes_xfered = 0; + data->error = -ETIMEDOUT; + } else if (host->dir_status == + DW_MCI_RECV_STATUS) { + data->error = -EIO; + } + } else { + /* SDMMC_INT_SBE is included */ + data->error = -EIO; } + + dev_err(host->dev, "data error, status 0x%08x\n", status); + + /* + * After an error, there may be data lingering + * in the FIFO + */ + dw_mci_fifo_reset(host); + } else { + data->bytes_xfered = data->blocks * data->blksz; + data->error = 0; } + + return data->error; } static void dw_mci_tasklet_func(unsigned long priv) @@ -1025,14 +1258,16 @@ static void dw_mci_tasklet_func(unsigned long priv) struct dw_mci *host = (struct dw_mci *)priv; struct mmc_data *data; struct mmc_command *cmd; + struct mmc_request *mrq; enum dw_mci_state state; enum dw_mci_state prev_state; - u32 status, ctrl; + unsigned int err; spin_lock(&host->lock); state = host->state; data = host->data; + mrq = host->mrq; do { prev_state = state; @@ -1049,16 +1284,23 @@ static void dw_mci_tasklet_func(unsigned long priv) cmd = host->cmd; host->cmd = NULL; set_bit(EVENT_CMD_COMPLETE, &host->completed_events); - dw_mci_command_complete(host, cmd); - if (cmd == host->mrq->sbc && !cmd->error) { + err = dw_mci_command_complete(host, cmd); + if (cmd == mrq->sbc && !err) { prev_state = state = STATE_SENDING_CMD; __dw_mci_start_request(host, host->cur_slot, - host->mrq->cmd); + mrq->cmd); goto unlock; } - if (!host->mrq->data || cmd->error) { - dw_mci_request_end(host, host->mrq); + if (cmd->data && err) { + dw_mci_stop_dma(host); + send_stop_abort(host, data); + state = STATE_SENDING_STOP; + break; + } + + if (!cmd->data || err) { + dw_mci_request_end(host, mrq); goto unlock; } @@ -1069,8 +1311,7 @@ static void dw_mci_tasklet_func(unsigned long priv) if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) { dw_mci_stop_dma(host); - if (data->stop) - send_stop_cmd(host, data); + send_stop_abort(host, data); state = STATE_DATA_ERROR; break; } @@ -1090,60 +1331,27 @@ static void dw_mci_tasklet_func(unsigned long priv) host->data = NULL; set_bit(EVENT_DATA_COMPLETE, &host->completed_events); - status = host->data_status; - - if (status & DW_MCI_DATA_ERROR_FLAGS) { - if (status & SDMMC_INT_DRTO) { - data->error = -ETIMEDOUT; - } else if (status & SDMMC_INT_DCRC) { - data->error = -EILSEQ; - } else if (status & SDMMC_INT_EBE && - host->dir_status == - DW_MCI_SEND_STATUS) { - /* - * No data CRC status was returned. - * The number of bytes transferred will - * be exaggerated in PIO mode. - */ - data->bytes_xfered = 0; - data->error = -ETIMEDOUT; - } else { - dev_err(host->dev, - "data FIFO error " - "(status=%08x)\n", - status); - data->error = -EIO; - } - /* - * After an error, there may be data lingering - * in the FIFO, so reset it - doing so - * generates a block interrupt, hence setting - * the scatter-gather pointer to NULL. 
- */ - sg_miter_stop(&host->sg_miter); - host->sg = NULL; - ctrl = mci_readl(host, CTRL); - ctrl |= SDMMC_CTRL_FIFO_RESET; - mci_writel(host, CTRL, ctrl); - } else { - data->bytes_xfered = data->blocks * data->blksz; - data->error = 0; - } + err = dw_mci_data_complete(host, data); - if (!data->stop) { - dw_mci_request_end(host, host->mrq); - goto unlock; - } + if (!err) { + if (!data->stop || mrq->sbc) { + if (mrq->sbc) + data->stop->error = 0; + dw_mci_request_end(host, mrq); + goto unlock; + } - if (host->mrq->sbc && !data->error) { - data->stop->error = 0; - dw_mci_request_end(host, host->mrq); - goto unlock; + /* stop command for open-ended transfer */ + if (data->stop) + send_stop_abort(host, data); } + /* + * If err is non-zero, the + * stop-abort command has already been issued. + */ prev_state = state = STATE_SENDING_STOP; - if (!data->error) - send_stop_cmd(host, data); + /* fall through */ case STATE_SENDING_STOP: @@ -1151,9 +1359,19 @@ &host->pending_events)) break; + /* CMD error in data command */ + if (mrq->cmd->error && mrq->data) + dw_mci_fifo_reset(host); + host->cmd = NULL; - dw_mci_command_complete(host, host->mrq->stop); - dw_mci_request_end(host, host->mrq); + host->data = NULL; + + if (mrq->stop) + dw_mci_command_complete(host, mrq->stop); + else + host->cmd_status = 0; + + dw_mci_request_end(host, mrq); goto unlock; case STATE_DATA_ERROR: @@ -1697,7 +1915,6 @@ static void dw_mci_work_routine_card(struct work_struct *work) struct mmc_host *mmc = slot->mmc; struct mmc_request *mrq; int present; - u32 ctrl; present = dw_mci_get_cd(mmc); while (present != slot->last_detect_state) { @@ -1736,11 +1953,10 @@ static void dw_mci_work_routine_card(struct work_struct *work) case STATE_DATA_ERROR: if (mrq->data->error == -EINPROGRESS) mrq->data->error = -ENOMEDIUM; - if (!mrq->stop) - break; /* fall through */ case STATE_SENDING_STOP: - mrq->stop->error = -ENOMEDIUM; + if (mrq->stop) + mrq->stop->error = -ENOMEDIUM; break; } @@ -1763,23 +1979,10 @@ static void dw_mci_work_routine_card(struct work_struct *work) if (present == 0) { clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); - /* - * Clear down the FIFO - doing so generates a - * block interrupt, hence setting the - * scatter-gather pointer to NULL.
- */ - sg_miter_stop(&host->sg_miter); - host->sg = NULL; - - ctrl = mci_readl(host, CTRL); - ctrl |= SDMMC_CTRL_FIFO_RESET; - mci_writel(host, CTRL, ctrl); - + /* Clear down the FIFO */ + dw_mci_fifo_reset(host); #ifdef CONFIG_MMC_DW_IDMAC - ctrl = mci_readl(host, BMOD); - /* Software reset of DMA */ - ctrl |= SDMMC_IDMAC_SWRESET; - mci_writel(host, BMOD, ctrl); + dw_mci_idmac_reset(host); #endif } @@ -1901,6 +2104,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) struct dw_mci_slot *slot; const struct dw_mci_drv_data *drv_data = host->drv_data; int ctrl_id, ret; + u32 freq[2]; u8 bus_width; mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); @@ -1916,8 +2120,14 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id); mmc->ops = &dw_mci_ops; - mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510); - mmc->f_max = host->bus_hz; + if (of_property_read_u32_array(host->dev->of_node, + "clock-freq-min-max", freq, 2)) { + mmc->f_min = DW_MCI_FREQ_MIN; + mmc->f_max = DW_MCI_FREQ_MAX; + } else { + mmc->f_min = freq[0]; + mmc->f_max = freq[1]; + } if (host->pdata->get_ocr) mmc->ocr_avail = host->pdata->get_ocr(id); @@ -1964,9 +2174,6 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) mmc->caps |= MMC_CAP_4_BIT_DATA; } - if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) - mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; - if (host->pdata->blk_settings) { mmc->max_segs = host->pdata->blk_settings->max_segs; mmc->max_blk_size = host->pdata->blk_settings->max_blk_size; @@ -2008,12 +2215,6 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) /* Card initially undetected */ slot->last_detect_state = 0; - /* - * Card may have been plugged in prior to boot so we - * need to run the detect tasklet - */ - queue_work(host->card_workqueue, &host->card_work); - return 0; err_setup_bus: @@ -2074,36 +2275,57 @@ no_dma: return; } -static bool mci_wait_reset(struct device *dev, struct dw_mci *host) +static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset) { unsigned long timeout = jiffies + msecs_to_jiffies(500); - unsigned int ctrl; + u32 ctrl; - mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | - SDMMC_CTRL_DMA_RESET)); + ctrl = mci_readl(host, CTRL); + ctrl |= reset; + mci_writel(host, CTRL, ctrl); /* wait till resets clear */ do { ctrl = mci_readl(host, CTRL); - if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | - SDMMC_CTRL_DMA_RESET))) + if (!(ctrl & reset)) return true; } while (time_before(jiffies, timeout)); - dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl); + dev_err(host->dev, + "Timeout resetting block (ctrl reset %#x)\n", + ctrl & reset); return false; } +static inline bool dw_mci_fifo_reset(struct dw_mci *host) +{ + /* + * Resetting generates a block interrupt, hence setting + * the scatter-gather pointer to NULL.
+ */ + if (host->sg) { + sg_miter_stop(&host->sg_miter); + host->sg = NULL; + } + + return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET); +} + +static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host) +{ + return dw_mci_ctrl_reset(host, + SDMMC_CTRL_FIFO_RESET | + SDMMC_CTRL_RESET | + SDMMC_CTRL_DMA_RESET); +} + #ifdef CONFIG_OF static struct dw_mci_of_quirks { char *quirk; int id; } of_quirks[] = { { - .quirk = "supports-highspeed", - .id = DW_MCI_QUIRK_HIGHSPEED, - }, { .quirk = "broken-cd", .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION, }, @@ -2158,6 +2380,15 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) if (of_find_property(np, "enable-sdio-wakeup", NULL)) pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; + if (of_find_property(np, "supports-highspeed", NULL)) + pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; + + if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL)) + pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR; + + if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL)) + pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR; + return pdata; } @@ -2221,6 +2452,15 @@ int dw_mci_probe(struct dw_mci *host) host->bus_hz = clk_get_rate(host->ciu_clk); } + if (drv_data && drv_data->init) { + ret = drv_data->init(host); + if (ret) { + dev_err(host->dev, + "implementation specific init failed\n"); + goto err_clk_ciu; + } + } + if (drv_data && drv_data->setup_clock) { ret = drv_data->setup_clock(host); if (ret) { @@ -2287,7 +2527,7 @@ int dw_mci_probe(struct dw_mci *host) } /* Reset all blocks */ - if (!mci_wait_reset(host->dev, host)) + if (!dw_mci_ctrl_all_reset(host)) return -ENODEV; host->dma_ops = host->pdata->dma_ops; @@ -2317,8 +2557,8 @@ int dw_mci_probe(struct dw_mci *host) fifo_size = host->pdata->fifo_depth; } host->fifo_depth = fifo_size; - host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) | - ((fifo_size/2) << 0)); + host->fifoth_val = + SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2); mci_writel(host, FIFOTH, host->fifoth_val); /* disable clock to CIU */ @@ -2456,23 +2696,6 @@ EXPORT_SYMBOL(dw_mci_remove); */ int dw_mci_suspend(struct dw_mci *host) { - int i, ret = 0; - - for (i = 0; i < host->num_slots; i++) { - struct dw_mci_slot *slot = host->slot[i]; - if (!slot) - continue; - ret = mmc_suspend_host(slot->mmc); - if (ret < 0) { - while (--i >= 0) { - slot = host->slot[i]; - if (slot) - mmc_resume_host(host->slot[i]->mmc); - } - return ret; - } - } - if (host->vmmc) regulator_disable(host->vmmc); @@ -2493,7 +2716,7 @@ int dw_mci_resume(struct dw_mci *host) } } - if (!mci_wait_reset(host->dev, host)) { + if (!dw_mci_ctrl_all_reset(host)) { ret = -ENODEV; return ret; } @@ -2501,8 +2724,15 @@ int dw_mci_resume(struct dw_mci *host) if (host->use_dma && host->dma_ops->init) host->dma_ops->init(host); - /* Restore the old value at FIFOTH register */ + /* + * Restore the initial value at FIFOTH register + * And Invalidate the prev_blksz with zero + */ mci_writel(host, FIFOTH, host->fifoth_val); + host->prev_blksz = 0; + + /* Put in max timeout */ + mci_writel(host, TMOUT, 0xFFFFFFFF); mci_writel(host, RINTSTS, 0xFFFFFFFF); mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | @@ -2518,10 +2748,6 @@ int dw_mci_resume(struct dw_mci *host) dw_mci_set_ios(slot->mmc, &slot->mmc->ios); dw_mci_setup_bus(slot, true); } - - ret = mmc_resume_host(host->slot[i]->mmc); - if (ret < 0) - return ret; } return 0; } diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index 81b29941c5b9..6bf24ab917e6 100644 --- a/drivers/mmc/host/dw_mmc.h +++ 
b/drivers/mmc/host/dw_mmc.h @@ -53,6 +53,7 @@ #define SDMMC_IDINTEN 0x090 #define SDMMC_DSCADDR 0x094 #define SDMMC_BUFADDR 0x098 +#define SDMMC_CDTHRCTL 0x100 #define SDMMC_DATA(x) (x) /* @@ -128,6 +129,10 @@ #define SDMMC_CMD_INDX(n) ((n) & 0x1F) /* Status register defines */ #define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FFF) +/* FIFOTH register defines */ +#define SDMMC_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \ + ((r) & 0xFFF) << 16 | \ + ((t) & 0xFFF)) /* Internal DMAC interrupt defines */ #define SDMMC_IDMAC_INT_AI BIT(9) #define SDMMC_IDMAC_INT_NI BIT(8) @@ -142,6 +147,8 @@ #define SDMMC_IDMAC_SWRESET BIT(0) /* Version ID register define */ #define SDMMC_GET_VERID(x) ((x) & 0xFFFF) +/* Card read threshold */ +#define SDMMC_SET_RD_THLD(v, x) (((v) & 0x1FFF) << 16 | (x)) /* Register access macros */ #define mci_readl(dev, reg) \ @@ -184,6 +191,52 @@ extern int dw_mci_resume(struct dw_mci *host); #endif /** + * struct dw_mci_slot - MMC slot state + * @mmc: The mmc_host representing this slot. + * @host: The MMC controller this slot is using. + * @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX) + * @wp_gpio: If gpio_is_valid() we'll use this to read write protect. + * @ctype: Card type for this slot. + * @mrq: mmc_request currently being processed or waiting to be + * processed, or NULL when the slot is idle. + * @queue_node: List node for placing this node in the @queue list of + * &struct dw_mci. + * @clock: Clock rate configured by set_ios(). Protected by host->lock. + * @__clk_old: The last updated clock with reflecting clock divider. + * Keeping track of this helps us to avoid spamming the console + * with CONFIG_MMC_CLKGATE. + * @flags: Random state bits associated with the slot. + * @id: Number of this slot. + * @last_detect_state: Most recently observed card detect state. + */ +struct dw_mci_slot { + struct mmc_host *mmc; + struct dw_mci *host; + + int quirks; + int wp_gpio; + + u32 ctype; + + struct mmc_request *mrq; + struct list_head queue_node; + + unsigned int clock; + unsigned int __clk_old; + + unsigned long flags; +#define DW_MMC_CARD_PRESENT 0 +#define DW_MMC_CARD_NEED_INIT 1 + int id; + int last_detect_state; +}; + +struct dw_mci_tuning_data { + const u8 *blk_pattern; + unsigned int blksz; +}; + +/** * dw_mci driver data - dw-mshc implementation specific driver data. * @caps: mmc subsystem specified capabilities of the controller(s). * @init: early implementation specific initialization. 
@@ -203,5 +256,7 @@ struct dw_mci_drv_data { void (*prepare_command)(struct dw_mci *host, u32 *cmdr); void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios); int (*parse_dt)(struct dw_mci *host); + int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode, + struct dw_mci_tuning_data *tuning_data); }; #endif /* _DW_MMC_H_ */ diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 66516339e3a0..de2139cf3444 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -880,8 +880,6 @@ static int jz4740_mmc_suspend(struct device *dev) { struct jz4740_mmc_host *host = dev_get_drvdata(dev); - mmc_suspend_host(host->mmc); - jz_gpio_bulk_suspend(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); return 0; @@ -893,8 +891,6 @@ static int jz4740_mmc_resume(struct device *dev) jz_gpio_bulk_resume(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); - mmc_resume_host(host->mmc); - return 0; } diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index d135c76c4855..f32057972dd7 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1730,37 +1730,28 @@ static int mmci_suspend(struct device *dev) { struct amba_device *adev = to_amba_device(dev); struct mmc_host *mmc = amba_get_drvdata(adev); - int ret = 0; if (mmc) { struct mmci_host *host = mmc_priv(mmc); - - ret = mmc_suspend_host(mmc); - if (ret == 0) { - pm_runtime_get_sync(dev); - writel(0, host->base + MMCIMASK0); - } + pm_runtime_get_sync(dev); + writel(0, host->base + MMCIMASK0); } - return ret; + return 0; } static int mmci_resume(struct device *dev) { struct amba_device *adev = to_amba_device(dev); struct mmc_host *mmc = amba_get_drvdata(adev); - int ret = 0; if (mmc) { struct mmci_host *host = mmc_priv(mmc); - writel(MCI_IRQENABLE, host->base + MMCIMASK0); pm_runtime_put(dev); - - ret = mmc_resume_host(mmc); } - return ret; + return 0; } #endif diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c index b900de4e7e94..9405ecdaf6cf 100644 --- a/drivers/mmc/host/msm_sdcc.c +++ b/drivers/mmc/host/msm_sdcc.c @@ -1416,28 +1416,10 @@ ioremap_free: } #ifdef CONFIG_PM -#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ -static void -do_resume_work(struct work_struct *work) -{ - struct msmsdcc_host *host = - container_of(work, struct msmsdcc_host, resume_task); - struct mmc_host *mmc = host->mmc; - - if (mmc) { - mmc_resume_host(mmc); - if (host->stat_irq) - enable_irq(host->stat_irq); - } -} -#endif - - static int msmsdcc_suspend(struct platform_device *dev, pm_message_t state) { struct mmc_host *mmc = mmc_get_drvdata(dev); - int rc = 0; if (mmc) { struct msmsdcc_host *host = mmc_priv(mmc); @@ -1445,14 +1427,11 @@ msmsdcc_suspend(struct platform_device *dev, pm_message_t state) if (host->stat_irq) disable_irq(host->stat_irq); - if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) - rc = mmc_suspend_host(mmc); - if (!rc) - msmsdcc_writel(host, 0, MMCIMASK0); + msmsdcc_writel(host, 0, MMCIMASK0); if (host->clks_on) msmsdcc_disable_clocks(host, 0); } - return rc; + return 0; } static int @@ -1467,8 +1446,6 @@ msmsdcc_resume(struct platform_device *dev) msmsdcc_writel(host, host->saved_irq0mask, MMCIMASK0); - if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) - mmc_resume_host(mmc); if (host->stat_irq) enable_irq(host->stat_irq); #if BUSCLK_PWRSAVE diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index deecee08c288..45aa2206741d 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -775,9 +775,9 @@ static int mvsd_probe(struct platform_device *pdev) 
spin_lock_init(&host->lock); - host->base = devm_request_and_ioremap(&pdev->dev, r); - if (!host->base) { - ret = -ENOMEM; + host->base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); goto out; } @@ -838,33 +838,6 @@ static int mvsd_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int mvsd_suspend(struct platform_device *dev, pm_message_t state) -{ - struct mmc_host *mmc = platform_get_drvdata(dev); - int ret = 0; - - if (mmc) - ret = mmc_suspend_host(mmc); - - return ret; -} - -static int mvsd_resume(struct platform_device *dev) -{ - struct mmc_host *mmc = platform_get_drvdata(dev); - int ret = 0; - - if (mmc) - ret = mmc_resume_host(mmc); - - return ret; -} -#else -#define mvsd_suspend NULL -#define mvsd_resume NULL -#endif - static const struct of_device_id mvsdio_dt_ids[] = { { .compatible = "marvell,orion-sdio" }, { /* sentinel */ } @@ -874,8 +847,6 @@ MODULE_DEVICE_TABLE(of, mvsdio_dt_ids); static struct platform_driver mvsd_driver = { .probe = mvsd_probe, .remove = mvsd_remove, - .suspend = mvsd_suspend, - .resume = mvsd_resume, .driver = { .name = DRIVER_NAME, .of_match_table = mvsdio_dt_ids, diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index c174c6a0d224..f7199c83f5cf 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -1250,28 +1250,20 @@ static int mxcmci_suspend(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct mxcmci_host *host = mmc_priv(mmc); - int ret = 0; - if (mmc) - ret = mmc_suspend_host(mmc); clk_disable_unprepare(host->clk_per); clk_disable_unprepare(host->clk_ipg); - - return ret; + return 0; } static int mxcmci_resume(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct mxcmci_host *host = mmc_priv(mmc); - int ret = 0; clk_prepare_enable(host->clk_per); clk_prepare_enable(host->clk_ipg); - if (mmc) - ret = mmc_resume_host(mmc); - - return ret; + return 0; } static const struct dev_pm_ops mxcmci_pm_ops = { diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index e1fa3ef735e0..50fc9df791b2 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -724,13 +724,9 @@ static int mxs_mmc_suspend(struct device *dev) struct mmc_host *mmc = dev_get_drvdata(dev); struct mxs_mmc_host *host = mmc_priv(mmc); struct mxs_ssp *ssp = &host->ssp; - int ret = 0; - - ret = mmc_suspend_host(mmc); clk_disable_unprepare(ssp->clk); - - return ret; + return 0; } static int mxs_mmc_resume(struct device *dev) @@ -738,13 +734,9 @@ static int mxs_mmc_resume(struct device *dev) struct mmc_host *mmc = dev_get_drvdata(dev); struct mxs_mmc_host *host = mmc_priv(mmc); struct mxs_ssp *ssp = &host->ssp; - int ret = 0; clk_prepare_enable(ssp->clk); - - ret = mmc_resume_host(mmc); - - return ret; + return 0; } static const struct dev_pm_ops mxs_mmc_pm_ops = { diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index b94f38ec2a83..0b10a9030f4e 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -128,7 +128,6 @@ struct mmc_omap_slot { struct mmc_omap_host { int initialized; - int suspended; struct mmc_request * mrq; struct mmc_command * cmd; struct mmc_data * data; @@ -1513,61 +1512,9 @@ static int mmc_omap_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg) -{ - int i, ret = 0; - struct mmc_omap_host *host = platform_get_drvdata(pdev); - - if (host == NULL || host->suspended) - return 0; - 
- for (i = 0; i < host->nr_slots; i++) { - struct mmc_omap_slot *slot; - - slot = host->slots[i]; - ret = mmc_suspend_host(slot->mmc); - if (ret < 0) { - while (--i >= 0) { - slot = host->slots[i]; - mmc_resume_host(slot->mmc); - } - return ret; - } - } - host->suspended = 1; - return 0; -} - -static int mmc_omap_resume(struct platform_device *pdev) -{ - int i, ret = 0; - struct mmc_omap_host *host = platform_get_drvdata(pdev); - - if (host == NULL || !host->suspended) - return 0; - - for (i = 0; i < host->nr_slots; i++) { - struct mmc_omap_slot *slot; - slot = host->slots[i]; - ret = mmc_resume_host(slot->mmc); - if (ret < 0) - return ret; - - host->suspended = 0; - } - return 0; -} -#else -#define mmc_omap_suspend NULL -#define mmc_omap_resume NULL -#endif - static struct platform_driver mmc_omap_driver = { .probe = mmc_omap_probe, .remove = mmc_omap_remove, - .suspend = mmc_omap_suspend, - .resume = mmc_omap_resume, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 6ac63df645c4..dbd32ad3b749 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -75,6 +75,7 @@ #define ICE 0x1 #define ICS 0x2 #define CEN (1 << 2) +#define CLKD_MAX 0x3FF /* max clock divisor: 1023 */ #define CLKD_MASK 0x0000FFC0 #define CLKD_SHIFT 6 #define DTO_MASK 0x000F0000 @@ -119,7 +120,8 @@ BRR_EN | BWR_EN | TC_EN | CC_EN) #define MMC_AUTOSUSPEND_DELAY 100 -#define MMC_TIMEOUT_MS 20 +#define MMC_TIMEOUT_MS 20 /* 20 mSec */ +#define MMC_TIMEOUT_US 20000 /* 20000 micro Sec */ #define OMAP_MMC_MIN_CLOCK 400000 #define OMAP_MMC_MAX_CLOCK 52000000 #define DRIVER_NAME "omap_hsmmc" @@ -171,6 +173,10 @@ struct omap_hsmmc_host { unsigned char bus_mode; unsigned char power_mode; int suspended; + u32 con; + u32 hctl; + u32 sysctl; + u32 capa; int irq; int use_dma, dma_ch; struct dma_chan *tx_chan; @@ -183,7 +189,6 @@ struct omap_hsmmc_host { int use_reg; int req_in_progress; struct omap_hsmmc_next next_data; - struct omap_mmc_platform_data *pdata; }; @@ -493,8 +498,8 @@ static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios) if (ios->clock) { dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock); - if (dsor > 250) - dsor = 250; + if (dsor > CLKD_MAX) + dsor = CLKD_MAX; } return dsor; @@ -597,25 +602,20 @@ static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host) static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) { struct mmc_ios *ios = &host->mmc->ios; - struct omap_mmc_platform_data *pdata = host->pdata; - int context_loss = 0; u32 hctl, capa; unsigned long timeout; - if (pdata->get_context_loss_count) { - context_loss = pdata->get_context_loss_count(host->dev); - if (context_loss < 0) - return 1; - } - - dev_dbg(mmc_dev(host->mmc), "context was %slost\n", - context_loss == host->context_loss ? 
"not " : ""); - if (host->context_loss == context_loss) - return 1; - if (!OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) return 1; + if (host->con == OMAP_HSMMC_READ(host->base, CON) && + host->hctl == OMAP_HSMMC_READ(host->base, HCTL) && + host->sysctl == OMAP_HSMMC_READ(host->base, SYSCTL) && + host->capa == OMAP_HSMMC_READ(host->base, CAPA)) + return 0; + + host->context_loss++; + if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { if (host->power_mode != MMC_POWER_OFF && (1 << ios->vdd) <= MMC_VDD_23_24) @@ -655,9 +655,8 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) omap_hsmmc_set_bus_mode(host); out: - host->context_loss = context_loss; - - dev_dbg(mmc_dev(host->mmc), "context is restored\n"); + dev_dbg(mmc_dev(host->mmc), "context is restored: restore count %d\n", + host->context_loss); return 0; } @@ -666,15 +665,10 @@ out: */ static void omap_hsmmc_context_save(struct omap_hsmmc_host *host) { - struct omap_mmc_platform_data *pdata = host->pdata; - int context_loss; - - if (pdata->get_context_loss_count) { - context_loss = pdata->get_context_loss_count(host->dev); - if (context_loss < 0) - return; - host->context_loss = context_loss; - } + host->con = OMAP_HSMMC_READ(host->base, CON); + host->hctl = OMAP_HSMMC_READ(host->base, HCTL); + host->sysctl = OMAP_HSMMC_READ(host->base, SYSCTL); + host->capa = OMAP_HSMMC_READ(host->base, CAPA); } #else @@ -975,8 +969,7 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host, unsigned long bit) { unsigned long i = 0; - unsigned long limit = (loops_per_jiffy * - msecs_to_jiffies(MMC_TIMEOUT_MS)); + unsigned long limit = MMC_TIMEOUT_US; OMAP_HSMMC_WRITE(host->base, SYSCTL, OMAP_HSMMC_READ(host->base, SYSCTL) | bit); @@ -988,13 +981,13 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host, if (mmc_slot(host).features & HSMMC_HAS_UPDATED_RESET) { while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit)) && (i++ < limit)) - cpu_relax(); + udelay(1); } i = 0; while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) && (i++ < limit)) - cpu_relax(); + udelay(1); if (OMAP_HSMMC_READ(host->base, SYSCTL) & bit) dev_err(mmc_dev(host->mmc), @@ -1178,9 +1171,6 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id) struct omap_mmc_slot_data *slot = &mmc_slot(host); int carddetect; - if (host->suspended) - return IRQ_HANDLED; - sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch"); if (slot->card_detect) @@ -1635,18 +1625,9 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data) { struct mmc_host *mmc = s->private; struct omap_hsmmc_host *host = mmc_priv(mmc); - int context_loss = 0; - - if (host->pdata->get_context_loss_count) - context_loss = host->pdata->get_context_loss_count(host->dev); - seq_printf(s, "mmc%d:\n ctx_loss:\t%d:%d\n\nregs:\n", - mmc->index, host->context_loss, context_loss); - - if (host->suspended) { - seq_printf(s, "host suspended, can't read registers\n"); - return 0; - } + seq_printf(s, "mmc%d:\n ctx_loss:\t%d\n\nregs:\n", + mmc->index, host->context_loss); pm_runtime_get_sync(host->dev); @@ -1838,13 +1819,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev) mmc->ops = &omap_hsmmc_ops; - /* - * If regulator_disable can only put vcc_aux to sleep then there is - * no off state. 
- */ - if (mmc_slot(host).vcc_aux_disable_is_sleep) - mmc_slot(host).no_off = 1; - mmc->f_min = OMAP_MMC_MIN_CLOCK; if (pdata->max_freq > 0) @@ -1874,7 +1848,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev) omap_hsmmc_context_save(host); /* This can be removed once we support PBIAS with DT */ - if (host->dev->of_node && host->mapbase == 0x4809c000) + if (host->dev->of_node && res->start == 0x4809c000) host->pbias_disable = 1; host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); @@ -2119,23 +2093,12 @@ static void omap_hsmmc_complete(struct device *dev) static int omap_hsmmc_suspend(struct device *dev) { - int ret = 0; struct omap_hsmmc_host *host = dev_get_drvdata(dev); if (!host) return 0; - if (host && host->suspended) - return 0; - pm_runtime_get_sync(host->dev); - host->suspended = 1; - ret = mmc_suspend_host(host->mmc); - - if (ret) { - host->suspended = 0; - goto err; - } if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) { omap_hsmmc_disable_irq(host); @@ -2145,23 +2108,19 @@ static int omap_hsmmc_suspend(struct device *dev) if (host->dbclk) clk_disable_unprepare(host->dbclk); -err: + pm_runtime_put_sync(host->dev); - return ret; + return 0; } /* Routine to resume the MMC device */ static int omap_hsmmc_resume(struct device *dev) { - int ret = 0; struct omap_hsmmc_host *host = dev_get_drvdata(dev); if (!host) return 0; - if (host && !host->suspended) - return 0; - pm_runtime_get_sync(host->dev); if (host->dbclk) @@ -2172,16 +2131,9 @@ static int omap_hsmmc_resume(struct device *dev) omap_hsmmc_protect_card(host); - /* Notify the core to resume the host */ - ret = mmc_resume_host(host->mmc); - if (ret == 0) - host->suspended = 0; - pm_runtime_mark_last_busy(host->dev); pm_runtime_put_autosuspend(host->dev); - - return ret; - + return 0; } #else diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 1956a3df7cf3..32fe11323f39 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -880,35 +880,6 @@ static int pxamci_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int pxamci_suspend(struct device *dev) -{ - struct mmc_host *mmc = dev_get_drvdata(dev); - int ret = 0; - - if (mmc) - ret = mmc_suspend_host(mmc); - - return ret; -} - -static int pxamci_resume(struct device *dev) -{ - struct mmc_host *mmc = dev_get_drvdata(dev); - int ret = 0; - - if (mmc) - ret = mmc_resume_host(mmc); - - return ret; -} - -static const struct dev_pm_ops pxamci_pm_ops = { - .suspend = pxamci_suspend, - .resume = pxamci_resume, -}; -#endif - static struct platform_driver pxamci_driver = { .probe = pxamci_probe, .remove = pxamci_remove, @@ -916,9 +887,6 @@ static struct platform_driver pxamci_driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = of_match_ptr(pxa_mmc_dt_ids), -#ifdef CONFIG_PM - .pm = &pxamci_pm_ops, -#endif }, }; diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index 375a880e0c5f..c46feda07d56 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -364,7 +364,7 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq) struct mmc_host *mmc = host->mmc; struct mmc_card *card = mmc->card; struct mmc_data *data = mrq->data; - int uhs = mmc_sd_card_uhs(card); + int uhs = mmc_card_uhs(card); int read = (data->flags & MMC_DATA_READ) ? 
1 : 0; u8 cfg2, trans_mode; int err; @@ -1197,37 +1197,6 @@ static const struct mmc_host_ops realtek_pci_sdmmc_ops = { .execute_tuning = sdmmc_execute_tuning, }; -#ifdef CONFIG_PM -static int rtsx_pci_sdmmc_suspend(struct platform_device *pdev, - pm_message_t state) -{ - struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); - struct mmc_host *mmc = host->mmc; - int err; - - dev_dbg(sdmmc_dev(host), "--> %s\n", __func__); - - err = mmc_suspend_host(mmc); - if (err) - return err; - - return 0; -} - -static int rtsx_pci_sdmmc_resume(struct platform_device *pdev) -{ - struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); - struct mmc_host *mmc = host->mmc; - - dev_dbg(sdmmc_dev(host), "--> %s\n", __func__); - - return mmc_resume_host(mmc); -} -#else /* CONFIG_PM */ -#define rtsx_pci_sdmmc_suspend NULL -#define rtsx_pci_sdmmc_resume NULL -#endif /* CONFIG_PM */ - static void init_extra_caps(struct realtek_pci_sdmmc *host) { struct mmc_host *mmc = host->mmc; @@ -1367,8 +1336,6 @@ static struct platform_driver rtsx_pci_sdmmc_driver = { .probe = rtsx_pci_sdmmc_drv_probe, .remove = rtsx_pci_sdmmc_drv_remove, .id_table = rtsx_pci_sdmmc_ids, - .suspend = rtsx_pci_sdmmc_suspend, - .resume = rtsx_pci_sdmmc_resume, .driver = { .owner = THIS_MODULE, .name = DRV_NAME_RTSX_PCI_SDMMC, diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 8d6794cdf899..2fce5ea5eb39 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -1949,39 +1949,10 @@ static struct platform_device_id s3cmci_driver_ids[] = { MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids); - -#ifdef CONFIG_PM - -static int s3cmci_suspend(struct device *dev) -{ - struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev)); - - return mmc_suspend_host(mmc); -} - -static int s3cmci_resume(struct device *dev) -{ - struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev)); - - return mmc_resume_host(mmc); -} - -static const struct dev_pm_ops s3cmci_pm = { - .suspend = s3cmci_suspend, - .resume = s3cmci_resume, -}; - -#define s3cmci_pm_ops &s3cmci_pm -#else /* CONFIG_PM */ -#define s3cmci_pm_ops NULL -#endif /* CONFIG_PM */ - - static struct platform_driver s3cmci_driver = { .driver = { .name = "s3c-sdi", .owner = THIS_MODULE, - .pm = s3cmci_pm_ops, }, .id_table = s3cmci_driver_ids, .probe = s3cmci_probe, diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c index 85472d3fd37f..7a190fe4dff1 100644 --- a/drivers/mmc/host/sdhci-bcm-kona.c +++ b/drivers/mmc/host/sdhci-bcm-kona.c @@ -316,19 +316,7 @@ err_pltfm_free: static int __exit sdhci_bcm_kona_remove(struct platform_device *pdev) { - struct sdhci_host *host = platform_get_drvdata(pdev); - int dead; - u32 scratch; - - dead = 0; - scratch = readl(host->ioaddr + SDHCI_INT_STATUS); - if (scratch == (u32)-1) - dead = 1; - sdhci_remove_host(host, dead); - - sdhci_free_host(host); - - return 0; + return sdhci_pltfm_unregister(pdev); } static struct platform_driver sdhci_bcm_kona_driver = { diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c index 36fa2df04660..f6d8d67c545f 100644 --- a/drivers/mmc/host/sdhci-bcm2835.c +++ b/drivers/mmc/host/sdhci-bcm2835.c @@ -178,13 +178,7 @@ err: static int bcm2835_sdhci_remove(struct platform_device *pdev) { - struct sdhci_host *host = platform_get_drvdata(pdev); - int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); - - sdhci_remove_host(host, dead); - sdhci_pltfm_free(pdev); - - return 0; + return sdhci_pltfm_unregister(pdev); } static 
const struct of_device_id bcm2835_sdhci_of_match[] = { diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index abc8cf01e6e3..461a4c3f4ef7 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -34,12 +34,40 @@ /* VENDOR SPEC register */ #define ESDHC_VENDOR_SPEC 0xc0 #define ESDHC_VENDOR_SPEC_SDIO_QUIRK (1 << 1) +#define ESDHC_VENDOR_SPEC_VSELECT (1 << 1) +#define ESDHC_VENDOR_SPEC_FRC_SDCLK_ON (1 << 8) #define ESDHC_WTMK_LVL 0x44 #define ESDHC_MIX_CTRL 0x48 +#define ESDHC_MIX_CTRL_DDREN (1 << 3) #define ESDHC_MIX_CTRL_AC23EN (1 << 7) +#define ESDHC_MIX_CTRL_EXE_TUNE (1 << 22) +#define ESDHC_MIX_CTRL_SMPCLK_SEL (1 << 23) +#define ESDHC_MIX_CTRL_FBCLK_SEL (1 << 25) /* Bits 3 and 6 are not SDHCI standard definitions */ #define ESDHC_MIX_CTRL_SDHCI_MASK 0xb7 +/* dll control register */ +#define ESDHC_DLL_CTRL 0x60 +#define ESDHC_DLL_OVERRIDE_VAL_SHIFT 9 +#define ESDHC_DLL_OVERRIDE_EN_SHIFT 8 + +/* tune control register */ +#define ESDHC_TUNE_CTRL_STATUS 0x68 +#define ESDHC_TUNE_CTRL_STEP 1 +#define ESDHC_TUNE_CTRL_MIN 0 +#define ESDHC_TUNE_CTRL_MAX ((1 << 7) - 1) + +#define ESDHC_TUNING_CTRL 0xcc +#define ESDHC_STD_TUNING_EN (1 << 24) +/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */ +#define ESDHC_TUNING_START_TAP 0x1 + +#define ESDHC_TUNING_BLOCK_PATTERN_LEN 64 + +/* pinctrl state */ +#define ESDHC_PINCTRL_STATE_100MHZ "state_100mhz" +#define ESDHC_PINCTRL_STATE_200MHZ "state_200mhz" + /* * Our interpretation of the SDHCI_HOST_CONTROL register */ @@ -66,21 +94,60 @@ * As a result, the TC flag is not asserted and SW received timeout * exeception. Bit1 of Vendor Spec registor is used to fix it. */ -#define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1) - -enum imx_esdhc_type { - IMX25_ESDHC, - IMX35_ESDHC, - IMX51_ESDHC, - IMX53_ESDHC, - IMX6Q_USDHC, +#define ESDHC_FLAG_MULTIBLK_NO_INT BIT(1) +/* + * The flag enables the workaround for ESDHC errata ENGcm07207 which + * affects i.MX25 and i.MX35. + */ +#define ESDHC_FLAG_ENGCM07207 BIT(2) +/* + * The flag tells that the ESDHC controller is an USDHC block that is + * integrated on the i.MX6 series. 
+ */ +#define ESDHC_FLAG_USDHC BIT(3) +/* The IP supports manual tuning process */ +#define ESDHC_FLAG_MAN_TUNING BIT(4) +/* The IP supports standard tuning process */ +#define ESDHC_FLAG_STD_TUNING BIT(5) +/* The IP has SDHCI_CAPABILITIES_1 register */ +#define ESDHC_FLAG_HAVE_CAP1 BIT(6) + +struct esdhc_soc_data { + u32 flags; +}; + +static struct esdhc_soc_data esdhc_imx25_data = { + .flags = ESDHC_FLAG_ENGCM07207, +}; + +static struct esdhc_soc_data esdhc_imx35_data = { + .flags = ESDHC_FLAG_ENGCM07207, +}; + +static struct esdhc_soc_data esdhc_imx51_data = { + .flags = 0, +}; + +static struct esdhc_soc_data esdhc_imx53_data = { + .flags = ESDHC_FLAG_MULTIBLK_NO_INT, +}; + +static struct esdhc_soc_data usdhc_imx6q_data = { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING, +}; + +static struct esdhc_soc_data usdhc_imx6sl_data = { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING + | ESDHC_FLAG_HAVE_CAP1, }; struct pltfm_imx_data { - int flags; u32 scratchpad; - enum imx_esdhc_type devtype; struct pinctrl *pinctrl; + struct pinctrl_state *pins_default; + struct pinctrl_state *pins_100mhz; + struct pinctrl_state *pins_200mhz; + const struct esdhc_soc_data *socdata; struct esdhc_platform_data boarddata; struct clk *clk_ipg; struct clk *clk_ahb; @@ -90,25 +157,20 @@ struct pltfm_imx_data { MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */ WAIT_FOR_INT, /* sent CMD12, waiting for response INT */ } multiblock_status; - + u32 uhs_mode; + u32 is_ddr; }; static struct platform_device_id imx_esdhc_devtype[] = { { .name = "sdhci-esdhc-imx25", - .driver_data = IMX25_ESDHC, + .driver_data = (kernel_ulong_t) &esdhc_imx25_data, }, { .name = "sdhci-esdhc-imx35", - .driver_data = IMX35_ESDHC, + .driver_data = (kernel_ulong_t) &esdhc_imx35_data, }, { .name = "sdhci-esdhc-imx51", - .driver_data = IMX51_ESDHC, - }, { - .name = "sdhci-esdhc-imx53", - .driver_data = IMX53_ESDHC, - }, { - .name = "sdhci-usdhc-imx6q", - .driver_data = IMX6Q_USDHC, + .driver_data = (kernel_ulong_t) &esdhc_imx51_data, }, { /* sentinel */ } @@ -116,38 +178,34 @@ static struct platform_device_id imx_esdhc_devtype[] = { MODULE_DEVICE_TABLE(platform, imx_esdhc_devtype); static const struct of_device_id imx_esdhc_dt_ids[] = { - { .compatible = "fsl,imx25-esdhc", .data = &imx_esdhc_devtype[IMX25_ESDHC], }, - { .compatible = "fsl,imx35-esdhc", .data = &imx_esdhc_devtype[IMX35_ESDHC], }, - { .compatible = "fsl,imx51-esdhc", .data = &imx_esdhc_devtype[IMX51_ESDHC], }, - { .compatible = "fsl,imx53-esdhc", .data = &imx_esdhc_devtype[IMX53_ESDHC], }, - { .compatible = "fsl,imx6q-usdhc", .data = &imx_esdhc_devtype[IMX6Q_USDHC], }, + { .compatible = "fsl,imx25-esdhc", .data = &esdhc_imx25_data, }, + { .compatible = "fsl,imx35-esdhc", .data = &esdhc_imx35_data, }, + { .compatible = "fsl,imx51-esdhc", .data = &esdhc_imx51_data, }, + { .compatible = "fsl,imx53-esdhc", .data = &esdhc_imx53_data, }, + { .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, }, + { .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids); static inline int is_imx25_esdhc(struct pltfm_imx_data *data) { - return data->devtype == IMX25_ESDHC; -} - -static inline int is_imx35_esdhc(struct pltfm_imx_data *data) -{ - return data->devtype == IMX35_ESDHC; + return data->socdata == &esdhc_imx25_data; } -static inline int is_imx51_esdhc(struct pltfm_imx_data *data) +static inline int is_imx53_esdhc(struct pltfm_imx_data *data) { - return data->devtype == IMX51_ESDHC; + return 
data->socdata == &esdhc_imx53_data; } -static inline int is_imx53_esdhc(struct pltfm_imx_data *data) +static inline int is_imx6q_usdhc(struct pltfm_imx_data *data) { - return data->devtype == IMX53_ESDHC; + return data->socdata == &usdhc_imx6q_data; } -static inline int is_imx6q_usdhc(struct pltfm_imx_data *data) +static inline int esdhc_is_usdhc(struct pltfm_imx_data *data) { - return data->devtype == IMX6Q_USDHC; + return !!(data->socdata->flags & ESDHC_FLAG_USDHC); } static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) @@ -164,7 +222,21 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg) struct pltfm_imx_data *imx_data = pltfm_host->priv; u32 val = readl(host->ioaddr + reg); + if (unlikely(reg == SDHCI_PRESENT_STATE)) { + u32 fsl_prss = val; + /* save the least 20 bits */ + val = fsl_prss & 0x000FFFFF; + /* move dat[0-3] bits */ + val |= (fsl_prss & 0x0F000000) >> 4; + /* move cmd line bit */ + val |= (fsl_prss & 0x00800000) << 1; + } + if (unlikely(reg == SDHCI_CAPABILITIES)) { + /* ignore bit[0-15] as it stores cap_1 register val for mx6sl */ + if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1) + val &= 0xffff0000; + /* In FSL esdhc IC module, only bit20 is used to indicate the * ADMA2 capability of esdhc, but this bit is messed up on * some SOCs (e.g. on MX25, MX35 this bit is set, but they @@ -178,6 +250,25 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg) } } + if (unlikely(reg == SDHCI_CAPABILITIES_1)) { + if (esdhc_is_usdhc(imx_data)) { + if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1) + val = readl(host->ioaddr + SDHCI_CAPABILITIES) & 0xFFFF; + else + /* imx6q/dl does not have cap_1 register, fake one */ + val = SDHCI_SUPPORT_DDR50 | SDHCI_SUPPORT_SDR104 + | SDHCI_SUPPORT_SDR50 + | SDHCI_USE_SDR50_TUNING; + } + } + + if (unlikely(reg == SDHCI_MAX_CURRENT) && esdhc_is_usdhc(imx_data)) { + val = 0; + val |= 0xFF << SDHCI_MAX_CURRENT_330_SHIFT; + val |= 0xFF << SDHCI_MAX_CURRENT_300_SHIFT; + val |= 0xFF << SDHCI_MAX_CURRENT_180_SHIFT; + } + if (unlikely(reg == SDHCI_INT_STATUS)) { if (val & ESDHC_INT_VENDOR_SPEC_DMA_ERR) { val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR; @@ -224,7 +315,7 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) } } - if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) + if (unlikely((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT) && (reg == SDHCI_INT_STATUS) && (val & SDHCI_INT_DATA_END))) { u32 v; @@ -256,10 +347,12 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; + u16 ret = 0; + u32 val; if (unlikely(reg == SDHCI_HOST_VERSION)) { reg ^= 2; - if (is_imx6q_usdhc(imx_data)) { + if (esdhc_is_usdhc(imx_data)) { /* * The usdhc register returns a wrong host version. * Correct it here. 
@@ -268,6 +361,30 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg) } } + if (unlikely(reg == SDHCI_HOST_CONTROL2)) { + val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + if (val & ESDHC_VENDOR_SPEC_VSELECT) + ret |= SDHCI_CTRL_VDD_180; + + if (esdhc_is_usdhc(imx_data)) { + if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) + val = readl(host->ioaddr + ESDHC_MIX_CTRL); + else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) + /* the std tuning bits is in ACMD12_ERR for imx6sl */ + val = readl(host->ioaddr + SDHCI_ACMD12_ERR); + } + + if (val & ESDHC_MIX_CTRL_EXE_TUNE) + ret |= SDHCI_CTRL_EXEC_TUNING; + if (val & ESDHC_MIX_CTRL_SMPCLK_SEL) + ret |= SDHCI_CTRL_TUNED_CLK; + + ret |= (imx_data->uhs_mode & SDHCI_CTRL_UHS_MASK); + ret &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; + + return ret; + } + return readw(host->ioaddr + reg); } @@ -275,10 +392,59 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; + u32 new_val = 0; switch (reg) { + case SDHCI_CLOCK_CONTROL: + new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + if (val & SDHCI_CLOCK_CARD_EN) + new_val |= ESDHC_VENDOR_SPEC_FRC_SDCLK_ON; + else + new_val &= ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON; + writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC); + return; + case SDHCI_HOST_CONTROL2: + new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + if (val & SDHCI_CTRL_VDD_180) + new_val |= ESDHC_VENDOR_SPEC_VSELECT; + else + new_val &= ~ESDHC_VENDOR_SPEC_VSELECT; + writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC); + imx_data->uhs_mode = val & SDHCI_CTRL_UHS_MASK; + if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) { + new_val = readl(host->ioaddr + ESDHC_MIX_CTRL); + if (val & SDHCI_CTRL_TUNED_CLK) + new_val |= ESDHC_MIX_CTRL_SMPCLK_SEL; + else + new_val &= ~ESDHC_MIX_CTRL_SMPCLK_SEL; + writel(new_val , host->ioaddr + ESDHC_MIX_CTRL); + } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) { + u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR); + u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL); + new_val = readl(host->ioaddr + ESDHC_TUNING_CTRL); + if (val & SDHCI_CTRL_EXEC_TUNING) { + new_val |= ESDHC_STD_TUNING_EN | + ESDHC_TUNING_START_TAP; + v |= ESDHC_MIX_CTRL_EXE_TUNE; + m |= ESDHC_MIX_CTRL_FBCLK_SEL; + } else { + new_val &= ~ESDHC_STD_TUNING_EN; + v &= ~ESDHC_MIX_CTRL_EXE_TUNE; + m &= ~ESDHC_MIX_CTRL_FBCLK_SEL; + } + + if (val & SDHCI_CTRL_TUNED_CLK) + v |= ESDHC_MIX_CTRL_SMPCLK_SEL; + else + v &= ~ESDHC_MIX_CTRL_SMPCLK_SEL; + + writel(new_val, host->ioaddr + ESDHC_TUNING_CTRL); + writel(v, host->ioaddr + SDHCI_ACMD12_ERR); + writel(m, host->ioaddr + ESDHC_MIX_CTRL); + } + return; case SDHCI_TRANSFER_MODE: - if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) + if ((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT) && (host->cmd->opcode == SD_IO_RW_EXTENDED) && (host->cmd->data->blocks > 1) && (host->cmd->data->flags & MMC_DATA_READ)) { @@ -288,7 +454,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) writel(v, host->ioaddr + ESDHC_VENDOR_SPEC); } - if (is_imx6q_usdhc(imx_data)) { + if (esdhc_is_usdhc(imx_data)) { u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL); /* Swap AC23 bit */ if (val & SDHCI_TRNS_AUTO_CMD23) { @@ -310,10 +476,10 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) val |= SDHCI_CMD_ABORTCMD; if ((host->cmd->opcode == MMC_SET_BLOCK_COUNT) && - (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) + (imx_data->socdata->flags & 
ESDHC_FLAG_MULTIBLK_NO_INT)) imx_data->multiblock_status = MULTIBLK_IN_PROCESS; - if (is_imx6q_usdhc(imx_data)) + if (esdhc_is_usdhc(imx_data)) writel(val << 16, host->ioaddr + SDHCI_TRANSFER_MODE); else @@ -379,8 +545,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) * The reset on usdhc fails to clear MIX_CTRL register. * Do it manually here. */ - if (is_imx6q_usdhc(imx_data)) + if (esdhc_is_usdhc(imx_data)) { writel(0, host->ioaddr + ESDHC_MIX_CTRL); + imx_data->is_ddr = 0; + } } } @@ -409,8 +577,60 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host, unsigned int clock) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = pltfm_host->priv; + unsigned int host_clock = clk_get_rate(pltfm_host->clk); + int pre_div = 2; + int div = 1; + u32 temp, val; + + if (clock == 0) { + if (esdhc_is_usdhc(imx_data)) { + val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, + host->ioaddr + ESDHC_VENDOR_SPEC); + } + goto out; + } + + if (esdhc_is_usdhc(imx_data) && !imx_data->is_ddr) + pre_div = 1; + + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN + | ESDHC_CLOCK_MASK); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + + while (host_clock / pre_div / 16 > clock && pre_div < 256) + pre_div *= 2; + + while (host_clock / pre_div / div > clock && div < 16) + div++; - esdhc_set_clock(host, clock, clk_get_rate(pltfm_host->clk)); + host->mmc->actual_clock = host_clock / pre_div / div; + dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", + clock, host->mmc->actual_clock); + + if (imx_data->is_ddr) + pre_div >>= 2; + else + pre_div >>= 1; + div--; + + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN + | (div << ESDHC_DIVIDER_SHIFT) + | (pre_div << ESDHC_PREDIV_SHIFT)); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + + if (esdhc_is_usdhc(imx_data)) { + val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + writel(val | ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, + host->ioaddr + ESDHC_VENDOR_SPEC); + } + + mdelay(1); +out: + host->clock = clock; } static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) @@ -454,7 +674,192 @@ static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width) return 0; } -static const struct sdhci_ops sdhci_esdhc_ops = { +static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val) +{ + u32 reg; + + /* FIXME: delay a bit for card to be ready for next tuning due to errors */ + mdelay(1); + + reg = readl(host->ioaddr + ESDHC_MIX_CTRL); + reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL | + ESDHC_MIX_CTRL_FBCLK_SEL; + writel(reg, host->ioaddr + ESDHC_MIX_CTRL); + writel(val << 8, host->ioaddr + ESDHC_TUNE_CTRL_STATUS); + dev_dbg(mmc_dev(host->mmc), + "tunning with delay 0x%x ESDHC_TUNE_CTRL_STATUS 0x%x\n", + val, readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS)); +} + +static void esdhc_request_done(struct mmc_request *mrq) +{ + complete(&mrq->completion); +} + +static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode) +{ + struct mmc_command cmd = {0}; + struct mmc_request mrq = {0}; + struct mmc_data data = {0}; + struct scatterlist sg; + char tuning_pattern[ESDHC_TUNING_BLOCK_PATTERN_LEN]; + + cmd.opcode = opcode; + cmd.arg = 0; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + + data.blksz = ESDHC_TUNING_BLOCK_PATTERN_LEN; + data.blocks = 1; + data.flags = MMC_DATA_READ; + data.sg = &sg; + 
data.sg_len = 1; + + sg_init_one(&sg, tuning_pattern, sizeof(tuning_pattern)); + + mrq.cmd = &cmd; + mrq.cmd->mrq = &mrq; + mrq.data = &data; + mrq.data->mrq = &mrq; + mrq.cmd->data = mrq.data; + + mrq.done = esdhc_request_done; + init_completion(&(mrq.completion)); + + disable_irq(host->irq); + spin_lock(&host->lock); + host->mrq = &mrq; + + sdhci_send_command(host, mrq.cmd); + + spin_unlock(&host->lock); + enable_irq(host->irq); + + wait_for_completion(&mrq.completion); + + if (cmd.error) + return cmd.error; + if (data.error) + return data.error; + + return 0; +} + +static void esdhc_post_tuning(struct sdhci_host *host) +{ + u32 reg; + + reg = readl(host->ioaddr + ESDHC_MIX_CTRL); + reg &= ~ESDHC_MIX_CTRL_EXE_TUNE; + writel(reg, host->ioaddr + ESDHC_MIX_CTRL); +} + +static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode) +{ + int min, max, avg, ret; + + /* find the mininum delay first which can pass tuning */ + min = ESDHC_TUNE_CTRL_MIN; + while (min < ESDHC_TUNE_CTRL_MAX) { + esdhc_prepare_tuning(host, min); + if (!esdhc_send_tuning_cmd(host, opcode)) + break; + min += ESDHC_TUNE_CTRL_STEP; + } + + /* find the maxinum delay which can not pass tuning */ + max = min + ESDHC_TUNE_CTRL_STEP; + while (max < ESDHC_TUNE_CTRL_MAX) { + esdhc_prepare_tuning(host, max); + if (esdhc_send_tuning_cmd(host, opcode)) { + max -= ESDHC_TUNE_CTRL_STEP; + break; + } + max += ESDHC_TUNE_CTRL_STEP; + } + + /* use average delay to get the best timing */ + avg = (min + max) / 2; + esdhc_prepare_tuning(host, avg); + ret = esdhc_send_tuning_cmd(host, opcode); + esdhc_post_tuning(host); + + dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n", + ret ? "failed" : "passed", avg, ret); + + return ret; +} + +static int esdhc_change_pinstate(struct sdhci_host *host, + unsigned int uhs) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = pltfm_host->priv; + struct pinctrl_state *pinctrl; + + dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs); + + if (IS_ERR(imx_data->pinctrl) || + IS_ERR(imx_data->pins_default) || + IS_ERR(imx_data->pins_100mhz) || + IS_ERR(imx_data->pins_200mhz)) + return -EINVAL; + + switch (uhs) { + case MMC_TIMING_UHS_SDR50: + pinctrl = imx_data->pins_100mhz; + break; + case MMC_TIMING_UHS_SDR104: + pinctrl = imx_data->pins_200mhz; + break; + default: + /* back to default state for other legacy timing */ + pinctrl = imx_data->pins_default; + } + + return pinctrl_select_state(imx_data->pinctrl, pinctrl); +} + +static int esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = pltfm_host->priv; + struct esdhc_platform_data *boarddata = &imx_data->boarddata; + + switch (uhs) { + case MMC_TIMING_UHS_SDR12: + imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR12; + break; + case MMC_TIMING_UHS_SDR25: + imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR25; + break; + case MMC_TIMING_UHS_SDR50: + imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR50; + break; + case MMC_TIMING_UHS_SDR104: + imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR104; + break; + case MMC_TIMING_UHS_DDR50: + imx_data->uhs_mode = SDHCI_CTRL_UHS_DDR50; + writel(readl(host->ioaddr + ESDHC_MIX_CTRL) | + ESDHC_MIX_CTRL_DDREN, + host->ioaddr + ESDHC_MIX_CTRL); + imx_data->is_ddr = 1; + if (boarddata->delay_line) { + u32 v; + v = boarddata->delay_line << + ESDHC_DLL_OVERRIDE_VAL_SHIFT | + (1 << ESDHC_DLL_OVERRIDE_EN_SHIFT); + if (is_imx53_esdhc(imx_data)) + v <<= 1; + writel(v, host->ioaddr + 
ESDHC_DLL_CTRL); + } + break; + } + + return esdhc_change_pinstate(host, uhs); +} + +static struct sdhci_ops sdhci_esdhc_ops = { .read_l = esdhc_readl_le, .read_w = esdhc_readw_le, .write_l = esdhc_writel_le, @@ -465,6 +870,7 @@ static const struct sdhci_ops sdhci_esdhc_ops = { .get_min_clock = esdhc_pltfm_get_min_clock, .get_ro = esdhc_pltfm_get_ro, .platform_bus_width = esdhc_pltfm_bus_width, + .set_uhs_signaling = esdhc_set_uhs_signaling, }; static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { @@ -506,6 +912,14 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, of_property_read_u32(np, "max-frequency", &boarddata->f_max); + if (of_find_property(np, "no-1-8-v", NULL)) + boarddata->support_vsel = false; + else + boarddata->support_vsel = true; + + if (of_property_read_u32(np, "fsl,delay-line", &boarddata->delay_line)) + boarddata->delay_line = 0; + return 0; } #else @@ -539,9 +953,8 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) goto free_sdhci; } - if (of_id) - pdev->id_entry = of_id->data; - imx_data->devtype = pdev->id_entry->driver_data; + imx_data->socdata = of_id ? of_id->data : (struct esdhc_soc_data *) + pdev->id_entry->driver_data; pltfm_host->priv = imx_data; imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); @@ -568,29 +981,39 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) clk_prepare_enable(imx_data->clk_ipg); clk_prepare_enable(imx_data->clk_ahb); - imx_data->pinctrl = devm_pinctrl_get_select_default(&pdev->dev); + imx_data->pinctrl = devm_pinctrl_get(&pdev->dev); if (IS_ERR(imx_data->pinctrl)) { err = PTR_ERR(imx_data->pinctrl); goto disable_clk; } + imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl, + PINCTRL_STATE_DEFAULT); + if (IS_ERR(imx_data->pins_default)) { + err = PTR_ERR(imx_data->pins_default); + dev_err(mmc_dev(host->mmc), "could not get default state\n"); + goto disable_clk; + } + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; - if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data)) + if (imx_data->socdata->flags & ESDHC_FLAG_ENGCM07207) /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */ host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK | SDHCI_QUIRK_BROKEN_ADMA; - if (is_imx53_esdhc(imx_data)) - imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; - /* * The imx6q ROM code will change the default watermark level setting * to something insane. Change it back here. 
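The manual tuning support added above (esdhc_prepare_tuning(), esdhc_send_tuning_cmd(), esdhc_executing_tuning()) scans the sample-delay taps: it looks for the first tap that reads the tuning block back correctly, walks forward to the last tap that still works, and then programs the midpoint of that window. Below is a standalone model of that scan, with a made-up pass/fail predicate standing in for the real tuning-block read:

#include <stdio.h>

#define TUNE_CTRL_MIN	0
#define TUNE_CTRL_MAX	((1 << 7) - 1)	/* 7-bit delay field, as in ESDHC_TUNE_CTRL_MAX */
#define TUNE_CTRL_STEP	1

/* stand-in for esdhc_send_tuning_cmd(); 0 means the tuning block was read back correctly */
static int send_tuning(int delay)
{
	return (delay >= 40 && delay <= 70) ? 0 : -1;	/* pretend taps 40..70 are good */
}

int main(void)
{
	int min = TUNE_CTRL_MIN, max, avg;

	/* find the minimum delay that passes tuning */
	while (min < TUNE_CTRL_MAX && send_tuning(min) != 0)
		min += TUNE_CTRL_STEP;

	/* walk forward to the last delay that still passes */
	max = min + TUNE_CTRL_STEP;
	while (max < TUNE_CTRL_MAX) {
		if (send_tuning(max) != 0) {
			max -= TUNE_CTRL_STEP;
			break;
		}
		max += TUNE_CTRL_STEP;
	}

	/* use the middle of the passing window as the final delay */
	avg = (min + max) / 2;
	printf("passing window [%d..%d], programmed delay %d\n", min, max, avg);
	return 0;
}

In the driver the chosen midpoint is written once more through esdhc_prepare_tuning() and confirmed with a final tuning command before esdhc_post_tuning() clears ESDHC_MIX_CTRL_EXE_TUNE.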
*/ - if (is_imx6q_usdhc(imx_data)) + if (esdhc_is_usdhc(imx_data)) { writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL); + host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; + } + if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) + sdhci_esdhc_ops.platform_execute_tuning = + esdhc_executing_tuning; boarddata = &imx_data->boarddata; if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) { if (!host->mmc->parent->platform_data) { @@ -650,6 +1073,23 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) break; } + /* sdr50 and sdr104 needs work on 1.8v signal voltage */ + if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data)) { + imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, + ESDHC_PINCTRL_STATE_100MHZ); + imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, + ESDHC_PINCTRL_STATE_200MHZ); + if (IS_ERR(imx_data->pins_100mhz) || + IS_ERR(imx_data->pins_200mhz)) { + dev_warn(mmc_dev(host->mmc), + "could not get ultra high speed state, work on normal mode\n"); + /* fall back to not support uhs by specify no 1.8v quirk */ + host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; + } + } else { + host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; + } + err = sdhci_add_host(host); if (err) goto disable_clk; diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index a2a06420e463..a7d9f95a7b03 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h @@ -49,41 +49,4 @@ #define ESDHC_HOST_CONTROL_RES 0x05 -static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock, - unsigned int host_clock) -{ - int pre_div = 2; - int div = 1; - u32 temp; - - if (clock == 0) - goto out; - - temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN - | ESDHC_CLOCK_MASK); - sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); - - while (host_clock / pre_div / 16 > clock && pre_div < 256) - pre_div *= 2; - - while (host_clock / pre_div / div > clock && div < 16) - div++; - - dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", - clock, host_clock / pre_div / div); - - pre_div >>= 1; - div--; - - temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN - | (div << ESDHC_DIVIDER_SHIFT) - | (pre_div << ESDHC_PREDIV_SHIFT)); - sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); - mdelay(1); -out: - host->clock = clock; -} - #endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */ diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index e328252ebf2a..0b249970b119 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -199,6 +199,14 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) { + + int pre_div = 2; + int div = 1; + u32 temp; + + if (clock == 0) + goto out; + /* Workaround to reduce the clock frequency for p1010 esdhc */ if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { if (clock > 20000000) @@ -207,8 +215,31 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) clock -= 5000000; } - /* Set the clock */ - esdhc_set_clock(host, clock, host->max_clk); + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN + | ESDHC_CLOCK_MASK); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + + while (host->max_clk / pre_div / 16 > clock && pre_div < 256) + pre_div *= 2; + + while 
(host->max_clk / pre_div / div > clock && div < 16) + div++; + + dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", + clock, host->max_clk / pre_div / div); + + pre_div >>= 1; + div--; + + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN + | (div << ESDHC_DIVIDER_SHIFT) + | (pre_div << ESDHC_PREDIV_SHIFT)); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + mdelay(1); +out: + host->clock = clock; } #ifdef CONFIG_PM diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index d7d6bc8968d2..8f753811fc7a 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -37,6 +37,12 @@ #define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15 #define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16 #define PCI_DEVICE_ID_INTEL_BYT_EMMC2 0x0f50 +#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190 +#define PCI_DEVICE_ID_INTEL_CLV_SDIO0 0x08f9 +#define PCI_DEVICE_ID_INTEL_CLV_SDIO1 0x08fa +#define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb +#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5 +#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6 /* * PCI registers @@ -356,6 +362,28 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { .allow_runtime_pm = true, }; +/* Define Host controllers for Intel Merrifield platform */ +#define INTEL_MRFL_EMMC_0 0 +#define INTEL_MRFL_EMMC_1 1 + +static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot) +{ + if ((PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_0) && + (PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_1)) + /* SD support is not ready yet */ + return -ENODEV; + + slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | + MMC_CAP_1_8V_DDR; + + return 0; +} + +static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = { + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .probe_slot = intel_mrfl_mmc_probe_slot, +}; + /* O2Micro extra registers */ #define O2_SD_LOCK_WP 0xD3 #define O2_SD_MULTI_VCC3V 0xEE @@ -939,6 +967,54 @@ static const struct pci_device_id pci_ids[] = { .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc, }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_CLV_SDIO0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sd, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_CLV_SDIO1, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_CLV_SDIO2, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_CLV_EMMC0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_CLV_EMMC1, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_MRFL_MMC, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc, + }, { .vendor = PCI_VENDOR_ID_O2, .device = PCI_DEVICE_ID_O2_8120, diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 7a7fb4f0d5a4..bd8a0982aec3 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -49,7 +49,6 @@ static unsigned int 
debug_quirks2; static void sdhci_finish_data(struct sdhci_host *); -static void sdhci_send_command(struct sdhci_host *, struct mmc_command *); static void sdhci_finish_command(struct sdhci_host *); static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); static void sdhci_tuning_timer(unsigned long data); @@ -981,7 +980,7 @@ static void sdhci_finish_data(struct sdhci_host *host) tasklet_schedule(&host->finish_tasklet); } -static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) +void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) { int flags; u32 mask; @@ -1053,6 +1052,7 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); } +EXPORT_SYMBOL_GPL(sdhci_send_command); static void sdhci_finish_command(struct sdhci_host *host) { @@ -1435,7 +1435,8 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) } if (host->version >= SDHCI_SPEC_300 && - (ios->power_mode == MMC_POWER_UP)) + (ios->power_mode == MMC_POWER_UP) && + !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) sdhci_enable_preset_value(host, false); sdhci_set_clock(host, ios->clock); @@ -1875,6 +1876,14 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) return 0; } + if (host->ops->platform_execute_tuning) { + spin_unlock(&host->lock); + enable_irq(host->irq); + err = host->ops->platform_execute_tuning(host, opcode); + sdhci_runtime_pm_put(host); + return err; + } + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); /* @@ -1981,6 +1990,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) if (!tuning_loop_counter || !timeout) { ctrl &= ~SDHCI_CTRL_TUNED_CLK; sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + err = -EIO; } else { if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) { pr_info(DRIVER_NAME ": Tuning procedure" @@ -2491,6 +2501,14 @@ again: result = IRQ_HANDLED; intmask = sdhci_readl(host, SDHCI_INT_STATUS); + + /* + * If we know we'll call the driver to signal SDIO IRQ, disregard + * further indications of Card Interrupt in the status to avoid a + * needless loop. 
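The sdhci.c change in this hunk adds a platform_execute_tuning hook: when a host driver supplies it, sdhci_execute_tuning() drops its lock, re-enables the interrupt and hands the whole procedure over to the driver, which is exactly how the i.MX driver plugs in esdhc_executing_tuning() above. A minimal sketch of wiring such a callback follows; it is kernel-context only (not standalone), the my_* names are placeholders, and it assumes the sdhci_ops member added in sdhci.h later in this series:

static int my_platform_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	/*
	 * The driver owns the full procedure here: program its sample
	 * delay, issue the tuning-block read (for instance through the
	 * now-exported sdhci_send_command()) and return 0 on success
	 * or a negative errno on failure.
	 */
	return 0;
}

static const struct sdhci_ops my_sdhci_ops = {
	.platform_execute_tuning = my_platform_execute_tuning,
	/* remaining ops omitted */
};

Note that the i.MX driver drops the const from its own ops table so it can install the hook at probe time only on SoCs whose socdata carries ESDHC_FLAG_MAN_TUNING.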
+ */ + if (cardint) + intmask &= ~SDHCI_INT_CARD_INT; if (intmask && --max_loops) goto again; out: @@ -2546,8 +2564,6 @@ EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups); int sdhci_suspend_host(struct sdhci_host *host) { - int ret; - if (host->ops->platform_suspend) host->ops->platform_suspend(host); @@ -2559,19 +2575,6 @@ int sdhci_suspend_host(struct sdhci_host *host) host->flags &= ~SDHCI_NEEDS_RETUNING; } - ret = mmc_suspend_host(host->mmc); - if (ret) { - if (host->flags & SDHCI_USING_RETUNING_TIMER) { - host->flags |= SDHCI_NEEDS_RETUNING; - mod_timer(&host->tuning_timer, jiffies + - host->tuning_count * HZ); - } - - sdhci_enable_card_detection(host); - - return ret; - } - if (!device_may_wakeup(mmc_dev(host->mmc))) { sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK); free_irq(host->irq, host); @@ -2579,14 +2582,14 @@ int sdhci_suspend_host(struct sdhci_host *host) sdhci_enable_irq_wakeups(host); enable_irq_wake(host->irq); } - return ret; + return 0; } EXPORT_SYMBOL_GPL(sdhci_suspend_host); int sdhci_resume_host(struct sdhci_host *host) { - int ret; + int ret = 0; if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { if (host->ops->enable_dma) @@ -2615,7 +2618,6 @@ int sdhci_resume_host(struct sdhci_host *host) mmiowb(); } - ret = mmc_resume_host(host->mmc); sdhci_enable_card_detection(host); if (host->ops->platform_resume) diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index b037f188fe44..0a3ed01887db 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -288,6 +288,7 @@ struct sdhci_ops { unsigned int (*get_ro)(struct sdhci_host *host); void (*platform_reset_enter)(struct sdhci_host *host, u8 mask); void (*platform_reset_exit)(struct sdhci_host *host, u8 mask); + int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode); int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); void (*hw_reset)(struct sdhci_host *host); void (*platform_suspend)(struct sdhci_host *host); @@ -393,6 +394,8 @@ static inline void *sdhci_priv(struct sdhci_host *host) extern void sdhci_card_detect(struct sdhci_host *host); extern int sdhci_add_host(struct sdhci_host *host); extern void sdhci_remove_host(struct sdhci_host *host, int dead); +extern void sdhci_send_command(struct sdhci_host *host, + struct mmc_command *cmd); #ifdef CONFIG_PM extern int sdhci_suspend_host(struct sdhci_host *host); diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c index 50adbd155f35..b7e305775314 100644 --- a/drivers/mmc/host/sdricoh_cs.c +++ b/drivers/mmc/host/sdricoh_cs.c @@ -516,9 +516,7 @@ static void sdricoh_pcmcia_detach(struct pcmcia_device *link) #ifdef CONFIG_PM static int sdricoh_pcmcia_suspend(struct pcmcia_device *link) { - struct mmc_host *mmc = link->priv; dev_dbg(&link->dev, "suspend\n"); - mmc_suspend_host(mmc); return 0; } @@ -527,7 +525,6 @@ static int sdricoh_pcmcia_resume(struct pcmcia_device *link) struct mmc_host *mmc = link->priv; dev_dbg(&link->dev, "resume\n"); sdricoh_reset(mmc_priv(mmc)); - mmc_resume_host(mmc); return 0; } #else diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 36629a024aa1..d032b080ac4d 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -964,7 +964,7 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) static int sh_mmcif_clk_update(struct sh_mmcif_host *host) { - int ret = clk_enable(host->hclk); + int ret = clk_prepare_enable(host->hclk); if (!ret) { host->clk = clk_get_rate(host->hclk); @@ -1018,7 +1018,7 @@ static void 
sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) } if (host->power) { pm_runtime_put_sync(&host->pd->dev); - clk_disable(host->hclk); + clk_disable_unprepare(host->hclk); host->power = false; if (ios->power_mode == MMC_POWER_OFF) sh_mmcif_set_power(host, ios); @@ -1466,7 +1466,7 @@ static int sh_mmcif_probe(struct platform_device *pdev) mutex_init(&host->thread_lock); - clk_disable(host->hclk); + clk_disable_unprepare(host->hclk); ret = mmc_add_host(mmc); if (ret < 0) goto emmcaddh; @@ -1487,7 +1487,7 @@ ereqirq1: ereqirq0: pm_runtime_suspend(&pdev->dev); eresume: - clk_disable(host->hclk); + clk_disable_unprepare(host->hclk); eclkupdate: clk_put(host->hclk); eclkget: @@ -1505,7 +1505,7 @@ static int sh_mmcif_remove(struct platform_device *pdev) int irq[2]; host->dying = true; - clk_enable(host->hclk); + clk_prepare_enable(host->hclk); pm_runtime_get_sync(&pdev->dev); dev_pm_qos_hide_latency_limit(&pdev->dev); @@ -1530,7 +1530,7 @@ static int sh_mmcif_remove(struct platform_device *pdev) if (irq[1] >= 0) free_irq(irq[1], host); - clk_disable(host->hclk); + clk_disable_unprepare(host->hclk); mmc_free_host(host->mmc); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); @@ -1538,28 +1538,21 @@ static int sh_mmcif_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int sh_mmcif_suspend(struct device *dev) { struct sh_mmcif_host *host = dev_get_drvdata(dev); - int ret = mmc_suspend_host(host->mmc); - if (!ret) - sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); + sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); - return ret; + return 0; } static int sh_mmcif_resume(struct device *dev) { - struct sh_mmcif_host *host = dev_get_drvdata(dev); - - return mmc_resume_host(host->mmc); + return 0; } -#else -#define sh_mmcif_suspend NULL -#define sh_mmcif_resume NULL -#endif /* CONFIG_PM */ +#endif static const struct of_device_id mmcif_of_match[] = { { .compatible = "renesas,sh-mmcif" }, @@ -1568,8 +1561,7 @@ static const struct of_device_id mmcif_of_match[] = { MODULE_DEVICE_TABLE(of, mmcif_of_match); static const struct dev_pm_ops sh_mmcif_dev_pm_ops = { - .suspend = sh_mmcif_suspend, - .resume = sh_mmcif_resume, + SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume) }; static struct platform_driver sh_mmcif_driver = { diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c index 43d962829f8e..d1760ebcac03 100644 --- a/drivers/mmc/host/tifm_sd.c +++ b/drivers/mmc/host/tifm_sd.c @@ -1030,7 +1030,7 @@ static void tifm_sd_remove(struct tifm_dev *sock) static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state) { - return mmc_suspend_host(tifm_get_drvdata(sock)); + return 0; } static int tifm_sd_resume(struct tifm_dev *sock) @@ -1044,8 +1044,6 @@ static int tifm_sd_resume(struct tifm_dev *sock) if (rc) host->eject = 1; - else - rc = mmc_resume_host(mmc); return rc; } diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c index b3802256f954..f3b2d8ca1eca 100644 --- a/drivers/mmc/host/tmio_mmc_pio.c +++ b/drivers/mmc/host/tmio_mmc_pio.c @@ -1145,12 +1145,9 @@ int tmio_mmc_host_suspend(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct tmio_mmc_host *host = mmc_priv(mmc); - int ret = mmc_suspend_host(mmc); - if (!ret) - tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); - - return ret; + tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); + return 0; } EXPORT_SYMBOL(tmio_mmc_host_suspend); @@ -1163,7 +1160,7 @@ int tmio_mmc_host_resume(struct device *dev) 
/* The MMC core will perform the complete set up */ host->resuming = true; - return mmc_resume_host(mmc); + return 0; } EXPORT_SYMBOL(tmio_mmc_host_resume); diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index 4f84586c6e9e..63fac78b3d46 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -1269,21 +1269,18 @@ static void via_init_sdc_pm(struct via_crdr_mmc_host *host) static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state) { struct via_crdr_mmc_host *host; - int ret = 0; host = pci_get_drvdata(pcidev); via_save_pcictrlreg(host); via_save_sdcreg(host); - ret = mmc_suspend_host(host->mmc); - pci_save_state(pcidev); pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0); pci_disable_device(pcidev); pci_set_power_state(pcidev, pci_choose_state(pcidev, state)); - return ret; + return 0; } static int via_sd_resume(struct pci_dev *pcidev) @@ -1316,8 +1313,6 @@ static int via_sd_resume(struct pci_dev *pcidev) via_restore_pcictrlreg(sdhost); via_init_sdc_pm(sdhost); - ret = mmc_resume_host(sdhost->mmc); - return ret; } diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c index e9028ad05ffb..4262296c12fa 100644 --- a/drivers/mmc/host/vub300.c +++ b/drivers/mmc/host/vub300.c @@ -2392,26 +2392,12 @@ static void vub300_disconnect(struct usb_interface *interface) #ifdef CONFIG_PM static int vub300_suspend(struct usb_interface *intf, pm_message_t message) { - struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); - if (!vub300 || !vub300->mmc) { - return 0; - } else { - struct mmc_host *mmc = vub300->mmc; - mmc_suspend_host(mmc); - return 0; - } + return 0; } static int vub300_resume(struct usb_interface *intf) { - struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); - if (!vub300 || !vub300->mmc) { - return 0; - } else { - struct mmc_host *mmc = vub300->mmc; - mmc_resume_host(mmc); - return 0; - } + return 0; } #else #define vub300_suspend NULL diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index e954b7758876..1defd5ed3236 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -1814,28 +1814,11 @@ static void wbsd_pnp_remove(struct pnp_dev *dev) #ifdef CONFIG_PM -static int wbsd_suspend(struct wbsd_host *host, pm_message_t state) -{ - BUG_ON(host == NULL); - - return mmc_suspend_host(host->mmc); -} - -static int wbsd_resume(struct wbsd_host *host) -{ - BUG_ON(host == NULL); - - wbsd_init_device(host); - - return mmc_resume_host(host->mmc); -} - static int wbsd_platform_suspend(struct platform_device *dev, pm_message_t state) { struct mmc_host *mmc = platform_get_drvdata(dev); struct wbsd_host *host; - int ret; if (mmc == NULL) return 0; @@ -1844,12 +1827,7 @@ static int wbsd_platform_suspend(struct platform_device *dev, host = mmc_priv(mmc); - ret = wbsd_suspend(host, state); - if (ret) - return ret; - wbsd_chip_poweroff(host); - return 0; } @@ -1872,7 +1850,8 @@ static int wbsd_platform_resume(struct platform_device *dev) */ mdelay(5); - return wbsd_resume(host); + wbsd_init_device(host); + return 0; } #ifdef CONFIG_PNP @@ -1880,16 +1859,12 @@ static int wbsd_platform_resume(struct platform_device *dev) static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state) { struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev); - struct wbsd_host *host; if (mmc == NULL) return 0; DBGF("Suspending...\n"); - - host = mmc_priv(mmc); - - return wbsd_suspend(host, state); + return 0; } static int wbsd_pnp_resume(struct pnp_dev *pnp_dev) @@ -1922,7 +1897,8 @@ static int 
wbsd_pnp_resume(struct pnp_dev *pnp_dev) */ mdelay(5); - return wbsd_resume(host); + wbsd_init_device(host); + return 0; } #endif /* CONFIG_PNP */ diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c index 34231d5168fc..e902ed7846b0 100644 --- a/drivers/mmc/host/wmt-sdmmc.c +++ b/drivers/mmc/host/wmt-sdmmc.c @@ -212,28 +212,14 @@ struct wmt_mci_priv { static void wmt_set_sd_power(struct wmt_mci_priv *priv, int enable) { - u32 reg_tmp; - if (enable) { - if (priv->power_inverted) { - reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); - writeb(reg_tmp | BM_SD_OFF, - priv->sdmmc_base + SDMMC_BUSMODE); - } else { - reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); - writeb(reg_tmp & (~BM_SD_OFF), - priv->sdmmc_base + SDMMC_BUSMODE); - } - } else { - if (priv->power_inverted) { - reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); - writeb(reg_tmp & (~BM_SD_OFF), - priv->sdmmc_base + SDMMC_BUSMODE); - } else { - reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); - writeb(reg_tmp | BM_SD_OFF, - priv->sdmmc_base + SDMMC_BUSMODE); - } - } + u32 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); + + if (enable ^ priv->power_inverted) + reg_tmp &= ~BM_SD_OFF; + else + reg_tmp |= BM_SD_OFF; + + writeb(reg_tmp, priv->sdmmc_base + SDMMC_BUSMODE); } static void wmt_mci_read_response(struct mmc_host *mmc) @@ -939,28 +925,23 @@ static int wmt_mci_suspend(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct mmc_host *mmc = platform_get_drvdata(pdev); struct wmt_mci_priv *priv; - int ret; if (!mmc) return 0; priv = mmc_priv(mmc); - ret = mmc_suspend_host(mmc); - - if (!ret) { - reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); - writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + - SDMMC_BUSMODE); + reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); + writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + + SDMMC_BUSMODE); - reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN); - writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN); + reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN); + writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN); - writeb(0xFF, priv->sdmmc_base + SDMMC_STS0); - writeb(0xFF, priv->sdmmc_base + SDMMC_STS1); + writeb(0xFF, priv->sdmmc_base + SDMMC_STS0); + writeb(0xFF, priv->sdmmc_base + SDMMC_STS1); - clk_disable(priv->clk_sdmmc); - } - return ret; + clk_disable(priv->clk_sdmmc); + return 0; } static int wmt_mci_resume(struct device *dev) @@ -969,7 +950,6 @@ static int wmt_mci_resume(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct mmc_host *mmc = platform_get_drvdata(pdev); struct wmt_mci_priv *priv; - int ret = 0; if (mmc) { priv = mmc_priv(mmc); @@ -987,10 +967,9 @@ static int wmt_mci_resume(struct device *dev) writeb(reg_tmp | INT0_DI_INT_EN, priv->sdmmc_base + SDMMC_INTMASK0); - ret = mmc_resume_host(mmc); } - return ret; + return 0; } static const struct dev_pm_ops wmt_mci_pm = { diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index d78a97d4153a..59f08c44abdb 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c @@ -375,8 +375,7 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len, dma_dev = host->dma_chan->device; - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP; + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; phys_addr = dma_map_single(dma_dev->dev, p, len, dir); if (dma_mapping_error(dma_dev->dev, phys_addr)) { diff --git a/drivers/mtd/nand/docg4.c 
b/drivers/mtd/nand/docg4.c index bd1cb672034f..1b0265e85a06 100644 --- a/drivers/mtd/nand/docg4.c +++ b/drivers/mtd/nand/docg4.c @@ -491,7 +491,7 @@ static uint8_t docg4_read_byte(struct mtd_info *mtd) return status; } - dev_warn(doc->dev, "unexpectd call to read_byte()\n"); + dev_warn(doc->dev, "unexpected call to read_byte()\n"); return 0; } diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index 3dc1a7564d87..8b2752263db9 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c @@ -573,8 +573,6 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len, dma_dev = chan->device; dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); - flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP; - if (direction == DMA_TO_DEVICE) { dma_src = dma_addr; dma_dst = host->data_pa; diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index bc8fd362a5aa..0ec2a7e8c8a9 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -524,8 +524,9 @@ static ssize_t bonding_store_arp_interval(struct device *d, goto out; } if (bond->params.mode == BOND_MODE_ALB || - bond->params.mode == BOND_MODE_TLB) { - pr_info("%s: ARP monitoring cannot be used with ALB/TLB. Only MII monitoring is supported on %s.\n", + bond->params.mode == BOND_MODE_TLB || + bond->params.mode == BOND_MODE_8023AD) { + pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n", bond->dev->name, bond->dev->name); ret = -EINVAL; goto out; @@ -603,15 +604,14 @@ static ssize_t bonding_store_arp_targets(struct device *d, return restart_syscall(); targets = bond->params.arp_targets; - newtarget = in_aton(buf + 1); + if (!in4_pton(buf + 1, -1, (u8 *)&newtarget, -1, NULL) || + IS_IP_TARGET_UNUSABLE_ADDRESS(newtarget)) { + pr_err("%s: invalid ARP target %pI4 specified for addition\n", + bond->dev->name, &newtarget); + goto out; + } /* look for adds */ if (buf[0] == '+') { - if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { - pr_err("%s: invalid ARP target %pI4 specified for addition\n", - bond->dev->name, &newtarget); - goto out; - } - if (bond_get_targets_ip(targets, newtarget) != -1) { /* dup */ pr_err("%s: ARP target %pI4 is already present\n", bond->dev->name, &newtarget); @@ -634,12 +634,6 @@ static ssize_t bonding_store_arp_targets(struct device *d, targets[ind] = newtarget; write_unlock_bh(&bond->lock); } else if (buf[0] == '-') { - if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { - pr_err("%s: invalid ARP target %pI4 specified for removal\n", - bond->dev->name, &newtarget); - goto out; - } - ind = bond_get_targets_ip(targets, newtarget); if (ind == -1) { pr_err("%s: unable to remove nonexistent ARP target %pI4.\n", @@ -701,6 +695,8 @@ static ssize_t bonding_store_downdelay(struct device *d, int new_value, ret = count; struct bonding *bond = to_bond(d); + if (!rtnl_trylock()) + return restart_syscall(); if (!(bond->params.miimon)) { pr_err("%s: Unable to set down delay as MII monitoring is disabled\n", bond->dev->name); @@ -734,6 +730,7 @@ static ssize_t bonding_store_downdelay(struct device *d, } out: + rtnl_unlock(); return ret; } static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR, @@ -756,6 +753,8 @@ static ssize_t bonding_store_updelay(struct device *d, int new_value, ret = count; struct bonding *bond = to_bond(d); + if (!rtnl_trylock()) + return restart_syscall(); if (!(bond->params.miimon)) { pr_err("%s: Unable to set up delay as MII 
monitoring is disabled\n", bond->dev->name); @@ -789,6 +788,7 @@ static ssize_t bonding_store_updelay(struct device *d, } out: + rtnl_unlock(); return ret; } static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR, diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 77a07a12e77f..ca31286aa028 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -63,6 +63,9 @@ (((mode) == BOND_MODE_TLB) || \ ((mode) == BOND_MODE_ALB)) +#define IS_IP_TARGET_UNUSABLE_ADDRESS(a) \ + ((htonl(INADDR_BROADCAST) == a) || \ + ipv4_is_zeronet(a)) /* * Less bad way to call ioctl from within the kernel; this needs to be * done some other way to get the call out of interrupt context. diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 5aa5e8146496..c3c4c266b846 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1388,6 +1388,9 @@ static int alx_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct alx_priv *alx = pci_get_drvdata(pdev); + struct alx_hw *hw = &alx->hw; + + alx_reset_phy(hw); if (!netif_running(alx->dev)) return 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 4e01c57d8c8d..a1f66e2c9a86 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1376,7 +1376,6 @@ enum { BNX2X_SP_RTNL_RX_MODE, BNX2X_SP_RTNL_HYPERVISOR_VLAN, BNX2X_SP_RTNL_TX_STOP, - BNX2X_SP_RTNL_TX_RESUME, }; struct bnx2x_prev_path_list { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index dcafbda3e5be..ec96130533cc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2959,6 +2959,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) bp->port.pmf = 0; + /* clear pending work in rtnl task */ + bp->sp_rtnl_state = 0; + smp_mb(); + /* Free SKBs, SGEs, TPA pool and driver internals */ bnx2x_free_skbs(bp); if (CNIC_LOADED(bp)) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index fcf2761d8828..fdace204b054 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -778,11 +778,6 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) /* ets may affect cmng configuration: reinit it in hw */ bnx2x_set_local_cmng(bp); - - set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state); - - schedule_delayed_work(&bp->sp_rtnl_task, 0); - return; case BNX2X_DCBX_STATE_TX_RELEASED: DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n"); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e622cc1f96ff..814d0eca9b33 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -577,7 +577,9 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); if (rc) { BNX2X_ERR("DMAE returned failure %d\n", rc); +#ifdef BNX2X_STOP_ON_ERROR bnx2x_panic(); +#endif } } @@ -614,7 +616,9 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); if (rc) { BNX2X_ERR("DMAE returned failure %d\n", rc); +#ifdef BNX2X_STOP_ON_ERROR bnx2x_panic(); 
+#endif } } @@ -5231,18 +5235,18 @@ static void bnx2x_eq_int(struct bnx2x *bp) case EVENT_RING_OPCODE_STOP_TRAFFIC: DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); + bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_TX_STOP)) break; - bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); goto next_spqe; case EVENT_RING_OPCODE_START_TRAFFIC: DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); + bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_TX_START)) break; - bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_UPDATE: @@ -9352,6 +9356,10 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) bnx2x_process_kill_chip_reset(bp, global); barrier(); + /* clear errors in PGB */ + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); + /* Recover after reset: */ /* MCP */ if (global && bnx2x_reset_mcp_comp(bp, val)) @@ -9706,11 +9714,10 @@ sp_rtnl_not_reset: &bp->sp_rtnl_state)) bnx2x_pf_set_vfs_vlan(bp); - if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) + if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) { bnx2x_dcbx_stop_hw_tx(bp); - - if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state)) bnx2x_dcbx_resume_hw_tx(bp); + } /* work which needs rtnl lock not-taken (as it takes the lock itself and * can be called from other contexts as well) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 5ecf267dc4cc..3efbb35267c8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -2864,6 +2864,17 @@ #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430 #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434 #define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438 +/* [W 7] Writing 1 to each bit in this register clears a corresponding error + * details register and enables logging new error details. Bit 0 - clears + * INCORRECT_RCV_DETAILS; Bit 1 - clears RX_ERR_DETAILS; Bit 2 - clears + * TX_ERR_WR_ADD_31_0 TX_ERR_WR_ADD_63_32 TX_ERR_WR_DETAILS + * TX_ERR_WR_DETAILS2 TX_ERR_RD_ADD_31_0 TX_ERR_RD_ADD_63_32 + * TX_ERR_RD_DETAILS TX_ERR_RD_DETAILS2 TX_ERR_WR_DETAILS_ICPL; Bit 3 - + * clears VF_LENGTH_VIOLATION_DETAILS. Bit 4 - clears + * VF_GRC_SPACE_VIOLATION_DETAILS. Bit 5 - clears RX_TCPL_ERR_DETAILS. Bit 6 + * - clears TCPL_IN_TWO_RCBS_DETAILS. */ +#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x943c + /* [R 9] Interrupt register #0 read */ #define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298 /* [RC 9] Interrupt register #0 read clear */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 9199adf32d33..efa8a151d789 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -152,7 +152,7 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { DP(BNX2X_MSG_IOV, "detecting channel down. 
Aborting message\n"); *done = PFVF_STATUS_SUCCESS; - return 0; + return -EINVAL; } /* Write message address */ diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 00c5be8c55b8..a9e068423ba0 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -13618,16 +13618,9 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev, if (stmpconf.flags) return -EINVAL; - switch (stmpconf.tx_type) { - case HWTSTAMP_TX_ON: - tg3_flag_set(tp, TX_TSTAMP_EN); - break; - case HWTSTAMP_TX_OFF: - tg3_flag_clear(tp, TX_TSTAMP_EN); - break; - default: + if (stmpconf.tx_type != HWTSTAMP_TX_ON && + stmpconf.tx_type != HWTSTAMP_TX_OFF) return -ERANGE; - } switch (stmpconf.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -13689,6 +13682,11 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev, tw32(TG3_RX_PTP_CTL, tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); + if (stmpconf.tx_type == HWTSTAMP_TX_ON) + tg3_flag_set(tp, TX_TSTAMP_EN); + else + tg3_flag_clear(tp, TX_TSTAMP_EN); + return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? -EFAULT : 0; } diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 7fb0edfe3d24..dbcd5262c016 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1758,7 +1758,7 @@ err: /* Uses sycnhronous mcc */ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, - u32 num, bool untagged, bool promiscuous) + u32 num, bool promiscuous) { struct be_mcc_wrb *wrb; struct be_cmd_req_vlan_config *req; @@ -1778,7 +1778,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, req->interface_id = if_id; req->promiscuous = promiscuous; - req->untagged = untagged; + req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 
1 : 0; req->num_vlan = num; if (!promiscuous) { memcpy(req->normal_vlan, vtag_array, @@ -1847,7 +1847,19 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); } + if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) != + req->if_flags_mask) { + dev_warn(&adapter->pdev->dev, + "Cannot set rx filter flags 0x%x\n", + req->if_flags_mask); + dev_warn(&adapter->pdev->dev, + "Interface is capable of 0x%x flags only\n", + be_if_cap_flags(adapter)); + } + req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter)); + status = be_mcc_notify_wait(adapter); + err: spin_unlock_bh(&adapter->mcc_lock); return status; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index edf3e8a0ff83..0075686276aa 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1984,7 +1984,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver, char *fw_on_flash); int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, - u32 num, bool untagged, bool promiscuous); + u32 num, bool promiscuous); int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index eaecaadfa8c5..abde97471636 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1079,7 +1079,7 @@ static int be_vid_config(struct be_adapter *adapter) vids[num++] = cpu_to_le16(i); status = be_cmd_vlan_config(adapter, adapter->if_handle, - vids, num, 1, 0); + vids, num, 0); if (status) { /* Set to VLAN promisc mode as setting VLAN filter failed */ @@ -2676,6 +2676,11 @@ static int be_close(struct net_device *netdev) be_rx_qs_destroy(adapter); + for (i = 1; i < (adapter->uc_macs + 1); i++) + be_cmd_pmac_del(adapter, adapter->if_handle, + adapter->pmac_id[i], 0); + adapter->uc_macs = 0; + for_all_evt_queues(adapter, eqo, i) { if (msix_enabled(adapter)) synchronize_irq(be_msix_vec_get(adapter, eqo)); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index b2793b91cc55..4cbebf3d80eb 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -386,7 +386,14 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) */ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); - + if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { + bdp->cbd_bufaddr = 0; + fep->tx_skbuff[index] = NULL; + dev_kfree_skb_any(skb); + if (net_ratelimit()) + netdev_err(ndev, "Tx DMA memory map failed\n"); + return NETDEV_TX_OK; + } /* Send it on its way. Tell FEC it's ready, interrupt when done, * it's the last BD of the frame, and to put the CRC on the end. 
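The fec_main.c hunks above add the canonical map-then-check pattern: dma_map_single() may fail, and the returned handle has to be tested with dma_mapping_error() before the descriptor is armed; on the receive side the buffers now stay mapped and are handed back and forth with dma_sync_single_for_cpu()/dma_sync_single_for_device() instead of being unmapped and remapped for every frame. A generic kernel-context sketch of the transmit-side pattern (function and variable names are placeholders, not the driver's):

static netdev_tx_t my_xmit_map(struct device *dma_dev, struct sk_buff *skb)
{
	dma_addr_t handle;

	handle = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, handle)) {
		/* mapping failed: the handle must never reach the hardware */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;	/* drop the packet, as the fec change does */
	}

	/* ... write 'handle' into the buffer descriptor and start the DMA ... */
	return NETDEV_TX_OK;
}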
*/ @@ -861,6 +868,7 @@ fec_enet_rx(struct net_device *ndev, int budget) struct bufdesc_ex *ebdp = NULL; bool vlan_packet_rcvd = false; u16 vlan_tag; + int index = 0; #ifdef CONFIG_M532x flush_cache_all(); @@ -916,10 +924,15 @@ fec_enet_rx(struct net_device *ndev, int budget) ndev->stats.rx_packets++; pkt_len = bdp->cbd_datlen; ndev->stats.rx_bytes += pkt_len; - data = (__u8*)__va(bdp->cbd_bufaddr); - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE); + if (fep->bufdesc_ex) + index = (struct bufdesc_ex *)bdp - + (struct bufdesc_ex *)fep->rx_bd_base; + else + index = bdp - fep->rx_bd_base; + data = fep->rx_skbuff[index]->data; + dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(data, pkt_len); @@ -999,8 +1012,8 @@ fec_enet_rx(struct net_device *ndev, int budget) napi_gro_receive(&fep->napi, skb); } - bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, - FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE); + dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); rx_processing_done: /* Clear the status flags for this buffer */ status &= ~BD_ENET_RX_STATS; @@ -1719,6 +1732,12 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { + fec_enet_free_buffers(ndev); + if (net_ratelimit()) + netdev_err(ndev, "Rx DMA memory map failed\n"); + return -ENOMEM; + } bdp->cbd_sc = BD_ENET_RX_EMPTY; if (fep->bufdesc_ex) { diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index aedd5736a87d..8d3945ab7334 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3482,10 +3482,10 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) * specified. Matching the kind of event packet is not supported, with the * exception of "all V2 events regardless of level 2 or 4". 
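The e1000e rework that follows makes e1000e_config_hwtstamp() take the requested hwtstamp_config as an argument and copy it into the adapter only after every field has been validated, so a rejected timestamp ioctl can no longer leave stale, half-applied settings behind. A standalone model of that validate-then-commit pattern (the struct and values are illustrative, not the driver's types):

#include <stdio.h>

struct ts_config {
	int tx_type;
	int rx_filter;
};

static struct ts_config stored;		/* plays the role of adapter->hwtstamp_config */

static int config_hwtstamp(const struct ts_config *req)
{
	/* validate every field first; reject without touching 'stored' */
	if (req->tx_type != 0 && req->tx_type != 1)
		return -1;
	if (req->rx_filter < 0)
		return -1;

	stored = *req;			/* commit only once the request is known good */
	/* ... program the hardware from 'stored' here ... */
	return 0;
}

int main(void)
{
	struct ts_config bad  = { .tx_type = 7, .rx_filter = 0 };
	struct ts_config good = { .tx_type = 1, .rx_filter = 0 };

	printf("bad:  ret=%d stored.tx_type=%d\n", config_hwtstamp(&bad), stored.tx_type);
	printf("good: ret=%d stored.tx_type=%d\n", config_hwtstamp(&good), stored.tx_type);
	return 0;
}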
**/ -static int e1000e_config_hwtstamp(struct e1000_adapter *adapter) +static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, + struct hwtstamp_config *config) { struct e1000_hw *hw = &adapter->hw; - struct hwtstamp_config *config = &adapter->hwtstamp_config; u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; u32 rxmtrl = 0; @@ -3586,6 +3586,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter) return -ERANGE; } + adapter->hwtstamp_config = *config; + /* enable/disable Tx h/w time stamping */ regval = er32(TSYNCTXCTL); regval &= ~E1000_TSYNCTXCTL_ENABLED; @@ -3874,7 +3876,7 @@ void e1000e_reset(struct e1000_adapter *adapter) e1000e_reset_adaptive(hw); /* initialize systim and reset the ns time counter */ - e1000e_config_hwtstamp(adapter); + e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); /* Set EEE advertisement as appropriate */ if (adapter->flags2 & FLAG2_HAS_EEE) { @@ -5797,14 +5799,10 @@ static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - adapter->hwtstamp_config = config; - - ret_val = e1000e_config_hwtstamp(adapter); + ret_val = e1000e_config_hwtstamp(adapter, &config); if (ret_val) return ret_val; - config = adapter->hwtstamp_config; - switch (config.rx_filter) { case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 00cd36e08601..61088a6a9424 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2890,7 +2890,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev) PHY_INTERFACE_MODE_GMII); if (!mp->phy) err = -ENODEV; - phy_addr_set(mp, mp->phy->addr); + else + phy_addr_set(mp, mp->phy->addr); } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { mp->phy = phy_scan(mp, pd->phy_addr); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index fda26679f7d5..194928214606 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -1774,7 +1774,7 @@ void mlx4_opreq_action(struct work_struct *work) MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) { - mlx4_err(dev, "Failed to retreive required operation: %d\n", + mlx4_err(dev, "Failed to retrieve required operation: %d\n", err); return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 6ca30739625f..8675d26a678b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -98,6 +98,7 @@ enum { static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in, struct mlx5_cmd_msg *out, + void *uout, int uout_size, mlx5_cmd_cbk_t cbk, void *context, int page_queue) { @@ -110,6 +111,8 @@ static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, ent->in = in; ent->out = out; + ent->uout = uout; + ent->uout_size = uout_size; ent->callback = cbk; ent->context = context; ent->cmd = cmd; @@ -534,6 +537,7 @@ static void cmd_work_handler(struct work_struct *work) ent->lay = lay; memset(lay, 0, sizeof(*lay)); memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); + ent->op = be32_to_cpu(lay->in[0]) >> 16; if (ent->in->next) lay->in_ptr = cpu_to_be64(ent->in->next->dma); lay->inlen = cpu_to_be32(ent->in->len); @@ -628,7 +632,8 @@ static int 
wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) * 2. page queue commands do not support asynchrous completion */ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, - struct mlx5_cmd_msg *out, mlx5_cmd_cbk_t callback, + struct mlx5_cmd_msg *out, void *uout, int uout_size, + mlx5_cmd_cbk_t callback, void *context, int page_queue, u8 *status) { struct mlx5_cmd *cmd = &dev->cmd; @@ -642,7 +647,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, if (callback && page_queue) return -EINVAL; - ent = alloc_cmd(cmd, in, out, callback, context, page_queue); + ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context, + page_queue); if (IS_ERR(ent)) return PTR_ERR(ent); @@ -670,10 +676,10 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); if (op < ARRAY_SIZE(cmd->stats)) { stats = &cmd->stats[op]; - spin_lock(&stats->lock); + spin_lock_irq(&stats->lock); stats->sum += ds; ++stats->n; - spin_unlock(&stats->lock); + spin_unlock_irq(&stats->lock); } mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, "fw exec time for %s is %lld nsec\n", @@ -826,7 +832,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, int n; int i; - msg = kzalloc(sizeof(*msg), GFP_KERNEL); + msg = kzalloc(sizeof(*msg), flags); if (!msg) return ERR_PTR(-ENOMEM); @@ -1109,6 +1115,19 @@ void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) up(&cmd->sem); } +static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) +{ + unsigned long flags; + + if (msg->cache) { + spin_lock_irqsave(&msg->cache->lock, flags); + list_add_tail(&msg->list, &msg->cache->head); + spin_unlock_irqrestore(&msg->cache->lock, flags); + } else { + mlx5_free_cmd_msg(dev, msg); + } +} + void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector) { struct mlx5_cmd *cmd = &dev->cmd; @@ -1117,6 +1136,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector) void *context; int err; int i; + ktime_t t1, t2, delta; + s64 ds; + struct mlx5_cmd_stats *stats; + unsigned long flags; for (i = 0; i < (1 << cmd->log_sz); i++) { if (test_bit(i, &vector)) { @@ -1141,9 +1164,29 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector) } free_ent(cmd, ent->idx); if (ent->callback) { + t1 = timespec_to_ktime(ent->ts1); + t2 = timespec_to_ktime(ent->ts2); + delta = ktime_sub(t2, t1); + ds = ktime_to_ns(delta); + if (ent->op < ARRAY_SIZE(cmd->stats)) { + stats = &cmd->stats[ent->op]; + spin_lock_irqsave(&stats->lock, flags); + stats->sum += ds; + ++stats->n; + spin_unlock_irqrestore(&stats->lock, flags); + } + callback = ent->callback; context = ent->context; err = ent->ret; + if (!err) + err = mlx5_copy_from_msg(ent->uout, + ent->out, + ent->uout_size); + + mlx5_free_cmd_msg(dev, ent->out); + free_msg(dev, ent->in); + free_cmd(ent); callback(err, context); } else { @@ -1160,7 +1203,8 @@ static int status_to_err(u8 status) return status ? 
-1 : 0; /* TBD more meaningful codes */ } -static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size) +static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, + gfp_t gfp) { struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM); struct mlx5_cmd *cmd = &dev->cmd; @@ -1172,7 +1216,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size) ent = &cmd->cache.med; if (ent) { - spin_lock(&ent->lock); + spin_lock_irq(&ent->lock); if (!list_empty(&ent->head)) { msg = list_entry(ent->head.next, typeof(*msg), list); /* For cached lists, we must explicitly state what is @@ -1181,43 +1225,34 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size) msg->len = in_size; list_del(&msg->list); } - spin_unlock(&ent->lock); + spin_unlock_irq(&ent->lock); } if (IS_ERR(msg)) - msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, in_size); + msg = mlx5_alloc_cmd_msg(dev, gfp, in_size); return msg; } -static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) -{ - if (msg->cache) { - spin_lock(&msg->cache->lock); - list_add_tail(&msg->list, &msg->cache->head); - spin_unlock(&msg->cache->lock); - } else { - mlx5_free_cmd_msg(dev, msg); - } -} - static int is_manage_pages(struct mlx5_inbox_hdr *in) { return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES; } -int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, - int out_size) +static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, + int out_size, mlx5_cmd_cbk_t callback, void *context) { struct mlx5_cmd_msg *inb; struct mlx5_cmd_msg *outb; int pages_queue; + gfp_t gfp; int err; u8 status = 0; pages_queue = is_manage_pages(in); + gfp = callback ? GFP_ATOMIC : GFP_KERNEL; - inb = alloc_msg(dev, in_size); + inb = alloc_msg(dev, in_size, gfp); if (IS_ERR(inb)) { err = PTR_ERR(inb); return err; @@ -1229,13 +1264,14 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, goto out_in; } - outb = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, out_size); + outb = mlx5_alloc_cmd_msg(dev, gfp, out_size); if (IS_ERR(outb)) { err = PTR_ERR(outb); goto out_in; } - err = mlx5_cmd_invoke(dev, inb, outb, NULL, NULL, pages_queue, &status); + err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, + pages_queue, &status); if (err) goto out_out; @@ -1248,14 +1284,30 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, err = mlx5_copy_from_msg(out, outb, out_size); out_out: - mlx5_free_cmd_msg(dev, outb); + if (!callback) + mlx5_free_cmd_msg(dev, outb); out_in: - free_msg(dev, inb); + if (!callback) + free_msg(dev, inb); return err; } + +int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, + int out_size) +{ + return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL); +} EXPORT_SYMBOL(mlx5_cmd_exec); +int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, + void *out, int out_size, mlx5_cmd_cbk_t callback, + void *context) +{ + return cmd_exec(dev, in, in_size, out, out_size, callback, context); +} +EXPORT_SYMBOL(mlx5_cmd_exec_cb); + static void destroy_msg_cache(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 9c7194b26ee2..80f6d127257a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -154,10 +154,10 @@ static ssize_t average_read(struct 
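The mlx5 command rework above introduces an asynchronous entry point, mlx5_cmd_exec_cb(): when a callback is supplied the message buffers are allocated with GFP_ATOMIC, and ownership moves to the completion handler, which copies the result into the caller's output buffer, frees the internal messages and the work entry, and only then invokes the callback. A rough userspace sketch of that ownership split follows; every name in it is invented for illustration, with plain malloc() standing in for the command-message allocator and a direct function call standing in for the EQ completion path.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef void (*cmd_cbk_t)(int status, void *context);

struct cmd_ent {
    void *in, *out;     /* internal command messages */
    void *uout;         /* caller-visible output buffer (callback path) */
    size_t size;
    cmd_cbk_t callback;
    void *context;
};

/* Stand-in for the completion handler: produce a result, then, on the
 * callback path, copy it out, free the internal buffers and notify. */
static void complete_cmd(struct cmd_ent *ent)
{
    memset(ent->out, 0xab, ent->size);
    if (ent->callback) {
        memcpy(ent->uout, ent->out, ent->size);
        free(ent->in);
        free(ent->out);
        ent->callback(0, ent->context);
        free(ent);
    }
}

static int cmd_exec(const void *in, void *out, size_t size,
                    cmd_cbk_t callback, void *context)
{
    struct cmd_ent *ent = calloc(1, sizeof(*ent));

    if (!ent)
        return -1;
    ent->in = malloc(size);
    ent->out = malloc(size);
    if (!ent->in || !ent->out) {
        free(ent->in);
        free(ent->out);
        free(ent);
        return -1;
    }
    memcpy(ent->in, in, size);
    ent->uout = out;
    ent->size = size;
    ent->callback = callback;
    ent->context = context;

    complete_cmd(ent);

    if (!callback) {            /* blocking path: copy and free here */
        memcpy(out, ent->out, size);
        free(ent->in);
        free(ent->out);
        free(ent);
    }
    return 0;
}

static void done(int status, void *ctx)
{
    printf("async command done, status %d (%s)\n", status, (const char *)ctx);
}

int main(void)
{
    unsigned char in[8] = { 0 }, out[8];

    cmd_exec(in, out, sizeof(out), NULL, NULL);   /* like mlx5_cmd_exec() */
    cmd_exec(in, out, sizeof(out), done, "mkey"); /* like mlx5_cmd_exec_cb() */
    return 0;
}

On the blocking variant nothing outlives the call, which is why the patch's cmd_exec() only skips its frees when a callback was passed.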
file *filp, char __user *buf, size_t count, return 0; stats = filp->private_data; - spin_lock(&stats->lock); + spin_lock_irq(&stats->lock); if (stats->n) field = div64_u64(stats->sum, stats->n); - spin_unlock(&stats->lock); + spin_unlock_irq(&stats->lock); ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field); if (ret > 0) { if (copy_to_user(buf, tbuf, ret)) @@ -175,10 +175,10 @@ static ssize_t average_write(struct file *filp, const char __user *buf, struct mlx5_cmd_stats *stats; stats = filp->private_data; - spin_lock(&stats->lock); + spin_lock_irq(&stats->lock); stats->sum = 0; stats->n = 0; - spin_unlock(&stats->lock); + spin_unlock_irq(&stats->lock); *pos += count; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 2231d93cc7ad..64a61b286b2c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -354,7 +354,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ); in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index); in->ctx.intr = vecidx; - in->ctx.log_page_size = PAGE_SHIFT - 12; + in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT; in->events_mask = cpu_to_be64(mask); err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index bc0f5fb66e24..40a9f5ed814d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -159,6 +159,36 @@ struct mlx5_reg_host_endianess { u8 rsvd[15]; }; + +#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos)) + +enum { + MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) | + CAP_MASK(MLX5_CAP_OFF_DCT, 1), +}; + +/* selectively copy writable fields clearing any reserved area + */ +static void copy_rw_fields(struct mlx5_hca_cap *to, struct mlx5_hca_cap *from) +{ + u64 v64; + + to->log_max_qp = from->log_max_qp & 0x1f; + to->log_max_ra_req_dc = from->log_max_ra_req_dc & 0x3f; + to->log_max_ra_res_dc = from->log_max_ra_res_dc & 0x3f; + to->log_max_ra_req_qp = from->log_max_ra_req_qp & 0x3f; + to->log_max_ra_res_qp = from->log_max_ra_res_qp & 0x3f; + to->log_max_atomic_size_qp = from->log_max_atomic_size_qp; + to->log_max_atomic_size_dc = from->log_max_atomic_size_dc; + v64 = be64_to_cpu(from->flags) & MLX5_CAP_BITS_RW_MASK; + to->flags = cpu_to_be64(v64); +} + +enum { + HCA_CAP_OPMOD_GET_MAX = 0, + HCA_CAP_OPMOD_GET_CUR = 1, +}; + static int handle_hca_cap(struct mlx5_core_dev *dev) { struct mlx5_cmd_query_hca_cap_mbox_out *query_out = NULL; @@ -180,7 +210,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) } query_ctx.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP); - query_ctx.hdr.opmod = cpu_to_be16(0x1); + query_ctx.hdr.opmod = cpu_to_be16(HCA_CAP_OPMOD_GET_CUR); err = mlx5_cmd_exec(dev, &query_ctx, sizeof(query_ctx), query_out, sizeof(*query_out)); if (err) @@ -192,8 +222,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) goto query_ex; } - memcpy(&set_ctx->hca_cap, &query_out->hca_cap, - sizeof(set_ctx->hca_cap)); + copy_rw_fields(&set_ctx->hca_cap, &query_out->hca_cap); if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE) set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index 5b44e2e46daf..35e514dc7b7d 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -37,31 +37,41 @@ #include "mlx5_core.h" int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, - struct mlx5_create_mkey_mbox_in *in, int inlen) + struct mlx5_create_mkey_mbox_in *in, int inlen, + mlx5_cmd_cbk_t callback, void *context, + struct mlx5_create_mkey_mbox_out *out) { - struct mlx5_create_mkey_mbox_out out; + struct mlx5_create_mkey_mbox_out lout; int err; u8 key; - memset(&out, 0, sizeof(out)); - spin_lock(&dev->priv.mkey_lock); + memset(&lout, 0, sizeof(lout)); + spin_lock_irq(&dev->priv.mkey_lock); key = dev->priv.mkey_key++; - spin_unlock(&dev->priv.mkey_lock); + spin_unlock_irq(&dev->priv.mkey_lock); in->seg.qpn_mkey7_0 |= cpu_to_be32(key); in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY); - err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (callback) { + err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out), + callback, context); + return err; + } else { + err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout)); + } + if (err) { mlx5_core_dbg(dev, "cmd exec faile %d\n", err); return err; } - if (out.hdr.status) { - mlx5_core_dbg(dev, "status %d\n", out.hdr.status); - return mlx5_cmd_status_to_err(&out.hdr); + if (lout.hdr.status) { + mlx5_core_dbg(dev, "status %d\n", lout.hdr.status); + return mlx5_cmd_status_to_err(&lout.hdr); } - mr->key = mlx5_idx_to_mkey(be32_to_cpu(out.mkey) & 0xffffff) | key; - mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", be32_to_cpu(out.mkey), key, mr->key); + mr->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key; + mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", + be32_to_cpu(lout.mkey), key, mr->key); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 7b12acf210f8..37b6ad1f9a1b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -57,10 +57,13 @@ struct mlx5_pages_req { }; struct fw_page { - struct rb_node rb_node; - u64 addr; - struct page *page; - u16 func_id; + struct rb_node rb_node; + u64 addr; + struct page *page; + u16 func_id; + unsigned long bitmask; + struct list_head list; + unsigned free_count; }; struct mlx5_query_pages_inbox { @@ -94,6 +97,11 @@ enum { MAX_RECLAIM_TIME_MSECS = 5000, }; +enum { + MLX5_MAX_RECLAIM_TIME_MILI = 5000, + MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / 4096, +}; + static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) { struct rb_root *root = &dev->priv.page_root; @@ -101,6 +109,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u struct rb_node *parent = NULL; struct fw_page *nfp; struct fw_page *tfp; + int i; while (*new) { parent = *new; @@ -113,25 +122,29 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u return -EEXIST; } - nfp = kmalloc(sizeof(*nfp), GFP_KERNEL); + nfp = kzalloc(sizeof(*nfp), GFP_KERNEL); if (!nfp) return -ENOMEM; nfp->addr = addr; nfp->page = page; nfp->func_id = func_id; + nfp->free_count = MLX5_NUM_4K_IN_PAGE; + for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++) + set_bit(i, &nfp->bitmask); rb_link_node(&nfp->rb_node, parent, new); rb_insert_color(&nfp->rb_node, root); + list_add(&nfp->list, &dev->priv.free_list); return 0; } -static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr) +static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr) { struct 
rb_root *root = &dev->priv.page_root; struct rb_node *tmp = root->rb_node; - struct page *result = NULL; + struct fw_page *result = NULL; struct fw_page *tfp; while (tmp) { @@ -141,9 +154,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr) } else if (tfp->addr > addr) { tmp = tmp->rb_right; } else { - rb_erase(&tfp->rb_node, root); - result = tfp->page; - kfree(tfp); + result = tfp; break; } } @@ -176,12 +187,98 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, return err; } +static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr) +{ + struct fw_page *fp; + unsigned n; + + if (list_empty(&dev->priv.free_list)) { + return -ENOMEM; + mlx5_core_warn(dev, "\n"); + } + + fp = list_entry(dev->priv.free_list.next, struct fw_page, list); + n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask)); + if (n >= MLX5_NUM_4K_IN_PAGE) { + mlx5_core_warn(dev, "alloc 4k bug\n"); + return -ENOENT; + } + clear_bit(n, &fp->bitmask); + fp->free_count--; + if (!fp->free_count) + list_del(&fp->list); + + *addr = fp->addr + n * 4096; + + return 0; +} + +static void free_4k(struct mlx5_core_dev *dev, u64 addr) +{ + struct fw_page *fwp; + int n; + + fwp = find_fw_page(dev, addr & PAGE_MASK); + if (!fwp) { + mlx5_core_warn(dev, "page not found\n"); + return; + } + + n = (addr & ~PAGE_MASK) % 4096; + fwp->free_count++; + set_bit(n, &fwp->bitmask); + if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) { + rb_erase(&fwp->rb_node, &dev->priv.page_root); + if (fwp->free_count != 1) + list_del(&fwp->list); + dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(fwp->page); + kfree(fwp); + } else if (fwp->free_count == 1) { + list_add(&fwp->list, &dev->priv.free_list); + } +} + +static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) +{ + struct page *page; + u64 addr; + int err; + + page = alloc_page(GFP_HIGHUSER); + if (!page) { + mlx5_core_warn(dev, "failed to allocate page\n"); + return -ENOMEM; + } + addr = dma_map_page(&dev->pdev->dev, page, 0, + PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&dev->pdev->dev, addr)) { + mlx5_core_warn(dev, "failed dma mapping page\n"); + err = -ENOMEM; + goto out_alloc; + } + err = insert_page(dev, addr, page, func_id); + if (err) { + mlx5_core_err(dev, "failed to track allocated page\n"); + goto out_mapping; + } + + return 0; + +out_mapping: + dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + +out_alloc: + __free_page(page); + + return err; +} static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, int notify_fail) { struct mlx5_manage_pages_inbox *in; struct mlx5_manage_pages_outbox out; - struct page *page; + struct mlx5_manage_pages_inbox *nin; int inlen; u64 addr; int err; @@ -196,27 +293,15 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, memset(&out, 0, sizeof(out)); for (i = 0; i < npages; i++) { - page = alloc_page(GFP_HIGHUSER); - if (!page) { - err = -ENOMEM; - mlx5_core_warn(dev, "failed to allocate page\n"); - goto out_alloc; - } - addr = dma_map_page(&dev->pdev->dev, page, 0, - PAGE_SIZE, DMA_BIDIRECTIONAL); - if (dma_mapping_error(&dev->pdev->dev, addr)) { - mlx5_core_warn(dev, "failed dma mapping page\n"); - __free_page(page); - err = -ENOMEM; - goto out_alloc; - } - err = insert_page(dev, addr, page, func_id); +retry: + err = alloc_4k(dev, &addr); if (err) { - mlx5_core_err(dev, "failed to track allocated page\n"); - dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); - __free_page(page); - 
err = -ENOMEM; - goto out_alloc; + if (err == -ENOMEM) + err = alloc_system_page(dev, func_id); + if (err) + goto out_4k; + + goto retry; } in->pas[i] = cpu_to_be64(addr); } @@ -226,7 +311,6 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, in->func_id = cpu_to_be16(func_id); in->num_entries = cpu_to_be32(npages); err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); - mlx5_core_dbg(dev, "err %d\n", err); if (err) { mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err); goto out_alloc; @@ -247,25 +331,22 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, out_alloc: if (notify_fail) { - memset(in, 0, inlen); - memset(&out, 0, sizeof(out)); - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); - in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE); - if (mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out))) - mlx5_core_warn(dev, "\n"); - } - for (i--; i >= 0; i--) { - addr = be64_to_cpu(in->pas[i]); - page = remove_page(dev, addr); - if (!page) { - mlx5_core_err(dev, "BUG: can't remove page at addr 0x%llx\n", - addr); - continue; + nin = kzalloc(sizeof(*nin), GFP_KERNEL); + if (!nin) { + mlx5_core_warn(dev, "allocation failed\n"); + goto out_4k; } - dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); - __free_page(page); + memset(&out, 0, sizeof(out)); + nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); + nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE); + if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out))) + mlx5_core_warn(dev, "page notify failed\n"); + kfree(nin); } +out_4k: + for (i--; i >= 0; i--) + free_4k(dev, be64_to_cpu(in->pas[i])); out_free: mlx5_vfree(in); return err; @@ -276,7 +357,6 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, { struct mlx5_manage_pages_inbox in; struct mlx5_manage_pages_outbox *out; - struct page *page; int num_claimed; int outlen; u64 addr; @@ -315,13 +395,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, for (i = 0; i < num_claimed; i++) { addr = be64_to_cpu(out->pas[i]); - page = remove_page(dev, addr); - if (!page) { - mlx5_core_warn(dev, "FW reported unknown DMA address 0x%llx\n", addr); - } else { - dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); - __free_page(page); - } + free_4k(dev, addr); } out_free: @@ -381,14 +455,19 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) return give_pages(dev, func_id, npages, 0); } +enum { + MLX5_BLKS_FOR_RECLAIM_PAGES = 12 +}; + static int optimal_reclaimed_pages(void) { struct mlx5_cmd_prot_block *block; struct mlx5_cmd_layout *lay; int ret; - ret = (sizeof(lay->in) + sizeof(block->data) - - sizeof(struct mlx5_manage_pages_outbox)) / 8; + ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) - + sizeof(struct mlx5_manage_pages_outbox)) / + FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]); return ret; } @@ -427,6 +506,7 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) void mlx5_pagealloc_init(struct mlx5_core_dev *dev) { dev->priv.page_root = RB_ROOT; + INIT_LIST_HEAD(&dev->priv.free_list); } void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index 0951f7aca1ef..822616e3c375 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -459,8 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev) 
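The pagealloc.c changes above decouple the 4 KiB pages the firmware asks for from the (possibly larger) host page size: each system page is tracked by a struct fw_page carrying a bitmask of free 4 KiB chunks plus a free counter, alloc_4k() hands out chunks from a free list (falling back to alloc_system_page() when it runs dry), and free_4k() returns a chunk, releasing the DMA mapping and the page only once every chunk is back. A standalone sketch of that bookkeeping, assuming a pretend 16 KiB system page and skipping the red-black tree and DMA mapping entirely:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SYS_PAGE_SIZE   16384UL                 /* pretend host page size */
#define CHUNK_SIZE      4096UL                  /* firmware page size */
#define CHUNKS_PER_PAGE (SYS_PAGE_SIZE / CHUNK_SIZE)

struct fw_page {
    uintptr_t addr;                             /* base of the system page */
    unsigned long bitmask;                      /* bit set = chunk is free */
    unsigned int free_count;
};

static int alloc_4k(struct fw_page *fp, uintptr_t *addr)
{
    unsigned int n;

    if (!fp->free_count)
        return -1;                              /* would allocate a fresh page */
    for (n = 0; n < CHUNKS_PER_PAGE; n++)
        if (fp->bitmask & (1UL << n))
            break;
    if (n == CHUNKS_PER_PAGE)
        return -1;                              /* bookkeeping bug */
    fp->bitmask &= ~(1UL << n);
    fp->free_count--;
    *addr = fp->addr + n * CHUNK_SIZE;
    return 0;
}

static void free_4k(struct fw_page *fp, uintptr_t addr)
{
    unsigned int n = (addr - fp->addr) / CHUNK_SIZE;

    fp->bitmask |= 1UL << n;
    if (++fp->free_count == CHUNKS_PER_PAGE)
        printf("page %#lx fully free, release mapping and page here\n",
               (unsigned long)fp->addr);
}

int main(void)
{
    struct fw_page fp = {
        .addr = (uintptr_t)malloc(SYS_PAGE_SIZE),
        .bitmask = (1UL << CHUNKS_PER_PAGE) - 1,
        .free_count = CHUNKS_PER_PAGE,
    };
    uintptr_t a, b;

    alloc_4k(&fp, &a);
    alloc_4k(&fp, &b);
    printf("chunks at %#lx and %#lx\n", (unsigned long)a, (unsigned long)b);
    free_4k(&fp, a);
    free_4k(&fp, b);
    free((void *)fp.addr);
    return 0;
}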
sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, - &ctl->sg, 1, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); + &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); if (!ctl->adesc) return NETDEV_TX_BUSY; @@ -571,8 +570,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev) sg_dma_len(sg) = DMA_BUFFER_SIZE; ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, - sg, 1, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); + sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); if (!ctl->adesc) goto out; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 5a0f04c2c813..27ffe0ebf0a6 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -245,16 +245,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) /* Get ieee1588's dev information */ pdev = adapter->ptp_pdev; - switch (cfg.tx_type) { - case HWTSTAMP_TX_OFF: - adapter->hwts_tx_en = 0; - break; - case HWTSTAMP_TX_ON: - adapter->hwts_tx_en = 1; - break; - default: + if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) return -ERANGE; - } switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -284,6 +276,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return -ERANGE; } + adapter->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON; + /* Clear out any old time stamps. */ pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index a01a6a74ee3a..b1cb0ffb15c7 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -1730,7 +1730,7 @@ static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter, struct qlcnic_hardware_context *ahw = adapter->ahw; int temp; - netdev_info(adapter->netdev, "Recieved loopback IDC time extend event for 0x%x seconds\n", + netdev_info(adapter->netdev, "Received loopback IDC time extend event for 0x%x seconds\n", ahw->extend_lb_time); temp = ahw->extend_lb_time * 1000; *max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8d4ccd35a016..8a7a23a84ac5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -435,16 +435,9 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) if (config.flags) return -EINVAL; - switch (config.tx_type) { - case HWTSTAMP_TX_OFF: - priv->hwts_tx_en = 0; - break; - case HWTSTAMP_TX_ON: - priv->hwts_tx_en = 1; - break; - default: + if (config.tx_type != HWTSTAMP_TX_OFF && + config.tx_type != HWTSTAMP_TX_ON) return -ERANGE; - } if (priv->adv_ts) { switch (config.rx_filter) { @@ -576,6 +569,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) } } priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 
0 : 1); + priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; if (!priv->hwts_tx_en && !priv->hwts_rx_en) priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 90d41d26ec6d..7536a4c01293 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -967,14 +967,19 @@ static inline void cpsw_add_dual_emac_def_ale_entries( priv->host_port, ALE_VLAN, slave->port_vlan); } -static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) +static void soft_reset_slave(struct cpsw_slave *slave) { char name[32]; - u32 slave_port; - - sprintf(name, "slave-%d", slave->slave_num); + snprintf(name, sizeof(name), "slave-%d", slave->slave_num); soft_reset(name, &slave->sliver->soft_reset); +} + +static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + u32 slave_port; + + soft_reset_slave(slave); /* setup priority mapping */ __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map); @@ -1323,6 +1328,10 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) struct cpts *cpts = priv->cpts; struct hwtstamp_config cfg; + if (priv->version != CPSW_VERSION_1 && + priv->version != CPSW_VERSION_2) + return -EOPNOTSUPP; + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; @@ -1330,16 +1339,8 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) if (cfg.flags) return -EINVAL; - switch (cfg.tx_type) { - case HWTSTAMP_TX_OFF: - cpts->tx_enable = 0; - break; - case HWTSTAMP_TX_ON: - cpts->tx_enable = 1; - break; - default: + if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) return -ERANGE; - } switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -1366,6 +1367,8 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) return -ERANGE; } + cpts->tx_enable = cfg.tx_type == HWTSTAMP_TX_ON; + switch (priv->version) { case CPSW_VERSION_1: cpsw_hwtstamp_v1(priv); @@ -1374,7 +1377,7 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) cpsw_hwtstamp_v2(priv); break; default: - return -ENOTSUPP; + WARN_ON(1); } return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; @@ -2173,8 +2176,9 @@ static int cpsw_suspend(struct device *dev) if (netif_running(ndev)) cpsw_ndo_stop(ndev); - soft_reset("sliver 0", &priv->slaves[0].sliver->soft_reset); - soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset); + + for_each_slave(priv, soft_reset_slave); + pm_runtime_put_sync(&pdev->dev); /* Select sleep pin state */ diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index e78802e75ea6..bcc224a83734 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -389,16 +389,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) ch = PORT2CHANNEL(port); regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; - switch (cfg.tx_type) { - case HWTSTAMP_TX_OFF: - port->hwts_tx_en = 0; - break; - case HWTSTAMP_TX_ON: - port->hwts_tx_en = 1; - break; - default: + if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) return -ERANGE; - } switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -416,6 +408,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return -ERANGE; } + port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON; + /* Clear out any old time stamps. 
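The hwtstamp ioctl handlers touched here (pch_gbe, stmmac, cpsw, and ixp4xx just below) all move to the same shape: the tx_type switch that used to set the enable flag before rx_filter had been validated becomes an early range check, and the flag is committed only after the whole request has been accepted, so a rejected ioctl can no longer leave timestamping half-configured. A reduced sketch of that ordering; the struct and enum names below are trimmed stand-ins, not the uapi definitions:

#include <stdbool.h>
#include <stdio.h>

enum { HWTSTAMP_TX_OFF, HWTSTAMP_TX_ON };
enum { HWTSTAMP_FILTER_NONE, HWTSTAMP_FILTER_ALL };

struct tstamp_config { int tx_type; int rx_filter; };
struct fake_priv { bool hwts_tx_en; bool hwts_rx_en; };

static int hwtstamp_set(struct fake_priv *priv, const struct tstamp_config *cfg)
{
    if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
        return -1;                      /* -ERANGE in the drivers */

    switch (cfg->rx_filter) {
    case HWTSTAMP_FILTER_NONE:
    case HWTSTAMP_FILTER_ALL:
        break;
    default:
        return -1;
    }

    /* Nothing in priv was touched before this point. */
    priv->hwts_tx_en = cfg->tx_type == HWTSTAMP_TX_ON;
    priv->hwts_rx_en = cfg->rx_filter != HWTSTAMP_FILTER_NONE;
    return 0;
}

int main(void)
{
    struct fake_priv priv = { false, false };
    struct tstamp_config bad = { 42, HWTSTAMP_FILTER_ALL };
    struct tstamp_config good = { HWTSTAMP_TX_ON, HWTSTAMP_FILTER_ALL };
    int ret;

    ret = hwtstamp_set(&priv, &bad);
    printf("bad request:  ret=%d tx_en=%d\n", ret, (int)priv.hwts_tx_en);
    ret = hwtstamp_set(&priv, &good);
    printf("good request: ret=%d tx_en=%d\n", ret, (int)priv.hwts_tx_en);
    return 0;
}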
*/ __raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event); diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index 7bbd318bc93e..befa45f809c3 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c @@ -627,7 +627,7 @@ static int ali_ircc_setup(chipio_t *info) /* * Function ali_ircc_read_dongle_id (int index, info) * - * Try to read dongle indentification. This procedure needs to be executed + * Try to read dongle identification. This procedure needs to be executed * once after power-on/reset. It also needs to be used whenever you suspect * that the user may have plugged/unplugged the IrDA Dongle. */ diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c index ceeb53737f86..66bc03bdb138 100644 --- a/drivers/net/irda/nsc-ircc.c +++ b/drivers/net/irda/nsc-ircc.c @@ -1035,7 +1035,7 @@ static int nsc_ircc_setup(chipio_t *info) /* * Function nsc_ircc_read_dongle_id (void) * - * Try to read dongle indentification. This procedure needs to be executed + * Try to read dongle identification. This procedure needs to be executed * once after power-on/reset. It also needs to be used whenever you suspect * that the user may have plugged/unplugged the IrDA Dongle. */ diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 9dccb1edfd2a..dc76670c2f2a 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -628,6 +628,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, const struct iovec *iv, unsigned long total_len, size_t count, int noblock) { + int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN); struct sk_buff *skb; struct macvlan_dev *vlan; unsigned long len = total_len; @@ -670,6 +671,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { copylen = vnet_hdr.hdr_len ?
vnet_hdr.hdr_len : GOODCOPY_LEN; + if (copylen > good_linear) + copylen = good_linear; linear = copylen; if (iov_pages(iv, vnet_hdr_len + copylen, count) <= MAX_SKB_FRAGS) @@ -678,7 +681,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, if (!zerocopy) { copylen = len; - linear = vnet_hdr.hdr_len; + if (vnet_hdr.hdr_len > good_linear) + linear = good_linear; + else + linear = vnet_hdr.hdr_len; } skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 6574eb8766f9..34b0de09d881 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2650,7 +2650,7 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb, return err; } -static struct genl_ops team_nl_ops[] = { +static const struct genl_ops team_nl_ops[] = { { .cmd = TEAM_CMD_NOOP, .doit = team_nl_cmd_noop, @@ -2676,15 +2676,15 @@ static struct genl_ops team_nl_ops[] = { }, }; -static struct genl_multicast_group team_change_event_mcgrp = { - .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, +static const struct genl_multicast_group team_nl_mcgrps[] = { + { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, }, }; static int team_nl_send_multicast(struct sk_buff *skb, struct team *team, u32 portid) { - return genlmsg_multicast_netns(dev_net(team->dev), skb, 0, - team_change_event_mcgrp.id, GFP_KERNEL); + return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev), + skb, 0, 0, GFP_KERNEL); } static int team_nl_send_event_options_get(struct team *team, @@ -2703,23 +2703,8 @@ static int team_nl_send_event_port_get(struct team *team, static int team_nl_init(void) { - int err; - - err = genl_register_family_with_ops(&team_nl_family, team_nl_ops, - ARRAY_SIZE(team_nl_ops)); - if (err) - return err; - - err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp); - if (err) - goto err_change_event_grp_reg; - - return 0; - -err_change_event_grp_reg: - genl_unregister_family(&team_nl_family); - - return err; + return genl_register_family_with_ops_groups(&team_nl_family, team_nl_ops, + team_nl_mcgrps); } static void team_nl_fini(void) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 7cb105c103fe..782e38bfc1ee 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -981,6 +981,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, struct sk_buff *skb; size_t len = total_len, align = NET_SKB_PAD, linear; struct virtio_net_hdr gso = { 0 }; + int good_linear; int offset = 0; int copylen; bool zerocopy = false; @@ -1021,12 +1022,16 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, return -EINVAL; } + good_linear = SKB_MAX_HEAD(align); + if (msg_control) { /* There are 256 bytes to be copied in skb, so there is * enough room for skb expand head in case it is used. * The rest of the buffer is mapped from userspace. */ copylen = gso.hdr_len ? 
gso.hdr_len : GOODCOPY_LEN; + if (copylen > good_linear) + copylen = good_linear; linear = copylen; if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS) zerocopy = true; @@ -1034,7 +1039,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (!zerocopy) { copylen = len; - linear = gso.hdr_len; + if (gso.hdr_len > good_linear) + linear = good_linear; + else + linear = gso.hdr_len; } skb = tun_alloc_skb(tfile, align, copylen, linear, noblock); diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index f74786aa37be..e15ec2b12035 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -66,7 +66,7 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx); static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); static struct usb_driver cdc_ncm_driver; -static u8 cdc_ncm_setup(struct usbnet *dev) +static int cdc_ncm_setup(struct usbnet *dev) { struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; struct usb_cdc_ncm_ntb_parameters ncm_parm; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 90a429b7ebad..8494bb53ebdc 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -204,9 +204,6 @@ static void intr_complete (struct urb *urb) break; } - if (!netif_running (dev->net)) - return; - status = usb_submit_urb (urb, GFP_ATOMIC); if (status != 0) netif_err(dev, timer, dev->net, diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index cdc7c90a6a9e..7bab4de658a9 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -36,7 +36,10 @@ module_param(csum, bool, 0444); module_param(gso, bool, 0444); /* FIXME: MTU in config. */ -#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) +#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) +#define MERGE_BUFFER_LEN (ALIGN(GOOD_PACKET_LEN + \ + sizeof(struct virtio_net_hdr_mrg_rxbuf), \ + L1_CACHE_BYTES)) #define GOOD_COPY_LEN 128 #define VIRTNET_DRIVER_VERSION "1.0.0" @@ -314,10 +317,10 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) head_skb->dev->stats.rx_length_errors++; return -EINVAL; } - if (unlikely(len > MAX_PACKET_LEN)) { + if (unlikely(len > MERGE_BUFFER_LEN)) { pr_debug("%s: rx error: merge buffer too long\n", head_skb->dev->name); - len = MAX_PACKET_LEN; + len = MERGE_BUFFER_LEN; } if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); @@ -336,18 +339,17 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) if (curr_skb != head_skb) { head_skb->data_len += len; head_skb->len += len; - head_skb->truesize += MAX_PACKET_LEN; + head_skb->truesize += MERGE_BUFFER_LEN; } page = virt_to_head_page(buf); offset = buf - (char *)page_address(page); if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { put_page(page); skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, - len, MAX_PACKET_LEN); + len, MERGE_BUFFER_LEN); } else { skb_add_rx_frag(curr_skb, num_skb_frags, page, - offset, len, - MAX_PACKET_LEN); + offset, len, MERGE_BUFFER_LEN); } --rq->num; } @@ -383,7 +385,7 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) struct page *page = virt_to_head_page(buf); skb = page_to_skb(rq, page, (char *)buf - (char *)page_address(page), - len, MAX_PACKET_LEN); + len, MERGE_BUFFER_LEN); if (unlikely(!skb)) { dev->stats.rx_dropped++; put_page(page); @@ -471,11 +473,11 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp) struct 
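Both macvtap_get_user() and tun_get_user() above gain a good_linear limit derived from SKB_MAX_HEAD(): a header length advertised by the guest in the virtio-net header can no longer force an oversized linear allocation, on either the zerocopy path (where copylen itself is clamped) or the copy-everything path (where only the linear part is clamped). The sketch below isolates just that size computation; the constant 128 stands in for GOODCOPY_LEN and the struct is invented:

#include <stddef.h>
#include <stdio.h>

struct alloc_plan { size_t copylen; size_t linear; };

static struct alloc_plan plan_skb(size_t total_len, size_t hdr_len,
                                  size_t good_linear, int zerocopy)
{
    struct alloc_plan p;

    if (zerocopy) {
        /* Copy only the headers; the rest gets mapped from userspace. */
        p.copylen = hdr_len ? hdr_len : 128;    /* 128 ~ GOODCOPY_LEN */
        if (p.copylen > good_linear)
            p.copylen = good_linear;
        p.linear = p.copylen;
    } else {
        /* Copy the whole frame, but cap the linear area. */
        p.copylen = total_len;
        p.linear = hdr_len > good_linear ? good_linear : hdr_len;
    }
    return p;
}

int main(void)
{
    /* A peer claiming a 60000-byte header no longer wins: */
    struct alloc_plan p = plan_skb(65000, 60000, 16384, 0);

    printf("copy path:     copylen=%zu linear=%zu\n", p.copylen, p.linear);
    p = plan_skb(65000, 60000, 16384, 1);
    printf("zerocopy path: copylen=%zu linear=%zu\n", p.copylen, p.linear);
    return 0;
}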
skb_vnet_hdr *hdr; int err; - skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp); + skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp); if (unlikely(!skb)) return -ENOMEM; - skb_put(skb, MAX_PACKET_LEN); + skb_put(skb, GOOD_PACKET_LEN); hdr = skb_vnet_hdr(skb); sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr); @@ -542,20 +544,20 @@ static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) int err; if (gfp & __GFP_WAIT) { - if (skb_page_frag_refill(MAX_PACKET_LEN, &vi->alloc_frag, + if (skb_page_frag_refill(MERGE_BUFFER_LEN, &vi->alloc_frag, gfp)) { buf = (char *)page_address(vi->alloc_frag.page) + vi->alloc_frag.offset; get_page(vi->alloc_frag.page); - vi->alloc_frag.offset += MAX_PACKET_LEN; + vi->alloc_frag.offset += MERGE_BUFFER_LEN; } } else { - buf = netdev_alloc_frag(MAX_PACKET_LEN); + buf = netdev_alloc_frag(MERGE_BUFFER_LEN); } if (!buf) return -ENOMEM; - sg_init_one(rq->sg, buf, MAX_PACKET_LEN); + sg_init_one(rq->sg, buf, MERGE_BUFFER_LEN); err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp); if (err < 0) put_page(virt_to_head_page(buf)); @@ -1619,8 +1621,8 @@ static int virtnet_probe(struct virtio_device *vdev) if (err) goto free_stats; - netif_set_real_num_tx_queues(dev, 1); - netif_set_real_num_rx_queues(dev, 1); + netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); + netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); err = register_netdev(dev); if (err) { diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index f8d59c7b9082..9e86a811086f 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -2363,7 +2363,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, break; default: ret = -ENODEV; - ath10k_err("Unkown device ID: %d\n", pci_dev->device); + ath10k_err("Unknown device ID: %d\n", pci_dev->device); goto err_ar_pci; } diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c index ce86f158423b..ba200b24be64 100644 --- a/drivers/net/wireless/ath/ath5k/dma.c +++ b/drivers/net/wireless/ath/ath5k/dma.c @@ -661,7 +661,7 @@ ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask) ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1, AR5K_SISR1_QCU_TXEOL); - /* Currently this is not much usefull since we treat + /* Currently this is not much useful since we treat * all queues the same way if we get a TXURN (update * tx trigger level) but we might need it later on*/ if (pisr & AR5K_ISR_TXURN) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index b07f164d65cf..20e49095db2a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c @@ -187,17 +187,17 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah) INIT_INI_ARRAY(&ah->iniCckfirJapan2484, ar9485_1_1_baseband_core_txfir_coeff_japan_2484); - /* Load PCIE SERDES settings from INI */ - - /* Awake Setting */ - - INIT_INI_ARRAY(&ah->iniPcieSerdes, - ar9485_1_1_pcie_phy_clkreq_disable_L1); - - /* Sleep Setting */ - - INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, - ar9485_1_1_pcie_phy_clkreq_disable_L1); + if (ah->config.no_pll_pwrsave) { + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9485_1_1_pcie_phy_clkreq_disable_L1); + INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, + ar9485_1_1_pcie_phy_clkreq_disable_L1); + } else { + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1); + INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, + 
ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1); + } } else if (AR_SREV_9462_21(ah)) { INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p1_mac_core); diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h index 6f899c692647..7c1845221e1c 100644 --- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h @@ -32,13 +32,6 @@ static const u32 ar9485_1_1_mac_postamble[][5] = { {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440}, }; -static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = { - /* Addr allmodes */ - {0x00018c00, 0x18012e5e}, - {0x00018c04, 0x000801d8}, - {0x00018c08, 0x0000080c}, -}; - static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = { /* Addr allmodes */ {0x00009e00, 0x037216a0}, @@ -1101,20 +1094,6 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = { {0x0000a1fc, 0x00000296}, }; -static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = { - /* Addr allmodes */ - {0x00018c00, 0x18052e5e}, - {0x00018c04, 0x000801d8}, - {0x00018c08, 0x0000080c}, -}; - -static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = { - /* Addr allmodes */ - {0x00018c00, 0x18053e5e}, - {0x00018c04, 0x000801d8}, - {0x00018c08, 0x0000080c}, -}; - static const u32 ar9485_1_1_soc_preamble[][2] = { /* Addr allmodes */ {0x00004014, 0xba280400}, @@ -1173,13 +1152,6 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = { {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, }; -static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = { - /* Addr allmodes */ - {0x00018c00, 0x18013e5e}, - {0x00018c04, 0x000801d8}, - {0x00018c08, 0x0000080c}, -}; - static const u32 ar9485_1_1_radio_postamble[][2] = { /* Addr allmodes */ {0x0001609c, 0x0b283f31}, @@ -1358,4 +1330,18 @@ static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = { {0x0000a3a0, 0xca9228ee}, }; +static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = { + /* Addr allmodes */ + {0x00018c00, 0x18013e5e}, + {0x00018c04, 0x000801d8}, + {0x00018c08, 0x0000080c}, +}; + +static const u32 ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1[][2] = { + /* Addr allmodes */ + {0x00018c00, 0x1801265e}, + {0x00018c04, 0x000801d8}, + {0x00018c08, 0x0000080c}, +}; + #endif /* INITVALS_9485_H */ diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index e7a38d844a6a..60a5da53668f 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -632,15 +632,16 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs); /* Main driver core */ /********************/ -#define ATH9K_PCI_CUS198 0x0001 -#define ATH9K_PCI_CUS230 0x0002 -#define ATH9K_PCI_CUS217 0x0004 -#define ATH9K_PCI_CUS252 0x0008 -#define ATH9K_PCI_WOW 0x0010 -#define ATH9K_PCI_BT_ANT_DIV 0x0020 -#define ATH9K_PCI_D3_L1_WAR 0x0040 -#define ATH9K_PCI_AR9565_1ANT 0x0080 -#define ATH9K_PCI_AR9565_2ANT 0x0100 +#define ATH9K_PCI_CUS198 0x0001 +#define ATH9K_PCI_CUS230 0x0002 +#define ATH9K_PCI_CUS217 0x0004 +#define ATH9K_PCI_CUS252 0x0008 +#define ATH9K_PCI_WOW 0x0010 +#define ATH9K_PCI_BT_ANT_DIV 0x0020 +#define ATH9K_PCI_D3_L1_WAR 0x0040 +#define ATH9K_PCI_AR9565_1ANT 0x0080 +#define ATH9K_PCI_AR9565_2ANT 0x0100 +#define ATH9K_PCI_NO_PLL_PWRSAVE 0x0200 /* * Default cache line size, in bytes. 
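The AR9485 PLL power-save handling here is a three-stage plumbing job: a PCI subsystem-ID entry (see the ath9k/pci.c table further down) sets ATH9K_PCI_NO_PLL_PWRSAVE in driver_data, ath9k_init_platform() copies that into ah->config.no_pll_pwrsave, and ar9003_hw_init_mode_regs() uses the flag to pick between the clkreq_disable_L1 SERDES table and the new pll_on_cdr_on_clkreq_disable_L1 one. A compressed userspace sketch of that flow, with the quirk table reduced to a single entry taken from the patch and every other name invented:

#include <stdbool.h>
#include <stdio.h>

#define QUIRK_NO_PLL_PWRSAVE 0x0200    /* mirrors ATH9K_PCI_NO_PLL_PWRSAVE */

struct pci_match {
    unsigned short subvendor, subdevice;
    unsigned long driver_data;
};

static const struct pci_match quirk_table[] = {
    { 0x11ad, 0x6627, QUIRK_NO_PLL_PWRSAVE },  /* LITEON entry from the patch */
    { 0, 0, 0 },
};

struct hw_config { bool no_pll_pwrsave; };

static void init_platform(struct hw_config *cfg,
                          unsigned short subvendor, unsigned short subdevice)
{
    const struct pci_match *m;

    for (m = quirk_table; m->subvendor; m++)
        if (m->subvendor == subvendor && m->subdevice == subdevice)
            cfg->no_pll_pwrsave = m->driver_data & QUIRK_NO_PLL_PWRSAVE;
}

static const char *pick_serdes_initvals(const struct hw_config *cfg)
{
    /* ar9003_hw_init_mode_regs() chooses between two tables the same way. */
    return cfg->no_pll_pwrsave ? "ar9485_1_1_pcie_phy_clkreq_disable_L1"
                               : "ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1";
}

int main(void)
{
    struct hw_config cfg = { false };

    init_platform(&cfg, 0x11ad, 0x6627);
    printf("programming %s\n", pick_serdes_initvals(&cfg));
    return 0;
}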
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c index 90b8342d1ed4..8824610c21fb 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_debug.c +++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c @@ -44,14 +44,20 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf, if (buf == NULL) return -ENOMEM; - if (sc->dfs_detector) - dfs_pool_stats = sc->dfs_detector->get_stats(sc->dfs_detector); - len += scnprintf(buf + len, size - len, "DFS support for " "macVersion = 0x%x, macRev = 0x%x: %s\n", hw_ver->macVersion, hw_ver->macRev, (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ? "enabled" : "disabled"); + + if (!sc->dfs_detector) { + len += scnprintf(buf + len, size - len, + "DFS detector not enabled\n"); + goto exit; + } + + dfs_pool_stats = sc->dfs_detector->get_stats(sc->dfs_detector); + len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n"); ATH9K_DFS_STAT("pulse events reported ", pulses_total); ATH9K_DFS_STAT("invalid pulse events ", pulses_no_dfs); @@ -76,6 +82,7 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf, ATH9K_DFS_POOL_STAT("Seqs. alloc error ", pseq_alloc_error); ATH9K_DFS_POOL_STAT("Seqs. in use ", pseq_used); +exit: if (len > size) len = size; diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 9ea24f1cba73..a2c9a5dbac6b 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -316,6 +316,7 @@ struct ath9k_ops_config { u32 ant_ctrl_comm2g_switch_enable; bool xatten_margin_cfg; bool alt_mingainidx; + bool no_pll_pwrsave; }; enum ath9k_int { diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index d8643ebabd30..710192ed27ed 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -609,6 +609,11 @@ static void ath9k_init_platform(struct ath_softc *sc) ah->config.pcie_waen = 0x0040473b; ath_info(common, "Enable WAR for ASPM D3/L1\n"); } + + if (sc->driver_data & ATH9K_PCI_NO_PLL_PWRSAVE) { + ah->config.no_pll_pwrsave = true; + ath_info(common, "Disable PLL PowerSave\n"); + } } static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob, @@ -863,8 +868,8 @@ static const struct ieee80211_iface_combination if_comb[] = { .max_interfaces = 1, .num_different_channels = 1, .beacon_int_infra_match = true, - .radar_detect_widths = BIT(NL80211_CHAN_NO_HT) | - BIT(NL80211_CHAN_HT20), + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20), } }; diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 7e4c2524b630..b5656fce4ff5 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -195,6 +195,93 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { 0x3219), .driver_data = ATH9K_PCI_BT_ANT_DIV }, + /* AR9485 cards with PLL power-save disabled by default. 
*/ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x2C97), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x2100), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x1C56, /* ASKEY */ + 0x4001), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x11AD, /* LITEON */ + 0x6627), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x11AD, /* LITEON */ + 0x6628), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_FOXCONN, + 0xE04E), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_FOXCONN, + 0xE04F), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x144F, /* ASKEY */ + 0x7197), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x1B9A, /* XAVI */ + 0x2000), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x1B9A, /* XAVI */ + 0x2001), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x1186), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x1F86), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x1195), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x1F95), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x1B9A, /* XAVI */ + 0x1C00), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x1B9A, /* XAVI */ + 0x1C01), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_ASUSTEK, + 0x850D), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c index 5b84f7ae0b1e..ef44a2da644d 100644 --- a/drivers/net/wireless/ath/wcn36xx/debug.c +++ b/drivers/net/wireless/ath/wcn36xx/debug.c @@ -126,7 +126,7 @@ static ssize_t write_file_dump(struct file *file, if (begin == NULL) break; - if (kstrtoul(begin, 0, (unsigned long *)(arg + i)) != 0) + if (kstrtou32(begin, 0, &arg[i]) != 0) break; } diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index f8c3a10510c2..de9eb2cfbf4b 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -1286,7 +1286,8 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif, } else { wcn36xx_err("Beacon is to big: beacon size=%d\n", msg_body.beacon_length); - return -ENOMEM; + ret = -ENOMEM; + goto out; } memcpy(msg_body.bssid, vif->addr, ETH_ALEN); @@ -1327,7 +1328,8 @@ int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn, if (skb->len > BEACON_TEMPLATE_SIZE) { wcn36xx_warn("probe response template is too big: %d\n", skb->len); - return -E2BIG; + ret = -E2BIG; + goto out; } msg.probe_resp_template_len = skb->len; @@ 
-1606,7 +1608,8 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn, /* TODO: it also support ARP response type */ } else { wcn36xx_warn("unknow keep alive packet type %d\n", packet_type); - return -EINVAL; + ret = -EINVAL; + goto out; } PREPARE_HAL_BUF(wcn->hal_buf, msg_body); diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c index 668dd27616a0..cc6a0a586f0b 100644 --- a/drivers/net/wireless/libertas/debugfs.c +++ b/drivers/net/wireless/libertas/debugfs.c @@ -913,7 +913,10 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf, char *p2; struct debug_data *d = f->private_data; - pdata = kmalloc(cnt, GFP_KERNEL); + if (cnt == 0) + return 0; + + pdata = kmalloc(cnt + 1, GFP_KERNEL); if (pdata == NULL) return 0; @@ -922,6 +925,7 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf, kfree(pdata); return 0; } + pdata[cnt] = '\0'; p0 = pdata; for (i = 0; i < num_of_items; i++) { diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c index ef8c98e21098..f499efc6abcf 100644 --- a/drivers/net/wireless/libertas/if_cs.c +++ b/drivers/net/wireless/libertas/if_cs.c @@ -902,6 +902,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev) if (card->model == MODEL_UNKNOWN) { pr_err("unsupported manf_id 0x%04x / card_id 0x%04x\n", p_dev->manf_id, p_dev->card_id); + ret = -ENODEV; goto out2; } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index de0df86704e7..9df7bc91a26f 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2097,7 +2097,7 @@ out: } /* Generic Netlink operations array */ -static struct genl_ops hwsim_ops[] = { +static const struct genl_ops hwsim_ops[] = { { .cmd = HWSIM_CMD_REGISTER, .policy = hwsim_genl_policy, @@ -2148,8 +2148,7 @@ static int hwsim_init_netlink(void) printk(KERN_INFO "mac80211_hwsim: initializing netlink\n"); - rc = genl_register_family_with_ops(&hwsim_genl_family, - hwsim_ops, ARRAY_SIZE(hwsim_ops)); + rc = genl_register_family_with_ops(&hwsim_genl_family, hwsim_ops); if (rc) goto failure; diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index f80f30b6160e..c8385ec77a86 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -1020,8 +1020,8 @@ struct mwifiex_power_group { } __packed; struct mwifiex_types_power_group { - u16 type; - u16 length; + __le16 type; + __le16 length; } __packed; struct host_cmd_ds_txpwr_cfg { diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c index 220af4fe0fc6..81ac001ee741 100644 --- a/drivers/net/wireless/mwifiex/ie.c +++ b/drivers/net/wireless/mwifiex/ie.c @@ -82,7 +82,7 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv, struct mwifiex_ie_list *ie_list) { u16 travel_len, index, mask; - s16 input_len; + s16 input_len, tlv_len; struct mwifiex_ie *ie; u8 *tmp; @@ -91,11 +91,13 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv, ie_list->len = 0; - while (input_len > 0) { + while (input_len >= sizeof(struct mwifiex_ie_types_header)) { ie = (struct mwifiex_ie *)(((u8 *)ie_list) + travel_len); - input_len -= le16_to_cpu(ie->ie_length) + MWIFIEX_IE_HDR_SIZE; - travel_len += le16_to_cpu(ie->ie_length) + MWIFIEX_IE_HDR_SIZE; + tlv_len = le16_to_cpu(ie->ie_length); + travel_len += tlv_len + MWIFIEX_IE_HDR_SIZE; + if (input_len < tlv_len + MWIFIEX_IE_HDR_SIZE) + return -1; index = le16_to_cpu(ie->ie_index); mask = 
le16_to_cpu(ie->mgmt_subtype_mask); @@ -132,6 +134,7 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv, le16_add_cpu(&ie_list->len, le16_to_cpu(priv->mgmt_ie[index].ie_length) + MWIFIEX_IE_HDR_SIZE); + input_len -= tlv_len + MWIFIEX_IE_HDR_SIZE; } if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index 1576104e3d95..9bf8898743ab 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -1029,7 +1029,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter, struct sk_buff *skb, u32 upld_typ) { u8 *cmd_buf; + __le16 *curr_ptr = (__le16 *)skb->data; + u16 pkt_len = le16_to_cpu(*curr_ptr); + skb_trim(skb, pkt_len); skb_pull(skb, INTF_HEADER_LEN); switch (upld_typ) { diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index 7d66018a2e33..2181ee283d82 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c @@ -239,14 +239,14 @@ static int mwifiex_cmd_tx_power_cfg(struct host_cmd_ds_command *cmd, memmove(cmd_txp_cfg, txp, sizeof(struct host_cmd_ds_txpwr_cfg) + sizeof(struct mwifiex_types_power_group) + - pg_tlv->length); + le16_to_cpu(pg_tlv->length)); pg_tlv = (struct mwifiex_types_power_group *) ((u8 *) cmd_txp_cfg + sizeof(struct host_cmd_ds_txpwr_cfg)); cmd->size = cpu_to_le16(le16_to_cpu(cmd->size) + sizeof(struct mwifiex_types_power_group) + - pg_tlv->length); + le16_to_cpu(pg_tlv->length)); } else { memmove(cmd_txp_cfg, txp, sizeof(*txp)); } diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index 58a6013712d2..2675ca7f8d14 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -274,17 +274,20 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv, struct host_cmd_ds_tx_rate_cfg *rate_cfg = &resp->params.tx_rate_cfg; struct mwifiex_rate_scope *rate_scope; struct mwifiex_ie_types_header *head; - u16 tlv, tlv_buf_len; + u16 tlv, tlv_buf_len, tlv_buf_left; u8 *tlv_buf; u32 i; - tlv_buf = ((u8 *)rate_cfg) + - sizeof(struct host_cmd_ds_tx_rate_cfg); - tlv_buf_len = le16_to_cpu(*(__le16 *) (tlv_buf + sizeof(u16))); + tlv_buf = ((u8 *)rate_cfg) + sizeof(struct host_cmd_ds_tx_rate_cfg); + tlv_buf_left = le16_to_cpu(resp->size) - S_DS_GEN - sizeof(*rate_cfg); - while (tlv_buf && tlv_buf_len > 0) { - tlv = (*tlv_buf); - tlv = tlv | (*(tlv_buf + 1) << 8); + while (tlv_buf_left >= sizeof(*head)) { + head = (struct mwifiex_ie_types_header *)tlv_buf; + tlv = le16_to_cpu(head->type); + tlv_buf_len = le16_to_cpu(head->len); + + if (tlv_buf_left < (sizeof(*head) + tlv_buf_len)) + break; switch (tlv) { case TLV_TYPE_RATE_SCOPE: @@ -304,9 +307,8 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv, /* Add RATE_DROP tlv here */ } - head = (struct mwifiex_ie_types_header *) tlv_buf; - tlv_buf += le16_to_cpu(head->len) + sizeof(*head); - tlv_buf_len -= le16_to_cpu(head->len); + tlv_buf += (sizeof(*head) + tlv_buf_len); + tlv_buf_left -= (sizeof(*head) + tlv_buf_len); } priv->is_data_rate_auto = mwifiex_is_rate_auto(priv); @@ -340,13 +342,17 @@ static int mwifiex_get_power_level(struct mwifiex_private *priv, void *data_buf) ((u8 *) data_buf + sizeof(struct host_cmd_ds_txpwr_cfg)); pg = (struct mwifiex_power_group *) ((u8 *) pg_tlv_hdr + sizeof(struct mwifiex_types_power_group)); - length = pg_tlv_hdr->length; - if (length > 0) { - max_power = pg->power_max; - 
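The mwifiex fixes here (ie.c, sta_cmdresp.c, and the wmm.c hunk further down) converge on one defensive TLV walk: track the bytes remaining, refuse to read a header unless a whole header fits, and refuse to advance unless header plus claimed payload fit. The driver's TLVs are little-endian and read with le16_to_cpu(); the standalone sketch below uses host byte order and made-up TLV types to show just the bounds checking:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {
    uint16_t type;
    uint16_t len;
};

static void walk_tlvs(const uint8_t *buf, size_t buf_left)
{
    struct tlv_hdr hdr;

    while (buf_left >= sizeof(hdr)) {
        memcpy(&hdr, buf, sizeof(hdr));         /* no alignment assumptions */
        if (buf_left < sizeof(hdr) + hdr.len)
            break;                              /* truncated TLV: stop, do not overread */

        printf("tlv type %u, %u payload byte(s)\n",
               (unsigned)hdr.type, (unsigned)hdr.len);

        buf += sizeof(hdr) + hdr.len;
        buf_left -= sizeof(hdr) + hdr.len;
    }
}

int main(void)
{
    uint8_t buf[32];
    struct tlv_hdr h = { 1, 2 };
    size_t off = 0;

    memcpy(buf + off, &h, sizeof(h));           /* type 1, 2 payload bytes */
    off += sizeof(h);
    buf[off++] = 0xaa;
    buf[off++] = 0xbb;

    h.type = 2;
    h.len = 8;                                  /* claims more than remains */
    memcpy(buf + off, &h, sizeof(h));
    off += sizeof(h);

    walk_tlvs(buf, off);
    return 0;
}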
min_power = pg->power_min; - length -= sizeof(struct mwifiex_power_group); - } - while (length) { + length = le16_to_cpu(pg_tlv_hdr->length); + + /* At least one structure required to update power */ + if (length < sizeof(struct mwifiex_power_group)) + return 0; + + max_power = pg->power_max; + min_power = pg->power_min; + length -= sizeof(struct mwifiex_power_group); + + while (length >= sizeof(struct mwifiex_power_group)) { pg++; if (max_power < pg->power_max) max_power = pg->power_max; @@ -356,10 +362,8 @@ static int mwifiex_get_power_level(struct mwifiex_private *priv, void *data_buf) length -= sizeof(struct mwifiex_power_group); } - if (pg_tlv_hdr->length > 0) { - priv->min_tx_power_level = (u8) min_power; - priv->max_tx_power_level = (u8) max_power; - } + priv->min_tx_power_level = (u8) min_power; + priv->max_tx_power_level = (u8) max_power; return 0; } diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index f084412eee0b..c8e029df770e 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -638,8 +638,9 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, txp_cfg->mode = cpu_to_le32(1); pg_tlv = (struct mwifiex_types_power_group *) (buf + sizeof(struct host_cmd_ds_txpwr_cfg)); - pg_tlv->type = TLV_TYPE_POWER_GROUP; - pg_tlv->length = 4 * sizeof(struct mwifiex_power_group); + pg_tlv->type = cpu_to_le16(TLV_TYPE_POWER_GROUP); + pg_tlv->length = + cpu_to_le16(4 * sizeof(struct mwifiex_power_group)); pg = (struct mwifiex_power_group *) (buf + sizeof(struct host_cmd_ds_txpwr_cfg) + sizeof(struct mwifiex_types_power_group)); diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c index 1cfe5a738c47..92f76d655e6c 100644 --- a/drivers/net/wireless/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/mwifiex/uap_txrx.c @@ -97,6 +97,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, struct mwifiex_txinfo *tx_info; int hdr_chop; struct timeval tv; + struct ethhdr *p_ethhdr; u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; uap_rx_pd = (struct uap_rxpd *)(skb->data); @@ -112,14 +113,36 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, } if (!memcmp(&rx_pkt_hdr->rfc1042_hdr, - rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) + rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) { + /* Replace the 803 header and rfc1042 header (llc/snap) with + * an Ethernet II header, keep the src/dst and snap_type + * (ethertype). + * + * The firmware only passes up SNAP frames converting all RX + * data from 802.11 to 802.2/LLC/SNAP frames. + * + * To create the Ethernet II, just move the src, dst address + * right before the snap_type. + */ + p_ethhdr = (struct ethhdr *) + ((u8 *)(&rx_pkt_hdr->eth803_hdr) + + sizeof(rx_pkt_hdr->eth803_hdr) + + sizeof(rx_pkt_hdr->rfc1042_hdr) + - sizeof(rx_pkt_hdr->eth803_hdr.h_dest) + - sizeof(rx_pkt_hdr->eth803_hdr.h_source) + - sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type)); + memcpy(p_ethhdr->h_source, rx_pkt_hdr->eth803_hdr.h_source, + sizeof(p_ethhdr->h_source)); + memcpy(p_ethhdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest, + sizeof(p_ethhdr->h_dest)); /* Chop off the rxpd + the excess memory from * 802.2/llc/snap header that was removed. 
*/ - hdr_chop = (u8 *)eth_hdr - (u8 *)uap_rx_pd; - else + hdr_chop = (u8 *)p_ethhdr - (u8 *)uap_rx_pd; + } else { /* Chop off the rxpd */ hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd; + } /* Chop off the leading header bytes so the it points * to the start of either the reconstructed EthII frame diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 5dd0ccc70b86..13eaeed03898 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -722,6 +722,9 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv, tlv_hdr = (struct mwifiex_ie_types_data *) curr; tlv_len = le16_to_cpu(tlv_hdr->header.len); + if (resp_len < tlv_len + sizeof(tlv_hdr->header)) + break; + switch (le16_to_cpu(tlv_hdr->header.type)) { case TLV_TYPE_WMMQSTATUS: tlv_wmm_qstatus = diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c index 41a16d30c79c..e05d9b4c8317 100644 --- a/drivers/net/wireless/prism54/islpci_dev.c +++ b/drivers/net/wireless/prism54/islpci_dev.c @@ -811,6 +811,10 @@ static const struct net_device_ops islpci_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; +static struct device_type wlan_type = { + .name = "wlan", +}; + struct net_device * islpci_setup(struct pci_dev *pdev) { @@ -821,9 +825,8 @@ islpci_setup(struct pci_dev *pdev) return ndev; pci_set_drvdata(pdev, ndev); -#if defined(SET_NETDEV_DEV) SET_NETDEV_DEV(ndev, &pdev->dev); -#endif + SET_NETDEV_DEVTYPE(ndev, &wlan_type); /* setup the structure members */ ndev->base_addr = pci_resource_start(pdev, 0); diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index c5738f14c4ba..776aff3678ff 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -2640,7 +2640,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, if (rt2x00_rt(rt2x00dev, RT5392)) { rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr); - if (info->default_power1 > POWER_BOUND) + if (info->default_power2 > POWER_BOUND) rt2x00_set_field8(&rfcsr, RFCSR50_TX, POWER_BOUND); else rt2x00_set_field8(&rfcsr, RFCSR50_TX, diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h index a0935987fa3a..7f40ab8e1bd8 100644 --- a/drivers/net/wireless/rt2x00/rt2x00lib.h +++ b/drivers/net/wireless/rt2x00/rt2x00lib.h @@ -146,7 +146,7 @@ void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length); * @local: frame is not from mac80211 */ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, - bool local); + struct ieee80211_sta *sta, bool local); /** * rt2x00queue_update_beacon - Send new beacon from mac80211 diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index 7c157857f5ce..2183e7978399 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -90,7 +90,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev, frag_skb->data, data_length, tx_info, (struct ieee80211_rts *)(skb->data)); - retval = rt2x00queue_write_tx_frame(queue, skb, true); + retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true); if (retval) { dev_kfree_skb_any(skb); rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n"); @@ -151,7 +151,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, goto exit_fail; } - if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false))) + if (unlikely(rt2x00queue_write_tx_frame(queue, skb, 
control->sta, false))) goto exit_fail; /* diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index 50590b1420a5..a5d38e8ad9e4 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c @@ -635,7 +635,7 @@ static void rt2x00queue_bar_check(struct queue_entry *entry) } int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, - bool local) + struct ieee80211_sta *sta, bool local) { struct ieee80211_tx_info *tx_info; struct queue_entry *entry; @@ -649,7 +649,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, * after that we are free to use the skb->cb array * for our information. */ - rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL); + rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta); /* * All information is retrieved from the skb->cb array, diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index 9a78e3daf742..ff784072fb42 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c @@ -37,6 +37,7 @@ #include <linux/ip.h> #include <linux/module.h> +#include <linux/udp.h> /* *NOTICE!!!: This file will be very big, we should @@ -1074,64 +1075,52 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) if (!ieee80211_is_data(fc)) return false; + ip = (const struct iphdr *)(skb->data + mac_hdr_len + + SNAP_SIZE + PROTOC_TYPE_SIZE); + ether_type = be16_to_cpup((__be16 *) + (skb->data + mac_hdr_len + SNAP_SIZE)); - ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len + - SNAP_SIZE + PROTOC_TYPE_SIZE); - ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE); - /* ether_type = ntohs(ether_type); */ - - if (ETH_P_IP == ether_type) { - if (IPPROTO_UDP == ip->protocol) { - struct udphdr *udp = (struct udphdr *)((u8 *) ip + - (ip->ihl << 2)); - if (((((u8 *) udp)[1] == 68) && - (((u8 *) udp)[3] == 67)) || - ((((u8 *) udp)[1] == 67) && - (((u8 *) udp)[3] == 68))) { - /* - * 68 : UDP BOOTP client - * 67 : UDP BOOTP server - */ - RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), - DBG_DMESG, "dhcp %s !!\n", - is_tx ? "Tx" : "Rx"); - - if (is_tx) { - rtlpriv->enter_ps = false; - schedule_work(&rtlpriv-> - works.lps_change_work); - ppsc->last_delaylps_stamp_jiffies = - jiffies; - } + switch (ether_type) { + case ETH_P_IP: { + struct udphdr *udp; + u16 src; + u16 dst; - return true; - } - } - } else if (ETH_P_ARP == ether_type) { - if (is_tx) { - rtlpriv->enter_ps = false; - schedule_work(&rtlpriv->works.lps_change_work); - ppsc->last_delaylps_stamp_jiffies = jiffies; - } + if (ip->protocol != IPPROTO_UDP) + return false; + udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); + src = be16_to_cpu(udp->source); + dst = be16_to_cpu(udp->dest); - return true; - } else if (ETH_P_PAE == ether_type) { + /* If this case involves port 68 (UDP BOOTP client) connecting + * with port 67 (UDP BOOTP server), then return true so that + * the lowest speed is used. + */ + if (!((src == 68 && dst == 67) || (src == 67 && dst == 68))) + return false; + + RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, + "dhcp %s !!\n", is_tx ? "Tx" : "Rx"); + break; + } + case ETH_P_ARP: + break; + case ETH_P_PAE: RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, "802.1X %s EAPOL pkt!!\n", is_tx ? 
"Tx" : "Rx"); - - if (is_tx) { - rtlpriv->enter_ps = false; - schedule_work(&rtlpriv->works.lps_change_work); - ppsc->last_delaylps_stamp_jiffies = jiffies; - } - - return true; - } else if (ETH_P_IPV6 == ether_type) { - /* IPv6 */ - return true; + break; + case ETH_P_IPV6: + /* TODO: Is this right? */ + return false; + default: + return false; } - - return false; + if (is_tx) { + rtlpriv->enter_ps = false; + schedule_work(&rtlpriv->works.lps_change_work); + ppsc->last_delaylps_stamp_jiffies = jiffies; + } + return true; } EXPORT_SYMBOL_GPL(rtl_is_special_data); diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c index ae13fb94b2e8..2ffc7298f686 100644 --- a/drivers/net/wireless/rtlwifi/efuse.c +++ b/drivers/net/wireless/rtlwifi/efuse.c @@ -262,9 +262,9 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf) sizeof(u8), GFP_ATOMIC); if (!efuse_tbl) return; - efuse_word = kmalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC); + efuse_word = kzalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC); if (!efuse_word) - goto done; + goto out; for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) { efuse_word[i] = kmalloc(efuse_max_section * sizeof(u16), GFP_ATOMIC); @@ -378,6 +378,7 @@ done: for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) kfree(efuse_word[i]); kfree(efuse_word); +out: kfree(efuse_tbl); } diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c index 25e50ffc44ec..b0c346a9e4b8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c @@ -349,7 +349,7 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw, p_drvinfo); } /*rx_status->qual = stats->signal; */ - rx_status->signal = stats->rssi + 10; + rx_status->signal = stats->recvsignalpower + 10; return true; } diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c index 945ddecf90c9..0eb0f4ae5920 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c @@ -525,7 +525,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, p_drvinfo); } /*rx_status->qual = stats->signal; */ - rx_status->signal = stats->rssi + 10; + rx_status->signal = stats->recvsignalpower + 10; return true; } diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c index 5061f1db3f02..92d38ab3c60e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c @@ -265,7 +265,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw, rtlefuse->pwrgroup_ht40 [RF90_PATH_A][chnl - 1]) { pwrdiff_limit[i] = - rtlefuse->pwrgroup_ht20 + rtlefuse->pwrgroup_ht40 [RF90_PATH_A][chnl - 1]; } } else { diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c index 222d2e792ca6..27efbcdac6a9 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c @@ -329,7 +329,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, } /*rx_status->qual = stats->signal; */ - rx_status->signal = stats->rssi + 10; + rx_status->signal = stats->recvsignalpower + 10; return true; } diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h index d224dc3bb092..0c65386fa30d 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h @@ 
-77,11 +77,7 @@ #define RTL_SLOT_TIME_9 9 #define RTL_SLOT_TIME_20 20 -/*related with tcp/ip. */ -/*if_ehther.h*/ -#define ETH_P_PAE 0x888E /*Port Access Entity (IEEE 802.1X) */ -#define ETH_P_IP 0x0800 /*Internet Protocol packet */ -#define ETH_P_ARP 0x0806 /*Address Resolution packet */ +/*related to tcp/ip. */ #define SNAP_SIZE 6 #define PROTOC_TYPE_SIZE 2 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index d85e66979711..e59acb1daa23 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -277,12 +277,13 @@ static void xennet_alloc_rx_buffers(struct net_device *dev) if (!page) { kfree_skb(skb); no_skb: - /* Any skbuffs queued for refill? Force them out. */ - if (i != 0) - goto refill; /* Could not allocate any skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); + + /* Any skbuffs queued for refill? Force them out. */ + if (i != 0) + goto refill; break; } diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 12a9e83c008b..d0222f13d154 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -1034,10 +1034,9 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, struct dma_chan *chan = qp->dma_chan; struct dma_device *device; size_t pay_off, buff_off; - dma_addr_t src, dest; + struct dmaengine_unmap_data *unmap; dma_cookie_t cookie; void *buf = entry->buf; - unsigned long flags; entry->len = len; @@ -1045,35 +1044,49 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, goto err; if (len < copy_bytes) - goto err1; + goto err_wait; device = chan->device; pay_off = (size_t) offset & ~PAGE_MASK; buff_off = (size_t) buf & ~PAGE_MASK; if (!is_dma_copy_aligned(device, pay_off, buff_off, len)) - goto err1; + goto err_wait; - dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE); - if (dma_mapping_error(device->dev, dest)) - goto err1; + unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT); + if (!unmap) + goto err_wait; - src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE); - if (dma_mapping_error(device->dev, src)) - goto err2; + unmap->len = len; + unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset), + pay_off, len, DMA_TO_DEVICE); + if (dma_mapping_error(device->dev, unmap->addr[0])) + goto err_get_unmap; + + unmap->to_cnt = 1; - flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE | - DMA_PREP_INTERRUPT; - txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags); + unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf), + buff_off, len, DMA_FROM_DEVICE); + if (dma_mapping_error(device->dev, unmap->addr[1])) + goto err_get_unmap; + + unmap->from_cnt = 1; + + txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], + unmap->addr[0], len, + DMA_PREP_INTERRUPT); if (!txd) - goto err3; + goto err_get_unmap; txd->callback = ntb_rx_copy_callback; txd->callback_param = entry; + dma_set_unmap(txd, unmap); cookie = dmaengine_submit(txd); if (dma_submit_error(cookie)) - goto err3; + goto err_set_unmap; + + dmaengine_unmap_put(unmap); qp->last_cookie = cookie; @@ -1081,11 +1094,11 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, return; -err3: - dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE); -err2: - dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE); -err1: +err_set_unmap: + dmaengine_unmap_put(unmap); +err_get_unmap: + dmaengine_unmap_put(unmap); +err_wait: /* If the callbacks come out of order, the writing of the index to the * last completed 
will be out of order. This may result in the * receive stalling forever. @@ -1245,12 +1258,12 @@ static void ntb_async_tx(struct ntb_transport_qp *qp, struct dma_chan *chan = qp->dma_chan; struct dma_device *device; size_t dest_off, buff_off; - dma_addr_t src, dest; + struct dmaengine_unmap_data *unmap; + dma_addr_t dest; dma_cookie_t cookie; void __iomem *offset; size_t len = entry->len; void *buf = entry->buf; - unsigned long flags; offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index; hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header); @@ -1273,28 +1286,41 @@ static void ntb_async_tx(struct ntb_transport_qp *qp, if (!is_dma_copy_aligned(device, buff_off, dest_off, len)) goto err; - src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE); - if (dma_mapping_error(device->dev, src)) + unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT); + if (!unmap) goto err; - flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT; - txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags); + unmap->len = len; + unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf), + buff_off, len, DMA_TO_DEVICE); + if (dma_mapping_error(device->dev, unmap->addr[0])) + goto err_get_unmap; + + unmap->to_cnt = 1; + + txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len, + DMA_PREP_INTERRUPT); if (!txd) - goto err1; + goto err_get_unmap; txd->callback = ntb_tx_copy_callback; txd->callback_param = entry; + dma_set_unmap(txd, unmap); cookie = dmaengine_submit(txd); if (dma_submit_error(cookie)) - goto err1; + goto err_set_unmap; + + dmaengine_unmap_put(unmap); dma_async_issue_pending(chan); qp->tx_async++; return; -err1: - dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE); +err_set_unmap: + dmaengine_unmap_put(unmap); +err_get_unmap: + dmaengine_unmap_put(unmap); err: ntb_memcpy_tx(entry, offset); qp->tx_memcpy++; diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig index dc82ef096f3b..2872ece81f35 100644 --- a/drivers/parport/Kconfig +++ b/drivers/parport/Kconfig @@ -31,15 +31,18 @@ menuconfig PARPORT If unsure, say Y. +config ARCH_MIGHT_HAVE_PC_PARPORT + bool + help + Select this config option from the architecture Kconfig if + the architecture might have PC parallel port hardware. + if PARPORT config PARPORT_PC tristate "PC-style hardware" - depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && !S390 && \ - (!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && \ - !XTENSA && !CRIS - - ---help--- + depends on ARCH_MIGHT_HAVE_PC_PARPORT + help You should say Y here if you have a PC-style parallel port. All IBM PC compatible computers and some Alphas have PC-style parallel ports. 
PA-RISC owners should only say Y here if they diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index 1ce8ee054f1a..a94d850ae228 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c @@ -367,7 +367,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL }; } - handle = DEVICE_ACPI_HANDLE(&pdev->dev); + handle = ACPI_HANDLE(&pdev->dev); if (!handle) { /* * This hotplug controller was not listed in the ACPI name diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h index 26100f510b10..1592dbe4f904 100644 --- a/drivers/pci/hotplug/acpiphp.h +++ b/drivers/pci/hotplug/acpiphp.h @@ -176,7 +176,6 @@ u8 acpiphp_get_latch_status(struct acpiphp_slot *slot); u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot); /* variables */ -extern bool acpiphp_debug; extern bool acpiphp_disabled; #endif /* _ACPIPHP_H */ diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c index ead7c534095e..cff7cadfc2e4 100644 --- a/drivers/pci/hotplug/pciehp_acpi.c +++ b/drivers/pci/hotplug/pciehp_acpi.c @@ -54,7 +54,7 @@ int pciehp_acpi_slot_detection_check(struct pci_dev *dev) { if (slot_detection_mode != PCIEHP_DETECT_ACPI) return 0; - if (acpi_pci_detect_ejectable(DEVICE_ACPI_HANDLE(&dev->dev))) + if (acpi_pci_detect_ejectable(ACPI_HANDLE(&dev->dev))) return 0; return -ENODEV; } @@ -96,7 +96,7 @@ static int __init dummy_probe(struct pcie_device *dev) dup_slot_id++; } list_add_tail(&slot->list, &dummy_slots); - handle = DEVICE_ACPI_HANDLE(&pdev->dev); + handle = ACPI_HANDLE(&pdev->dev); if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle)) acpi_slot_detected = 1; return -ENODEV; /* dummy driver always returns error */ diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c index b2781dfe60e9..5b05a68cca6c 100644 --- a/drivers/pci/hotplug/sgi_hotplug.c +++ b/drivers/pci/hotplug/sgi_hotplug.c @@ -9,6 +9,7 @@ * Work to add BIOS PROM support was completed by Mike Habeck. 
*/ +#include <linux/acpi.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> @@ -29,7 +30,6 @@ #include <asm/sn/sn_feature_sets.h> #include <asm/sn/sn_sal.h> #include <asm/sn/types.h> -#include <linux/acpi.h> #include <asm/sn/acpi.h> #include "../pci.h" @@ -414,7 +414,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot) acpi_handle rethandle; acpi_status ret; - phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle; + phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion); if (acpi_bus_get_device(phandle, &pdevice)) { dev_dbg(&slot->pci_bus->self->dev, @@ -495,7 +495,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot) /* free the ACPI resources for the slot */ if (SN_ACPI_BASE_SUPPORT() && - PCI_CONTROLLER(slot->pci_bus)->acpi_handle) { + PCI_CONTROLLER(slot->pci_bus)->companion) { unsigned long long adr; struct acpi_device *device; acpi_handle phandle; @@ -504,7 +504,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot) acpi_status ret; /* Get the rootbus node pointer */ - phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle; + phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion); acpi_scan_lock_acquire(); /* diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c index 1b90579b233a..50ce68098298 100644 --- a/drivers/pci/ioapic.c +++ b/drivers/pci/ioapic.c @@ -37,7 +37,7 @@ static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent) char *type; struct resource *res; - handle = DEVICE_ACPI_HANDLE(&dev->dev); + handle = ACPI_HANDLE(&dev->dev); if (!handle) return -EINVAL; diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index dfd1f59de729..f166126e28d1 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -173,14 +173,14 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev) static bool acpi_pci_power_manageable(struct pci_dev *dev) { - acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); + acpi_handle handle = ACPI_HANDLE(&dev->dev); return handle ? acpi_bus_power_manageable(handle) : false; } static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) { - acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); + acpi_handle handle = ACPI_HANDLE(&dev->dev); static const u8 state_conv[] = { [PCI_D0] = ACPI_STATE_D0, [PCI_D1] = ACPI_STATE_D1, @@ -217,7 +217,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) static bool acpi_pci_can_wakeup(struct pci_dev *dev) { - acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); + acpi_handle handle = ACPI_HANDLE(&dev->dev); return handle ? 
acpi_bus_can_wakeup(handle) : false; } diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c index edaed6f4da6c..d51f45aa669e 100644 --- a/drivers/pci/pci-label.c +++ b/drivers/pci/pci-label.c @@ -263,7 +263,7 @@ device_has_dsm(struct device *dev) acpi_handle handle; struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; - handle = DEVICE_ACPI_HANDLE(dev); + handle = ACPI_HANDLE(dev); if (!handle) return FALSE; @@ -295,7 +295,7 @@ acpilabel_show(struct device *dev, struct device_attribute *attr, char *buf) acpi_handle handle; int length; - handle = DEVICE_ACPI_HANDLE(dev); + handle = ACPI_HANDLE(dev); if (!handle) return -1; @@ -316,7 +316,7 @@ acpiindex_show(struct device *dev, struct device_attribute *attr, char *buf) acpi_handle handle; int length; - handle = DEVICE_ACPI_HANDLE(dev); + handle = ACPI_HANDLE(dev); if (!handle) return -1; diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 0846922b2316..829b98c5c66f 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1604,6 +1604,9 @@ static inline void pcs_irq_set(struct pcs_soc_data *pcs_soc, pcs->write(mask, pcswi->reg); raw_spin_unlock(&pcs->lock); } + + if (pcs_soc->rearm) + pcs_soc->rearm(); } /** @@ -1626,8 +1629,6 @@ static void pcs_irq_unmask(struct irq_data *d) struct pcs_soc_data *pcs_soc = irq_data_get_irq_chip_data(d); pcs_irq_set(pcs_soc, d->irq, true); - if (pcs_soc->rearm) - pcs_soc->rearm(); } /** @@ -1678,11 +1679,6 @@ static int pcs_irq_handle(struct pcs_soc_data *pcs_soc) } } - /* - * For debugging on omaps, you may want to call pcs_soc->rearm() - * here to see wake-up interrupts during runtime also. - */ - return count; } diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c index 605a9be55129..b9429fbf1cd8 100644 --- a/drivers/platform/x86/apple-gmux.c +++ b/drivers/platform/x86/apple-gmux.c @@ -519,7 +519,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) gmux_data->power_state = VGA_SWITCHEROO_ON; - gmux_data->dhandle = DEVICE_ACPI_HANDLE(&pnp->dev); + gmux_data->dhandle = ACPI_HANDLE(&pnp->dev); if (!gmux_data->dhandle) { pr_err("Cannot find acpi handle for pnp device %s\n", dev_name(&pnp->dev)); diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 747826d99059..14655a0f0431 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -89,7 +89,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev) pnp_dbg(&dev->dev, "set resources\n"); - handle = DEVICE_ACPI_HANDLE(&dev->dev); + handle = ACPI_HANDLE(&dev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return -ENODEV; @@ -122,7 +122,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev) dev_dbg(&dev->dev, "disable resources\n"); - handle = DEVICE_ACPI_HANDLE(&dev->dev); + handle = ACPI_HANDLE(&dev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return 0; @@ -144,7 +144,7 @@ static bool pnpacpi_can_wakeup(struct pnp_dev *dev) struct acpi_device *acpi_dev; acpi_handle handle; - handle = DEVICE_ACPI_HANDLE(&dev->dev); + handle = ACPI_HANDLE(&dev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return false; @@ -159,7 +159,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) acpi_handle handle; int error = 0; - handle = 
DEVICE_ACPI_HANDLE(&dev->dev); + handle = ACPI_HANDLE(&dev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return 0; @@ -194,7 +194,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) static int pnpacpi_resume(struct pnp_dev *dev) { struct acpi_device *acpi_dev; - acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); + acpi_handle handle = ACPI_HANDLE(&dev->dev); int error = 0; if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index e6f92b450913..5e2054afe840 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig @@ -346,6 +346,12 @@ config CHARGER_BQ24190 help Say Y to enable support for the TI BQ24190 battery charger. +config CHARGER_BQ24735 + tristate "TI BQ24735 battery charger support" + depends on I2C && GPIOLIB + help + Say Y to enable support for the TI BQ24735 battery charger. + config CHARGER_SMB347 tristate "Summit Microelectronics SMB347 Battery Charger" depends on I2C diff --git a/drivers/power/Makefile b/drivers/power/Makefile index a4b74177706f..372b4e8ab598 100644 --- a/drivers/power/Makefile +++ b/drivers/power/Makefile @@ -52,6 +52,7 @@ obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o obj-$(CONFIG_CHARGER_BQ24190) += bq24190_charger.o +obj-$(CONFIG_CHARGER_BQ24735) += bq24735-charger.o obj-$(CONFIG_POWER_AVS) += avs/ obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c index a4c4a10b3a41..19110aa613a1 100644 --- a/drivers/power/ab8500_charger.c +++ b/drivers/power/ab8500_charger.c @@ -766,7 +766,6 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di, ret = -ENXIO; break; } - break; case USB_STAT_CARKIT_1: case USB_STAT_CARKIT_2: case USB_STAT_ACA_DOCK_CHARGER: @@ -1387,8 +1386,12 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger, * the GPADC module independant of the AB8500 chargers */ if (!di->vddadc_en_ac) { - regulator_enable(di->regu); - di->vddadc_en_ac = true; + ret = regulator_enable(di->regu); + if (ret) + dev_warn(di->dev, + "Failed to enable regulator\n"); + else + di->vddadc_en_ac = true; } /* Check if the requested voltage or current is valid */ @@ -1556,8 +1559,12 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger, * the GPADC module independant of the AB8500 chargers */ if (!di->vddadc_en_usb) { - regulator_enable(di->regu); - di->vddadc_en_usb = true; + ret = regulator_enable(di->regu); + if (ret) + dev_warn(di->dev, + "Failed to enable regulator\n"); + else + di->vddadc_en_usb = true; } /* Enable USB charging */ diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c index 0727f9256138..df893dd1447d 100644 --- a/drivers/power/bq2415x_charger.c +++ b/drivers/power/bq2415x_charger.c @@ -605,9 +605,13 @@ static int bq2415x_set_battery_regulation_voltage(struct bq2415x_device *bq, { int val = (mV/10 - 350) / 2; + /* + * According to datasheet, maximum battery regulation voltage is + * 4440mV which is b101111 = 47. + */ if (val < 0) val = 0; - else if (val > 94) /* FIXME: Max is 94 or 122 ? Set max value ? 
*/ + else if (val > 47) return -EINVAL; return bq2415x_i2c_write_mask(bq, BQ2415X_REG_VOLTAGE, val, diff --git a/drivers/power/bq24735-charger.c b/drivers/power/bq24735-charger.c new file mode 100644 index 000000000000..d022b823305b --- /dev/null +++ b/drivers/power/bq24735-charger.c @@ -0,0 +1,419 @@ +/* + * Battery charger driver for TI BQ24735 + * + * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include <linux/err.h> +#include <linux/gpio.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/power_supply.h> +#include <linux/slab.h> + +#include <linux/power/bq24735-charger.h> + +#define BQ24735_CHG_OPT 0x12 +#define BQ24735_CHG_OPT_CHARGE_DISABLE (1 << 0) +#define BQ24735_CHG_OPT_AC_PRESENT (1 << 4) +#define BQ24735_CHARGE_CURRENT 0x14 +#define BQ24735_CHARGE_CURRENT_MASK 0x1fc0 +#define BQ24735_CHARGE_VOLTAGE 0x15 +#define BQ24735_CHARGE_VOLTAGE_MASK 0x7ff0 +#define BQ24735_INPUT_CURRENT 0x3f +#define BQ24735_INPUT_CURRENT_MASK 0x1f80 +#define BQ24735_MANUFACTURER_ID 0xfe +#define BQ24735_DEVICE_ID 0xff + +struct bq24735 { + struct power_supply charger; + struct i2c_client *client; + struct bq24735_platform *pdata; +}; + +static inline struct bq24735 *to_bq24735(struct power_supply *psy) +{ + return container_of(psy, struct bq24735, charger); +} + +static enum power_supply_property bq24735_charger_properties[] = { + POWER_SUPPLY_PROP_ONLINE, +}; + +static inline int bq24735_write_word(struct i2c_client *client, u8 reg, + u16 value) +{ + return i2c_smbus_write_word_data(client, reg, le16_to_cpu(value)); +} + +static inline int bq24735_read_word(struct i2c_client *client, u8 reg) +{ + s32 ret = i2c_smbus_read_word_data(client, reg); + + return ret < 0 ? 
ret : le16_to_cpu(ret); +} + +static int bq24735_update_word(struct i2c_client *client, u8 reg, + u16 mask, u16 value) +{ + unsigned int tmp; + int ret; + + ret = bq24735_read_word(client, reg); + if (ret < 0) + return ret; + + tmp = ret & ~mask; + tmp |= value & mask; + + return bq24735_write_word(client, reg, tmp); +} + +static inline int bq24735_enable_charging(struct bq24735 *charger) +{ + return bq24735_update_word(charger->client, BQ24735_CHG_OPT, + BQ24735_CHG_OPT_CHARGE_DISABLE, + ~BQ24735_CHG_OPT_CHARGE_DISABLE); +} + +static inline int bq24735_disable_charging(struct bq24735 *charger) +{ + return bq24735_update_word(charger->client, BQ24735_CHG_OPT, + BQ24735_CHG_OPT_CHARGE_DISABLE, + BQ24735_CHG_OPT_CHARGE_DISABLE); +} + +static int bq24735_config_charger(struct bq24735 *charger) +{ + struct bq24735_platform *pdata = charger->pdata; + int ret; + u16 value; + + if (pdata->charge_current) { + value = pdata->charge_current & BQ24735_CHARGE_CURRENT_MASK; + + ret = bq24735_write_word(charger->client, + BQ24735_CHARGE_CURRENT, value); + if (ret < 0) { + dev_err(&charger->client->dev, + "Failed to write charger current : %d\n", + ret); + return ret; + } + } + + if (pdata->charge_voltage) { + value = pdata->charge_voltage & BQ24735_CHARGE_VOLTAGE_MASK; + + ret = bq24735_write_word(charger->client, + BQ24735_CHARGE_VOLTAGE, value); + if (ret < 0) { + dev_err(&charger->client->dev, + "Failed to write charger voltage : %d\n", + ret); + return ret; + } + } + + if (pdata->input_current) { + value = pdata->input_current & BQ24735_INPUT_CURRENT_MASK; + + ret = bq24735_write_word(charger->client, + BQ24735_INPUT_CURRENT, value); + if (ret < 0) { + dev_err(&charger->client->dev, + "Failed to write input current : %d\n", + ret); + return ret; + } + } + + return 0; +} + +static bool bq24735_charger_is_present(struct bq24735 *charger) +{ + struct bq24735_platform *pdata = charger->pdata; + int ret; + + if (pdata->status_gpio_valid) { + ret = gpio_get_value_cansleep(pdata->status_gpio); + return ret ^= pdata->status_gpio_active_low == 0; + } else { + int ac = 0; + + ac = bq24735_read_word(charger->client, BQ24735_CHG_OPT); + if (ac < 0) { + dev_err(&charger->client->dev, + "Failed to read charger options : %d\n", + ac); + return false; + } + return (ac & BQ24735_CHG_OPT_AC_PRESENT) ? true : false; + } + + return false; +} + +static irqreturn_t bq24735_charger_isr(int irq, void *devid) +{ + struct power_supply *psy = devid; + struct bq24735 *charger = to_bq24735(psy); + + if (bq24735_charger_is_present(charger)) + bq24735_enable_charging(charger); + else + bq24735_disable_charging(charger); + + power_supply_changed(psy); + + return IRQ_HANDLED; +} + +static int bq24735_charger_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct bq24735 *charger; + + charger = container_of(psy, struct bq24735, charger); + + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: + val->intval = bq24735_charger_is_present(charger) ? 
1 : 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static struct bq24735_platform *bq24735_parse_dt_data(struct i2c_client *client) +{ + struct bq24735_platform *pdata; + struct device_node *np = client->dev.of_node; + u32 val; + int ret; + enum of_gpio_flags flags; + + pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + dev_err(&client->dev, + "Memory alloc for bq24735 pdata failed\n"); + return NULL; + } + + pdata->status_gpio = of_get_named_gpio_flags(np, "ti,ac-detect-gpios", + 0, &flags); + + if (flags & OF_GPIO_ACTIVE_LOW) + pdata->status_gpio_active_low = 1; + + ret = of_property_read_u32(np, "ti,charge-current", &val); + if (!ret) + pdata->charge_current = val; + + ret = of_property_read_u32(np, "ti,charge-voltage", &val); + if (!ret) + pdata->charge_voltage = val; + + ret = of_property_read_u32(np, "ti,input-current", &val); + if (!ret) + pdata->input_current = val; + + return pdata; +} + +static int bq24735_charger_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int ret; + struct bq24735 *charger; + struct power_supply *supply; + char *name; + + charger = devm_kzalloc(&client->dev, sizeof(*charger), GFP_KERNEL); + if (!charger) + return -ENOMEM; + + charger->pdata = client->dev.platform_data; + + if (IS_ENABLED(CONFIG_OF) && !charger->pdata && client->dev.of_node) + charger->pdata = bq24735_parse_dt_data(client); + + if (!charger->pdata) { + dev_err(&client->dev, "no platform data provided\n"); + return -EINVAL; + } + + name = (char *)charger->pdata->name; + if (!name) { + name = kasprintf(GFP_KERNEL, "bq24735@%s", + dev_name(&client->dev)); + if (!name) { + dev_err(&client->dev, "Failed to alloc device name\n"); + return -ENOMEM; + } + } + + charger->client = client; + + supply = &charger->charger; + + supply->name = name; + supply->type = POWER_SUPPLY_TYPE_MAINS; + supply->properties = bq24735_charger_properties; + supply->num_properties = ARRAY_SIZE(bq24735_charger_properties); + supply->get_property = bq24735_charger_get_property; + supply->supplied_to = charger->pdata->supplied_to; + supply->num_supplicants = charger->pdata->num_supplicants; + supply->of_node = client->dev.of_node; + + i2c_set_clientdata(client, charger); + + ret = bq24735_read_word(client, BQ24735_MANUFACTURER_ID); + if (ret < 0) { + dev_err(&client->dev, "Failed to read manufacturer id : %d\n", + ret); + goto err_free_name; + } else if (ret != 0x0040) { + dev_err(&client->dev, + "manufacturer id mismatch. 0x0040 != 0x%04x\n", ret); + ret = -ENODEV; + goto err_free_name; + } + + ret = bq24735_read_word(client, BQ24735_DEVICE_ID); + if (ret < 0) { + dev_err(&client->dev, "Failed to read device id : %d\n", ret); + goto err_free_name; + } else if (ret != 0x000B) { + dev_err(&client->dev, + "device id mismatch. 
0x000b != 0x%04x\n", ret); + ret = -ENODEV; + goto err_free_name; + } + + if (gpio_is_valid(charger->pdata->status_gpio)) { + ret = devm_gpio_request(&client->dev, + charger->pdata->status_gpio, + name); + if (ret) { + dev_err(&client->dev, + "Failed GPIO request for GPIO %d: %d\n", + charger->pdata->status_gpio, ret); + } + + charger->pdata->status_gpio_valid = !ret; + } + + ret = bq24735_config_charger(charger); + if (ret < 0) { + dev_err(&client->dev, "failed in configuring charger"); + goto err_free_name; + } + + /* check for AC adapter presence */ + if (bq24735_charger_is_present(charger)) { + ret = bq24735_enable_charging(charger); + if (ret < 0) { + dev_err(&client->dev, "Failed to enable charging\n"); + goto err_free_name; + } + } + + ret = power_supply_register(&client->dev, supply); + if (ret < 0) { + dev_err(&client->dev, "Failed to register power supply: %d\n", + ret); + goto err_free_name; + } + + if (client->irq) { + ret = devm_request_threaded_irq(&client->dev, client->irq, + NULL, bq24735_charger_isr, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING | + IRQF_ONESHOT, + supply->name, supply); + if (ret) { + dev_err(&client->dev, + "Unable to register IRQ %d err %d\n", + client->irq, ret); + goto err_unregister_supply; + } + } + + return 0; +err_unregister_supply: + power_supply_unregister(supply); +err_free_name: + if (name != charger->pdata->name) + kfree(name); + + return ret; +} + +static int bq24735_charger_remove(struct i2c_client *client) +{ + struct bq24735 *charger = i2c_get_clientdata(client); + + if (charger->client->irq) + devm_free_irq(&charger->client->dev, charger->client->irq, + &charger->charger); + + power_supply_unregister(&charger->charger); + + if (charger->charger.name != charger->pdata->name) + kfree(charger->charger.name); + + return 0; +} + +static const struct i2c_device_id bq24735_charger_id[] = { + { "bq24735-charger", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, bq24735_charger_id); + +static const struct of_device_id bq24735_match_ids[] = { + { .compatible = "ti,bq24735", }, + { /* end */ } +}; +MODULE_DEVICE_TABLE(of, bq24735_match_ids); + +static struct i2c_driver bq24735_charger_driver = { + .driver = { + .name = "bq24735-charger", + .owner = THIS_MODULE, + .of_match_table = bq24735_match_ids, + }, + .probe = bq24735_charger_probe, + .remove = bq24735_charger_remove, + .id_table = bq24735_charger_id, +}; + +module_i2c_driver(bq24735_charger_driver); + +MODULE_DESCRIPTION("bq24735 battery charging driver"); +MODULE_AUTHOR("Darbha Sriharsha <dsriharsha@nvidia.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c index e30e847600bb..7287c0efd6bf 100644 --- a/drivers/power/charger-manager.c +++ b/drivers/power/charger-manager.c @@ -1378,7 +1378,8 @@ static int charger_manager_register_sysfs(struct charger_manager *cm) charger = &desc->charger_regulators[i]; snprintf(buf, 10, "charger.%d", i); - str = kzalloc(sizeof(char) * (strlen(buf) + 1), GFP_KERNEL); + str = devm_kzalloc(cm->dev, + sizeof(char) * (strlen(buf) + 1), GFP_KERNEL); if (!str) { ret = -ENOMEM; goto err; @@ -1452,30 +1453,23 @@ static int charger_manager_probe(struct platform_device *pdev) rtc_dev = NULL; dev_err(&pdev->dev, "Cannot get RTC %s\n", g_desc->rtc_name); - ret = -ENODEV; - goto err_alloc; + return -ENODEV; } } if (!desc) { dev_err(&pdev->dev, "No platform data (desc) found\n"); - ret = -ENODEV; - goto err_alloc; + return -ENODEV; } - cm = kzalloc(sizeof(struct charger_manager), GFP_KERNEL); - if (!cm) { - ret = -ENOMEM; 
- goto err_alloc; - } + cm = devm_kzalloc(&pdev->dev, + sizeof(struct charger_manager), GFP_KERNEL); + if (!cm) + return -ENOMEM; /* Basic Values. Unspecified are Null or 0 */ cm->dev = &pdev->dev; - cm->desc = kmemdup(desc, sizeof(struct charger_desc), GFP_KERNEL); - if (!cm->desc) { - ret = -ENOMEM; - goto err_alloc_desc; - } + cm->desc = desc; cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */ /* @@ -1498,27 +1492,23 @@ static int charger_manager_probe(struct platform_device *pdev) } if (!desc->charger_regulators || desc->num_charger_regulators < 1) { - ret = -EINVAL; dev_err(&pdev->dev, "charger_regulators undefined\n"); - goto err_no_charger; + return -EINVAL; } if (!desc->psy_charger_stat || !desc->psy_charger_stat[0]) { dev_err(&pdev->dev, "No power supply defined\n"); - ret = -EINVAL; - goto err_no_charger_stat; + return -EINVAL; } /* Counting index only */ while (desc->psy_charger_stat[i]) i++; - cm->charger_stat = kzalloc(sizeof(struct power_supply *) * (i + 1), - GFP_KERNEL); - if (!cm->charger_stat) { - ret = -ENOMEM; - goto err_no_charger_stat; - } + cm->charger_stat = devm_kzalloc(&pdev->dev, + sizeof(struct power_supply *) * i, GFP_KERNEL); + if (!cm->charger_stat) + return -ENOMEM; for (i = 0; desc->psy_charger_stat[i]; i++) { cm->charger_stat[i] = power_supply_get_by_name( @@ -1526,8 +1516,7 @@ static int charger_manager_probe(struct platform_device *pdev) if (!cm->charger_stat[i]) { dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n", desc->psy_charger_stat[i]); - ret = -ENODEV; - goto err_chg_stat; + return -ENODEV; } } @@ -1535,21 +1524,18 @@ static int charger_manager_probe(struct platform_device *pdev) if (!cm->fuel_gauge) { dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n", desc->psy_fuel_gauge); - ret = -ENODEV; - goto err_chg_stat; + return -ENODEV; } if (desc->polling_interval_ms == 0 || msecs_to_jiffies(desc->polling_interval_ms) <= CM_JIFFIES_SMALL) { dev_err(&pdev->dev, "polling_interval_ms is too small\n"); - ret = -EINVAL; - goto err_chg_stat; + return -EINVAL; } if (!desc->temperature_out_of_range) { dev_err(&pdev->dev, "there is no temperature_out_of_range\n"); - ret = -EINVAL; - goto err_chg_stat; + return -EINVAL; } if (!desc->charging_max_duration_ms || @@ -1570,14 +1556,13 @@ static int charger_manager_probe(struct platform_device *pdev) cm->charger_psy.name = cm->psy_name_buf; /* Allocate for psy properties because they may vary */ - cm->charger_psy.properties = kzalloc(sizeof(enum power_supply_property) + cm->charger_psy.properties = devm_kzalloc(&pdev->dev, + sizeof(enum power_supply_property) * (ARRAY_SIZE(default_charger_props) + - NUM_CHARGER_PSY_OPTIONAL), - GFP_KERNEL); - if (!cm->charger_psy.properties) { - ret = -ENOMEM; - goto err_chg_stat; - } + NUM_CHARGER_PSY_OPTIONAL), GFP_KERNEL); + if (!cm->charger_psy.properties) + return -ENOMEM; + memcpy(cm->charger_psy.properties, default_charger_props, sizeof(enum power_supply_property) * ARRAY_SIZE(default_charger_props)); @@ -1614,7 +1599,7 @@ static int charger_manager_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Cannot register charger-manager with name \"%s\"\n", cm->charger_psy.name); - goto err_register; + return ret; } /* Register extcon device for charger cable */ @@ -1655,8 +1640,6 @@ err_reg_sysfs: charger = &desc->charger_regulators[i]; sysfs_remove_group(&cm->charger_psy.dev->kobj, &charger->attr_g); - - kfree(charger->attr_g.name); } err_reg_extcon: for (i = 0; i < desc->num_charger_regulators; i++) { @@ -1674,16 +1657,7 @@ err_reg_extcon: } 
power_supply_unregister(&cm->charger_psy); -err_register: - kfree(cm->charger_psy.properties); -err_chg_stat: - kfree(cm->charger_stat); -err_no_charger_stat: -err_no_charger: - kfree(cm->desc); -err_alloc_desc: - kfree(cm); -err_alloc: + return ret; } @@ -1718,11 +1692,6 @@ static int charger_manager_remove(struct platform_device *pdev) try_charger_enable(cm, false); - kfree(cm->charger_psy.properties); - kfree(cm->charger_stat); - kfree(cm->desc); - kfree(cm); - return 0; } diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c index fc04d191579b..1bb3a91b1acc 100644 --- a/drivers/power/isp1704_charger.c +++ b/drivers/power/isp1704_charger.c @@ -2,6 +2,7 @@ * ISP1704 USB Charger Detection driver * * Copyright (C) 2010 Nokia Corporation + * Copyright (C) 2012 - 2013 Pali Rohár <pali.rohar@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -65,10 +66,6 @@ struct isp1704_charger { unsigned present:1; unsigned online:1; unsigned current_max; - - /* temp storage variables */ - unsigned long event; - unsigned max_power; }; static inline int isp1704_read(struct isp1704_charger *isp, u32 reg) @@ -231,56 +228,59 @@ static inline int isp1704_charger_detect(struct isp1704_charger *isp) return ret; } +static inline int isp1704_charger_detect_dcp(struct isp1704_charger *isp) +{ + if (isp1704_charger_detect(isp) && + isp1704_charger_type(isp) == POWER_SUPPLY_TYPE_USB_DCP) + return true; + else + return false; +} + static void isp1704_charger_work(struct work_struct *data) { - int detect; - unsigned long event; - unsigned power; struct isp1704_charger *isp = container_of(data, struct isp1704_charger, work); static DEFINE_MUTEX(lock); - event = isp->event; - power = isp->max_power; - mutex_lock(&lock); - if (event != USB_EVENT_NONE) - isp1704_charger_set_power(isp, 1); - - switch (event) { + switch (isp->phy->last_event) { case USB_EVENT_VBUS: - isp->online = true; + /* do not call wall charger detection more times */ + if (!isp->present) { + isp->online = true; + isp->present = 1; + isp1704_charger_set_power(isp, 1); + + /* detect wall charger */ + if (isp1704_charger_detect_dcp(isp)) { + isp->psy.type = POWER_SUPPLY_TYPE_USB_DCP; + isp->current_max = 1800; + } else { + isp->psy.type = POWER_SUPPLY_TYPE_USB; + isp->current_max = 500; + } - /* detect charger */ - detect = isp1704_charger_detect(isp); - - if (detect) { - isp->present = detect; - isp->psy.type = isp1704_charger_type(isp); + /* enable data pullups */ + if (isp->phy->otg->gadget) + usb_gadget_connect(isp->phy->otg->gadget); } - switch (isp->psy.type) { - case POWER_SUPPLY_TYPE_USB_DCP: - isp->current_max = 1800; - break; - case POWER_SUPPLY_TYPE_USB_CDP: + if (isp->psy.type != POWER_SUPPLY_TYPE_USB_DCP) { /* * Only 500mA here or high speed chirp * handshaking may break */ - isp->current_max = 500; - /* FALLTHROUGH */ - case POWER_SUPPLY_TYPE_USB: - default: - /* enable data pullups */ - if (isp->phy->otg->gadget) - usb_gadget_connect(isp->phy->otg->gadget); + if (isp->current_max > 500) + isp->current_max = 500; + + if (isp->current_max > 100) + isp->psy.type = POWER_SUPPLY_TYPE_USB_CDP; } break; case USB_EVENT_NONE: isp->online = false; - isp->current_max = 0; isp->present = 0; isp->current_max = 0; isp->psy.type = POWER_SUPPLY_TYPE_USB; @@ -298,12 +298,6 @@ static void isp1704_charger_work(struct work_struct *data) isp1704_charger_set_power(isp, 0); break; - case USB_EVENT_ENUMERATED: - if (isp->present) - 
isp->current_max = 1800; - else - isp->current_max = power; - break; default: goto out; } @@ -314,16 +308,11 @@ out: } static int isp1704_notifier_call(struct notifier_block *nb, - unsigned long event, void *power) + unsigned long val, void *v) { struct isp1704_charger *isp = container_of(nb, struct isp1704_charger, nb); - isp->event = event; - - if (power) - isp->max_power = *((unsigned *)power); - schedule_work(&isp->work); return NOTIFY_OK; @@ -462,13 +451,13 @@ static int isp1704_charger_probe(struct platform_device *pdev) if (isp->phy->otg->gadget) usb_gadget_disconnect(isp->phy->otg->gadget); + if (isp->phy->last_event == USB_EVENT_NONE) + isp1704_charger_set_power(isp, 0); + /* Detect charger if VBUS is valid (the cable was already plugged). */ - ret = isp1704_read(isp, ULPI_USB_INT_STS); - isp1704_charger_set_power(isp, 0); - if ((ret & ULPI_INT_VBUS_VALID) && !isp->phy->otg->default_a) { - isp->event = USB_EVENT_VBUS; + if (isp->phy->last_event == USB_EVENT_VBUS && + !isp->phy->otg->default_a) schedule_work(&isp->work); - } return 0; fail2: diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c index d664ef58afa7..e0b22f9b6fdd 100644 --- a/drivers/power/max17042_battery.c +++ b/drivers/power/max17042_battery.c @@ -33,6 +33,7 @@ #include <linux/power_supply.h> #include <linux/power/max17042_battery.h> #include <linux/of.h> +#include <linux/regmap.h> /* Status register bits */ #define STATUS_POR_BIT (1 << 1) @@ -67,6 +68,7 @@ struct max17042_chip { struct i2c_client *client; + struct regmap *regmap; struct power_supply battery; enum max170xx_chip_type chip_type; struct max17042_platform_data *pdata; @@ -74,35 +76,6 @@ struct max17042_chip { int init_complete; }; -static int max17042_write_reg(struct i2c_client *client, u8 reg, u16 value) -{ - int ret = i2c_smbus_write_word_data(client, reg, value); - - if (ret < 0) - dev_err(&client->dev, "%s: err %d\n", __func__, ret); - - return ret; -} - -static int max17042_read_reg(struct i2c_client *client, u8 reg) -{ - int ret = i2c_smbus_read_word_data(client, reg); - - if (ret < 0) - dev_err(&client->dev, "%s: err %d\n", __func__, ret); - - return ret; -} - -static void max17042_set_reg(struct i2c_client *client, - struct max17042_reg_data *data, int size) -{ - int i; - - for (i = 0; i < size; i++) - max17042_write_reg(client, data[i].addr, data[i].data); -} - static enum power_supply_property max17042_battery_props[] = { POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_CYCLE_COUNT, @@ -125,96 +98,98 @@ static int max17042_get_property(struct power_supply *psy, { struct max17042_chip *chip = container_of(psy, struct max17042_chip, battery); + struct regmap *map = chip->regmap; int ret; + u32 data; if (!chip->init_complete) return -EAGAIN; switch (psp) { case POWER_SUPPLY_PROP_PRESENT: - ret = max17042_read_reg(chip->client, MAX17042_STATUS); + ret = regmap_read(map, MAX17042_STATUS, &data); if (ret < 0) return ret; - if (ret & MAX17042_STATUS_BattAbsent) + if (data & MAX17042_STATUS_BattAbsent) val->intval = 0; else val->intval = 1; break; case POWER_SUPPLY_PROP_CYCLE_COUNT: - ret = max17042_read_reg(chip->client, MAX17042_Cycles); + ret = regmap_read(map, MAX17042_Cycles, &data); if (ret < 0) return ret; - val->intval = ret; + val->intval = data; break; case POWER_SUPPLY_PROP_VOLTAGE_MAX: - ret = max17042_read_reg(chip->client, MAX17042_MinMaxVolt); + ret = regmap_read(map, MAX17042_MinMaxVolt, &data); if (ret < 0) return ret; - val->intval = ret >> 8; + val->intval = data >> 8; val->intval *= 20000; /* Units of LSB = 
20mV */ break; case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: if (chip->chip_type == MAX17042) - ret = max17042_read_reg(chip->client, MAX17042_V_empty); + ret = regmap_read(map, MAX17042_V_empty, &data); else - ret = max17042_read_reg(chip->client, MAX17047_V_empty); + ret = regmap_read(map, MAX17047_V_empty, &data); if (ret < 0) return ret; - val->intval = ret >> 7; + val->intval = data >> 7; val->intval *= 10000; /* Units of LSB = 10mV */ break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: - ret = max17042_read_reg(chip->client, MAX17042_VCELL); + ret = regmap_read(map, MAX17042_VCELL, &data); if (ret < 0) return ret; - val->intval = ret * 625 / 8; + val->intval = data * 625 / 8; break; case POWER_SUPPLY_PROP_VOLTAGE_AVG: - ret = max17042_read_reg(chip->client, MAX17042_AvgVCELL); + ret = regmap_read(map, MAX17042_AvgVCELL, &data); if (ret < 0) return ret; - val->intval = ret * 625 / 8; + val->intval = data * 625 / 8; break; case POWER_SUPPLY_PROP_VOLTAGE_OCV: - ret = max17042_read_reg(chip->client, MAX17042_OCVInternal); + ret = regmap_read(map, MAX17042_OCVInternal, &data); if (ret < 0) return ret; - val->intval = ret * 625 / 8; + val->intval = data * 625 / 8; break; case POWER_SUPPLY_PROP_CAPACITY: - ret = max17042_read_reg(chip->client, MAX17042_RepSOC); + ret = regmap_read(map, MAX17042_RepSOC, &data); if (ret < 0) return ret; - val->intval = ret >> 8; + val->intval = data >> 8; break; case POWER_SUPPLY_PROP_CHARGE_FULL: - ret = max17042_read_reg(chip->client, MAX17042_FullCAP); + ret = regmap_read(map, MAX17042_FullCAP, &data); if (ret < 0) return ret; - val->intval = ret * 1000 / 2; + val->intval = data * 1000 / 2; break; case POWER_SUPPLY_PROP_CHARGE_COUNTER: - ret = max17042_read_reg(chip->client, MAX17042_QH); + ret = regmap_read(map, MAX17042_QH, &data); if (ret < 0) return ret; - val->intval = ret * 1000 / 2; + val->intval = data * 1000 / 2; break; case POWER_SUPPLY_PROP_TEMP: - ret = max17042_read_reg(chip->client, MAX17042_TEMP); + ret = regmap_read(map, MAX17042_TEMP, &data); if (ret < 0) return ret; - val->intval = ret; + val->intval = data; /* The value is signed. 
*/ if (val->intval & 0x8000) { val->intval = (0x7fff & ~val->intval) + 1; @@ -226,11 +201,11 @@ static int max17042_get_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CURRENT_NOW: if (chip->pdata->enable_current_sense) { - ret = max17042_read_reg(chip->client, MAX17042_Current); + ret = regmap_read(map, MAX17042_Current, &data); if (ret < 0) return ret; - val->intval = ret; + val->intval = data; if (val->intval & 0x8000) { /* Negative */ val->intval = ~val->intval & 0x7fff; @@ -244,12 +219,11 @@ static int max17042_get_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CURRENT_AVG: if (chip->pdata->enable_current_sense) { - ret = max17042_read_reg(chip->client, - MAX17042_AvgCurrent); + ret = regmap_read(map, MAX17042_AvgCurrent, &data); if (ret < 0) return ret; - val->intval = ret; + val->intval = data; if (val->intval & 0x8000) { /* Negative */ val->intval = ~val->intval & 0x7fff; @@ -267,16 +241,15 @@ static int max17042_get_property(struct power_supply *psy, return 0; } -static int max17042_write_verify_reg(struct i2c_client *client, - u8 reg, u16 value) +static int max17042_write_verify_reg(struct regmap *map, u8 reg, u32 value) { int retries = 8; int ret; - u16 read_value; + u32 read_value; do { - ret = i2c_smbus_write_word_data(client, reg, value); - read_value = max17042_read_reg(client, reg); + ret = regmap_write(map, reg, value); + regmap_read(map, reg, &read_value); if (read_value != value) { ret = -EIO; retries--; @@ -284,50 +257,51 @@ static int max17042_write_verify_reg(struct i2c_client *client, } while (retries && read_value != value); if (ret < 0) - dev_err(&client->dev, "%s: err %d\n", __func__, ret); + pr_err("%s: err %d\n", __func__, ret); return ret; } -static inline void max17042_override_por( - struct i2c_client *client, u8 reg, u16 value) +static inline void max17042_override_por(struct regmap *map, + u8 reg, u16 value) { if (value) - max17042_write_reg(client, reg, value); + regmap_write(map, reg, value); } static inline void max10742_unlock_model(struct max17042_chip *chip) { - struct i2c_client *client = chip->client; - max17042_write_reg(client, MAX17042_MLOCKReg1, MODEL_UNLOCK1); - max17042_write_reg(client, MAX17042_MLOCKReg2, MODEL_UNLOCK2); + struct regmap *map = chip->regmap; + regmap_write(map, MAX17042_MLOCKReg1, MODEL_UNLOCK1); + regmap_write(map, MAX17042_MLOCKReg2, MODEL_UNLOCK2); } static inline void max10742_lock_model(struct max17042_chip *chip) { - struct i2c_client *client = chip->client; - max17042_write_reg(client, MAX17042_MLOCKReg1, MODEL_LOCK1); - max17042_write_reg(client, MAX17042_MLOCKReg2, MODEL_LOCK2); + struct regmap *map = chip->regmap; + + regmap_write(map, MAX17042_MLOCKReg1, MODEL_LOCK1); + regmap_write(map, MAX17042_MLOCKReg2, MODEL_LOCK2); } static inline void max17042_write_model_data(struct max17042_chip *chip, u8 addr, int size) { - struct i2c_client *client = chip->client; + struct regmap *map = chip->regmap; int i; for (i = 0; i < size; i++) - max17042_write_reg(client, addr + i, - chip->pdata->config_data->cell_char_tbl[i]); + regmap_write(map, addr + i, + chip->pdata->config_data->cell_char_tbl[i]); } static inline void max17042_read_model_data(struct max17042_chip *chip, - u8 addr, u16 *data, int size) + u8 addr, u32 *data, int size) { - struct i2c_client *client = chip->client; + struct regmap *map = chip->regmap; int i; for (i = 0; i < size; i++) - data[i] = max17042_read_reg(client, addr + i); + regmap_read(map, addr + i, &data[i]); } static inline int max17042_model_data_compare(struct 
max17042_chip *chip, @@ -350,7 +324,7 @@ static int max17042_init_model(struct max17042_chip *chip) { int ret; int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); - u16 *temp_data; + u32 *temp_data; temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); if (!temp_data) @@ -365,7 +339,7 @@ static int max17042_init_model(struct max17042_chip *chip) ret = max17042_model_data_compare( chip, chip->pdata->config_data->cell_char_tbl, - temp_data, + (u16 *)temp_data, table_size); max10742_lock_model(chip); @@ -378,7 +352,7 @@ static int max17042_verify_model_lock(struct max17042_chip *chip) { int i; int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); - u16 *temp_data; + u32 *temp_data; int ret = 0; temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); @@ -398,40 +372,38 @@ static int max17042_verify_model_lock(struct max17042_chip *chip) static void max17042_write_config_regs(struct max17042_chip *chip) { struct max17042_config_data *config = chip->pdata->config_data; + struct regmap *map = chip->regmap; - max17042_write_reg(chip->client, MAX17042_CONFIG, config->config); - max17042_write_reg(chip->client, MAX17042_LearnCFG, config->learn_cfg); - max17042_write_reg(chip->client, MAX17042_FilterCFG, + regmap_write(map, MAX17042_CONFIG, config->config); + regmap_write(map, MAX17042_LearnCFG, config->learn_cfg); + regmap_write(map, MAX17042_FilterCFG, config->filter_cfg); - max17042_write_reg(chip->client, MAX17042_RelaxCFG, config->relax_cfg); + regmap_write(map, MAX17042_RelaxCFG, config->relax_cfg); if (chip->chip_type == MAX17047) - max17042_write_reg(chip->client, MAX17047_FullSOCThr, + regmap_write(map, MAX17047_FullSOCThr, config->full_soc_thresh); } static void max17042_write_custom_regs(struct max17042_chip *chip) { struct max17042_config_data *config = chip->pdata->config_data; + struct regmap *map = chip->regmap; - max17042_write_verify_reg(chip->client, MAX17042_RCOMP0, - config->rcomp0); - max17042_write_verify_reg(chip->client, MAX17042_TempCo, - config->tcompc0); - max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm, - config->ichgt_term); + max17042_write_verify_reg(map, MAX17042_RCOMP0, config->rcomp0); + max17042_write_verify_reg(map, MAX17042_TempCo, config->tcompc0); + max17042_write_verify_reg(map, MAX17042_ICHGTerm, config->ichgt_term); if (chip->chip_type == MAX17042) { - max17042_write_reg(chip->client, MAX17042_EmptyTempCo, - config->empty_tempco); - max17042_write_verify_reg(chip->client, MAX17042_K_empty0, + regmap_write(map, MAX17042_EmptyTempCo, config->empty_tempco); + max17042_write_verify_reg(map, MAX17042_K_empty0, config->kempty0); } else { - max17042_write_verify_reg(chip->client, MAX17047_QRTbl00, + max17042_write_verify_reg(map, MAX17047_QRTbl00, config->qrtbl00); - max17042_write_verify_reg(chip->client, MAX17047_QRTbl10, + max17042_write_verify_reg(map, MAX17047_QRTbl10, config->qrtbl10); - max17042_write_verify_reg(chip->client, MAX17047_QRTbl20, + max17042_write_verify_reg(map, MAX17047_QRTbl20, config->qrtbl20); - max17042_write_verify_reg(chip->client, MAX17047_QRTbl30, + max17042_write_verify_reg(map, MAX17047_QRTbl30, config->qrtbl30); } } @@ -439,58 +411,60 @@ static void max17042_write_custom_regs(struct max17042_chip *chip) static void max17042_update_capacity_regs(struct max17042_chip *chip) { struct max17042_config_data *config = chip->pdata->config_data; + struct regmap *map = chip->regmap; - max17042_write_verify_reg(chip->client, MAX17042_FullCAP, + max17042_write_verify_reg(map, 
MAX17042_FullCAP, config->fullcap); - max17042_write_reg(chip->client, MAX17042_DesignCap, - config->design_cap); - max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom, + regmap_write(map, MAX17042_DesignCap, config->design_cap); + max17042_write_verify_reg(map, MAX17042_FullCAPNom, config->fullcapnom); } static void max17042_reset_vfsoc0_reg(struct max17042_chip *chip) { - u16 vfSoc; + unsigned int vfSoc; + struct regmap *map = chip->regmap; - vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC); - max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, VFSOC0_UNLOCK); - max17042_write_verify_reg(chip->client, MAX17042_VFSOC0, vfSoc); - max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, VFSOC0_LOCK); + regmap_read(map, MAX17042_VFSOC, &vfSoc); + regmap_write(map, MAX17042_VFSOC0Enable, VFSOC0_UNLOCK); + max17042_write_verify_reg(map, MAX17042_VFSOC0, vfSoc); + regmap_write(map, MAX17042_VFSOC0Enable, VFSOC0_LOCK); } static void max17042_load_new_capacity_params(struct max17042_chip *chip) { - u16 full_cap0, rep_cap, dq_acc, vfSoc; + u32 full_cap0, rep_cap, dq_acc, vfSoc; u32 rem_cap; struct max17042_config_data *config = chip->pdata->config_data; + struct regmap *map = chip->regmap; - full_cap0 = max17042_read_reg(chip->client, MAX17042_FullCAP0); - vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC); + regmap_read(map, MAX17042_FullCAP0, &full_cap0); + regmap_read(map, MAX17042_VFSOC, &vfSoc); /* fg_vfSoc needs to shifted by 8 bits to get the * perc in 1% accuracy, to get the right rem_cap multiply * full_cap0, fg_vfSoc and devide by 100 */ rem_cap = ((vfSoc >> 8) * full_cap0) / 100; - max17042_write_verify_reg(chip->client, MAX17042_RemCap, (u16)rem_cap); + max17042_write_verify_reg(map, MAX17042_RemCap, rem_cap); - rep_cap = (u16)rem_cap; - max17042_write_verify_reg(chip->client, MAX17042_RepCap, rep_cap); + rep_cap = rem_cap; + max17042_write_verify_reg(map, MAX17042_RepCap, rep_cap); /* Write dQ_acc to 200% of Capacity and dP_acc to 200% */ dq_acc = config->fullcap / dQ_ACC_DIV; - max17042_write_verify_reg(chip->client, MAX17042_dQacc, dq_acc); - max17042_write_verify_reg(chip->client, MAX17042_dPacc, dP_ACC_200); + max17042_write_verify_reg(map, MAX17042_dQacc, dq_acc); + max17042_write_verify_reg(map, MAX17042_dPacc, dP_ACC_200); - max17042_write_verify_reg(chip->client, MAX17042_FullCAP, + max17042_write_verify_reg(map, MAX17042_FullCAP, config->fullcap); - max17042_write_reg(chip->client, MAX17042_DesignCap, + regmap_write(map, MAX17042_DesignCap, config->design_cap); - max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom, + max17042_write_verify_reg(map, MAX17042_FullCAPNom, config->fullcapnom); /* Update SOC register with new SOC */ - max17042_write_reg(chip->client, MAX17042_RepSOC, vfSoc); + regmap_write(map, MAX17042_RepSOC, vfSoc); } /* @@ -500,59 +474,60 @@ static void max17042_load_new_capacity_params(struct max17042_chip *chip) */ static inline void max17042_override_por_values(struct max17042_chip *chip) { - struct i2c_client *client = chip->client; + struct regmap *map = chip->regmap; struct max17042_config_data *config = chip->pdata->config_data; - max17042_override_por(client, MAX17042_TGAIN, config->tgain); - max17042_override_por(client, MAx17042_TOFF, config->toff); - max17042_override_por(client, MAX17042_CGAIN, config->cgain); - max17042_override_por(client, MAX17042_COFF, config->coff); - - max17042_override_por(client, MAX17042_VALRT_Th, config->valrt_thresh); - max17042_override_por(client, MAX17042_TALRT_Th, config->talrt_thresh); 
- max17042_override_por(client, MAX17042_SALRT_Th, - config->soc_alrt_thresh); - max17042_override_por(client, MAX17042_CONFIG, config->config); - max17042_override_por(client, MAX17042_SHDNTIMER, config->shdntimer); - - max17042_override_por(client, MAX17042_DesignCap, config->design_cap); - max17042_override_por(client, MAX17042_ICHGTerm, config->ichgt_term); - - max17042_override_por(client, MAX17042_AtRate, config->at_rate); - max17042_override_por(client, MAX17042_LearnCFG, config->learn_cfg); - max17042_override_por(client, MAX17042_FilterCFG, config->filter_cfg); - max17042_override_por(client, MAX17042_RelaxCFG, config->relax_cfg); - max17042_override_por(client, MAX17042_MiscCFG, config->misc_cfg); - max17042_override_por(client, MAX17042_MaskSOC, config->masksoc); - - max17042_override_por(client, MAX17042_FullCAP, config->fullcap); - max17042_override_por(client, MAX17042_FullCAPNom, config->fullcapnom); + max17042_override_por(map, MAX17042_TGAIN, config->tgain); + max17042_override_por(map, MAx17042_TOFF, config->toff); + max17042_override_por(map, MAX17042_CGAIN, config->cgain); + max17042_override_por(map, MAX17042_COFF, config->coff); + + max17042_override_por(map, MAX17042_VALRT_Th, config->valrt_thresh); + max17042_override_por(map, MAX17042_TALRT_Th, config->talrt_thresh); + max17042_override_por(map, MAX17042_SALRT_Th, + config->soc_alrt_thresh); + max17042_override_por(map, MAX17042_CONFIG, config->config); + max17042_override_por(map, MAX17042_SHDNTIMER, config->shdntimer); + + max17042_override_por(map, MAX17042_DesignCap, config->design_cap); + max17042_override_por(map, MAX17042_ICHGTerm, config->ichgt_term); + + max17042_override_por(map, MAX17042_AtRate, config->at_rate); + max17042_override_por(map, MAX17042_LearnCFG, config->learn_cfg); + max17042_override_por(map, MAX17042_FilterCFG, config->filter_cfg); + max17042_override_por(map, MAX17042_RelaxCFG, config->relax_cfg); + max17042_override_por(map, MAX17042_MiscCFG, config->misc_cfg); + max17042_override_por(map, MAX17042_MaskSOC, config->masksoc); + + max17042_override_por(map, MAX17042_FullCAP, config->fullcap); + max17042_override_por(map, MAX17042_FullCAPNom, config->fullcapnom); if (chip->chip_type == MAX17042) - max17042_override_por(client, MAX17042_SOC_empty, + max17042_override_por(map, MAX17042_SOC_empty, config->socempty); - max17042_override_por(client, MAX17042_LAvg_empty, config->lavg_empty); - max17042_override_por(client, MAX17042_dQacc, config->dqacc); - max17042_override_por(client, MAX17042_dPacc, config->dpacc); + max17042_override_por(map, MAX17042_LAvg_empty, config->lavg_empty); + max17042_override_por(map, MAX17042_dQacc, config->dqacc); + max17042_override_por(map, MAX17042_dPacc, config->dpacc); if (chip->chip_type == MAX17042) - max17042_override_por(client, MAX17042_V_empty, config->vempty); + max17042_override_por(map, MAX17042_V_empty, config->vempty); else - max17042_override_por(client, MAX17047_V_empty, config->vempty); - max17042_override_por(client, MAX17042_TempNom, config->temp_nom); - max17042_override_por(client, MAX17042_TempLim, config->temp_lim); - max17042_override_por(client, MAX17042_FCTC, config->fctc); - max17042_override_por(client, MAX17042_RCOMP0, config->rcomp0); - max17042_override_por(client, MAX17042_TempCo, config->tcompc0); + max17042_override_por(map, MAX17047_V_empty, config->vempty); + max17042_override_por(map, MAX17042_TempNom, config->temp_nom); + max17042_override_por(map, MAX17042_TempLim, config->temp_lim); + max17042_override_por(map, 
MAX17042_FCTC, config->fctc); + max17042_override_por(map, MAX17042_RCOMP0, config->rcomp0); + max17042_override_por(map, MAX17042_TempCo, config->tcompc0); if (chip->chip_type) { - max17042_override_por(client, MAX17042_EmptyTempCo, - config->empty_tempco); - max17042_override_por(client, MAX17042_K_empty0, - config->kempty0); + max17042_override_por(map, MAX17042_EmptyTempCo, + config->empty_tempco); + max17042_override_por(map, MAX17042_K_empty0, + config->kempty0); } } static int max17042_init_chip(struct max17042_chip *chip) { + struct regmap *map = chip->regmap; int ret; int val; @@ -597,31 +572,32 @@ static int max17042_init_chip(struct max17042_chip *chip) max17042_load_new_capacity_params(chip); /* Init complete, Clear the POR bit */ - val = max17042_read_reg(chip->client, MAX17042_STATUS); - max17042_write_reg(chip->client, MAX17042_STATUS, - val & (~STATUS_POR_BIT)); + regmap_read(map, MAX17042_STATUS, &val); + regmap_write(map, MAX17042_STATUS, val & (~STATUS_POR_BIT)); return 0; } static void max17042_set_soc_threshold(struct max17042_chip *chip, u16 off) { - u16 soc, soc_tr; + struct regmap *map = chip->regmap; + u32 soc, soc_tr; /* program interrupt thesholds such that we should * get interrupt for every 'off' perc change in the soc */ - soc = max17042_read_reg(chip->client, MAX17042_RepSOC) >> 8; + regmap_read(map, MAX17042_RepSOC, &soc); + soc >>= 8; soc_tr = (soc + off) << 8; soc_tr |= (soc - off); - max17042_write_reg(chip->client, MAX17042_SALRT_Th, soc_tr); + regmap_write(map, MAX17042_SALRT_Th, soc_tr); } static irqreturn_t max17042_thread_handler(int id, void *dev) { struct max17042_chip *chip = dev; - u16 val; + u32 val; - val = max17042_read_reg(chip->client, MAX17042_STATUS); + regmap_read(chip->regmap, MAX17042_STATUS, &val); if ((val & STATUS_INTR_SOCMIN_BIT) || (val & STATUS_INTR_SOCMAX_BIT)) { dev_info(&chip->client->dev, "SOC threshold INTR\n"); @@ -682,13 +658,20 @@ max17042_get_pdata(struct device *dev) } #endif +static struct regmap_config max17042_regmap_config = { + .reg_bits = 8, + .val_bits = 16, + .val_format_endian = REGMAP_ENDIAN_NATIVE, +}; + static int max17042_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); struct max17042_chip *chip; int ret; - int reg; + int i; + u32 val; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) return -EIO; @@ -698,6 +681,12 @@ static int max17042_probe(struct i2c_client *client, return -ENOMEM; chip->client = client; + chip->regmap = devm_regmap_init_i2c(client, &max17042_regmap_config); + if (IS_ERR(chip->regmap)) { + dev_err(&client->dev, "Failed to initialize regmap\n"); + return -EINVAL; + } + chip->pdata = max17042_get_pdata(&client->dev); if (!chip->pdata) { dev_err(&client->dev, "no platform data provided\n"); @@ -706,15 +695,15 @@ static int max17042_probe(struct i2c_client *client, i2c_set_clientdata(client, chip); - ret = max17042_read_reg(chip->client, MAX17042_DevName); - if (ret == MAX17042_IC_VERSION) { + regmap_read(chip->regmap, MAX17042_DevName, &val); + if (val == MAX17042_IC_VERSION) { dev_dbg(&client->dev, "chip type max17042 detected\n"); chip->chip_type = MAX17042; - } else if (ret == MAX17047_IC_VERSION) { + } else if (val == MAX17047_IC_VERSION) { dev_dbg(&client->dev, "chip type max17047/50 detected\n"); chip->chip_type = MAX17047; } else { - dev_err(&client->dev, "device version mismatch: %x\n", ret); + dev_err(&client->dev, "device version mismatch: %x\n", val); return -EIO; } @@ -733,13 
+722,15 @@ static int max17042_probe(struct i2c_client *client, chip->pdata->r_sns = MAX17042_DEFAULT_SNS_RESISTOR; if (chip->pdata->init_data) - max17042_set_reg(client, chip->pdata->init_data, - chip->pdata->num_init_data); + for (i = 0; i < chip->pdata->num_init_data; i++) + regmap_write(chip->regmap, + chip->pdata->init_data[i].addr, + chip->pdata->init_data[i].data); if (!chip->pdata->enable_current_sense) { - max17042_write_reg(client, MAX17042_CGAIN, 0x0000); - max17042_write_reg(client, MAX17042_MiscCFG, 0x0003); - max17042_write_reg(client, MAX17042_LearnCFG, 0x0007); + regmap_write(chip->regmap, MAX17042_CGAIN, 0x0000); + regmap_write(chip->regmap, MAX17042_MiscCFG, 0x0003); + regmap_write(chip->regmap, MAX17042_LearnCFG, 0x0007); } ret = power_supply_register(&client->dev, &chip->battery); @@ -754,9 +745,9 @@ static int max17042_probe(struct i2c_client *client, IRQF_TRIGGER_FALLING, chip->battery.name, chip); if (!ret) { - reg = max17042_read_reg(client, MAX17042_CONFIG); - reg |= CONFIG_ALRT_BIT_ENBL; - max17042_write_reg(client, MAX17042_CONFIG, reg); + regmap_read(chip->regmap, MAX17042_CONFIG, &val); + val |= CONFIG_ALRT_BIT_ENBL; + regmap_write(chip->regmap, MAX17042_CONFIG, val); max17042_set_soc_threshold(chip, 1); } else { client->irq = 0; @@ -765,8 +756,8 @@ static int max17042_probe(struct i2c_client *client, } } - reg = max17042_read_reg(chip->client, MAX17042_STATUS); - if (reg & STATUS_POR_BIT) { + regmap_read(chip->regmap, MAX17042_STATUS, &val); + if (val & STATUS_POR_BIT) { INIT_WORK(&chip->work, max17042_init_worker); schedule_work(&chip->work); } else { @@ -786,7 +777,7 @@ static int max17042_remove(struct i2c_client *client) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int max17042_suspend(struct device *dev) { struct max17042_chip *chip = dev_get_drvdata(dev); @@ -816,17 +807,11 @@ static int max17042_resume(struct device *dev) return 0; } - -static const struct dev_pm_ops max17042_pm_ops = { - .suspend = max17042_suspend, - .resume = max17042_resume, -}; - -#define MAX17042_PM_OPS (&max17042_pm_ops) -#else -#define MAX17042_PM_OPS NULL #endif +static SIMPLE_DEV_PM_OPS(max17042_pm_ops, max17042_suspend, + max17042_resume); + #ifdef CONFIG_OF static const struct of_device_id max17042_dt_match[] = { { .compatible = "maxim,max17042" }, @@ -849,7 +834,7 @@ static struct i2c_driver max17042_i2c_driver = { .driver = { .name = "max17042", .of_match_table = of_match_ptr(max17042_dt_match), - .pm = MAX17042_PM_OPS, + .pm = &max17042_pm_ops, }, .probe = max17042_probe, .remove = max17042_remove, diff --git a/drivers/power/pm2301_charger.c b/drivers/power/pm2301_charger.c index ffa10ed83eb1..62c15af58c9a 100644 --- a/drivers/power/pm2301_charger.c +++ b/drivers/power/pm2301_charger.c @@ -205,7 +205,7 @@ static int pm2xxx_charger_batt_therm_mngt(struct pm2xxx_charger *pm2, int val) } -int pm2xxx_charger_die_therm_mngt(struct pm2xxx_charger *pm2, int val) +static int pm2xxx_charger_die_therm_mngt(struct pm2xxx_charger *pm2, int val) { queue_work(pm2->charger_wq, &pm2->check_main_thermal_prot_work); @@ -722,8 +722,12 @@ static int pm2xxx_charger_ac_en(struct ux500_charger *charger, dev_dbg(pm2->dev, "Enable AC: %dmV %dmA\n", vset, iset); if (!pm2->vddadc_en_ac) { - regulator_enable(pm2->regu); - pm2->vddadc_en_ac = true; + ret = regulator_enable(pm2->regu); + if (ret) + dev_warn(pm2->dev, + "Failed to enable vddadc regulator\n"); + else + pm2->vddadc_en_ac = true; } ret = pm2xxx_charging_init(pm2); @@ -953,37 +957,24 @@ static int 
pm2xxx_runtime_suspend(struct device *dev) { struct i2c_client *pm2xxx_i2c_client = to_i2c_client(dev); struct pm2xxx_charger *pm2; - int ret = 0; pm2 = (struct pm2xxx_charger *)i2c_get_clientdata(pm2xxx_i2c_client); - if (!pm2) { - dev_err(pm2->dev, "no pm2xxx_charger data supplied\n"); - ret = -EINVAL; - return ret; - } - clear_lpn_pin(pm2); - return ret; + return 0; } static int pm2xxx_runtime_resume(struct device *dev) { struct i2c_client *pm2xxx_i2c_client = to_i2c_client(dev); struct pm2xxx_charger *pm2; - int ret = 0; pm2 = (struct pm2xxx_charger *)i2c_get_clientdata(pm2xxx_i2c_client); - if (!pm2) { - dev_err(pm2->dev, "no pm2xxx_charger data supplied\n"); - ret = -EINVAL; - return ret; - } if (gpio_is_valid(pm2->lpn_pin) && gpio_get_value(pm2->lpn_pin) == 0) set_lpn_pin(pm2); - return ret; + return 0; } #endif diff --git a/drivers/power/tps65090-charger.c b/drivers/power/tps65090-charger.c index bdd7b9b2546a..8fc9d6df87f6 100644 --- a/drivers/power/tps65090-charger.c +++ b/drivers/power/tps65090-charger.c @@ -15,15 +15,17 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/delay.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/slab.h> -#include <linux/delay.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/power_supply.h> +#include <linux/slab.h> + #include <linux/mfd/tps65090.h> #define TPS65090_REG_INTR_STS 0x00 @@ -185,10 +187,6 @@ static irqreturn_t tps65090_charger_isr(int irq, void *dev_id) return IRQ_HANDLED; } -#if defined(CONFIG_OF) - -#include <linux/of_device.h> - static struct tps65090_platform_data * tps65090_parse_dt_charger_data(struct platform_device *pdev) { @@ -210,13 +208,6 @@ static struct tps65090_platform_data * return pdata; } -#else -static struct tps65090_platform_data * - tps65090_parse_dt_charger_data(struct platform_device *pdev) -{ - return NULL; -} -#endif static int tps65090_charger_probe(struct platform_device *pdev) { @@ -228,7 +219,7 @@ static int tps65090_charger_probe(struct platform_device *pdev) pdata = dev_get_platdata(pdev->dev.parent); - if (!pdata && pdev->dev.of_node) + if (IS_ENABLED(CONFIG_OF) && !pdata && pdev->dev.of_node) pdata = tps65090_parse_dt_charger_data(pdev); if (!pdata) { @@ -277,13 +268,13 @@ static int tps65090_charger_probe(struct platform_device *pdev) if (ret) { dev_err(cdata->dev, "Unable to register irq %d err %d\n", irq, ret); - goto fail_free_irq; + goto fail_unregister_supply; } ret = tps65090_config_charger(cdata); if (ret < 0) { dev_err(&pdev->dev, "charger config failed, err %d\n", ret); - goto fail_free_irq; + goto fail_unregister_supply; } /* Check for charger presence */ @@ -292,14 +283,14 @@ static int tps65090_charger_probe(struct platform_device *pdev) if (ret < 0) { dev_err(cdata->dev, "%s(): Error in reading reg 0x%x", __func__, TPS65090_REG_CG_STATUS1); - goto fail_free_irq; + goto fail_unregister_supply; } if (status1 != 0) { ret = tps65090_enable_charging(cdata); if (ret < 0) { dev_err(cdata->dev, "error enabling charger\n"); - goto fail_free_irq; + goto fail_unregister_supply; } cdata->ac_online = 1; power_supply_changed(&cdata->ac); @@ -307,8 +298,6 @@ static int tps65090_charger_probe(struct platform_device *pdev) return 0; -fail_free_irq: - devm_free_irq(cdata->dev, irq, cdata); fail_unregister_supply: power_supply_unregister(&cdata->ac); @@ -319,7 +308,6 @@ 
static int tps65090_charger_remove(struct platform_device *pdev) { struct tps65090_charger *cdata = platform_get_drvdata(pdev); - devm_free_irq(cdata->dev, cdata->irq, cdata); power_supply_unregister(&cdata->ac); return 0; diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c index d98abe911e37..f14108844e1a 100644 --- a/drivers/power/twl4030_charger.c +++ b/drivers/power/twl4030_charger.c @@ -495,10 +495,38 @@ static enum power_supply_property twl4030_charger_props[] = { POWER_SUPPLY_PROP_CURRENT_NOW, }; +#ifdef CONFIG_OF +static const struct twl4030_bci_platform_data * +twl4030_bci_parse_dt(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct twl4030_bci_platform_data *pdata; + u32 num; + + if (!np) + return NULL; + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return pdata; + + if (of_property_read_u32(np, "ti,bb-uvolt", &num) == 0) + pdata->bb_uvolt = num; + if (of_property_read_u32(np, "ti,bb-uamp", &num) == 0) + pdata->bb_uamp = num; + return pdata; +} +#else +static inline const struct twl4030_bci_platform_data * +twl4030_bci_parse_dt(struct device *dev) +{ + return NULL; +} +#endif + static int __init twl4030_bci_probe(struct platform_device *pdev) { struct twl4030_bci *bci; - struct twl4030_bci_platform_data *pdata = pdev->dev.platform_data; + const struct twl4030_bci_platform_data *pdata = pdev->dev.platform_data; int ret; u32 reg; @@ -506,6 +534,9 @@ static int __init twl4030_bci_probe(struct platform_device *pdev) if (bci == NULL) return -ENOMEM; + if (!pdata) + pdata = twl4030_bci_parse_dt(&pdev->dev); + bci->dev = &pdev->dev; bci->irq_chg = platform_get_irq(pdev, 0); bci->irq_bci = platform_get_irq(pdev, 1); @@ -581,8 +612,11 @@ static int __init twl4030_bci_probe(struct platform_device *pdev) twl4030_charger_enable_ac(true); twl4030_charger_enable_usb(bci, true); - twl4030_charger_enable_backup(pdata->bb_uvolt, - pdata->bb_uamp); + if (pdata) + twl4030_charger_enable_backup(pdata->bb_uvolt, + pdata->bb_uamp); + else + twl4030_charger_enable_backup(0, 0); return 0; @@ -631,10 +665,17 @@ static int __exit twl4030_bci_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id twl_bci_of_match[] = { + {.compatible = "ti,twl4030-bci", }, + { } +}; +MODULE_DEVICE_TABLE(of, twl_bci_of_match); + static struct platform_driver twl4030_bci_driver = { .driver = { .name = "twl4030_bci", .owner = THIS_MODULE, + .of_match_table = of_match_ptr(twl_bci_of_match), }, .remove = __exit_p(twl4030_bci_remove), }; diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 75840b5cea6d..eece329d7872 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -62,6 +62,15 @@ config PWM_BFIN To compile this driver as a module, choose M here: the module will be called pwm-bfin. +config PWM_EP93XX + tristate "Cirrus Logic EP93xx PWM support" + depends on ARCH_EP93XX + help + Generic PWM framework driver for Cirrus Logic EP93xx. + + To compile this driver as a module, choose M here: the module + will be called pwm-ep93xx. 
+ config PWM_IMX tristate "i.MX PWM support" depends on ARCH_MXC diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index 77a8c185c5b2..8b754e4dba4a 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_PWM_SYSFS) += sysfs.o obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o +obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o obj-$(CONFIG_PWM_IMX) += pwm-imx.o obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c index ba6ce01035e4..f3dcd02390f1 100644 --- a/drivers/pwm/pwm-atmel-tcb.c +++ b/drivers/pwm/pwm-atmel-tcb.c @@ -249,6 +249,8 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) } } + cmr |= (tcbpwm->div & ATMEL_TC_TCCLKS); + __raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR)); if (index == 0) @@ -305,7 +307,7 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, i = slowclk; rate = 32768; min = div_u64(NSEC_PER_SEC, rate); - max = min << 16; + max = min << tc->tcb_config->counter_width; /* If period is too big return ERANGE error */ if (max < period_ns) diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c new file mode 100644 index 000000000000..33aa4461e1ce --- /dev/null +++ b/drivers/pwm/pwm-ep93xx.c @@ -0,0 +1,230 @@ +/* + * PWM framework driver for Cirrus Logic EP93xx + * + * Copyright (c) 2009 Matthieu Crapet <mcrapet@gmail.com> + * Copyright (c) 2009, 2013 H Hartley Sweeten <hsweeten@visionengravers.com> + * + * EP9301/02 have only one channel: + * platform device ep93xx-pwm.1 - PWMOUT1 (EGPIO14) + * + * EP9307 has only one channel: + * platform device ep93xx-pwm.0 - PWMOUT + * + * EP9312/15 have two channels: + * platform device ep93xx-pwm.0 - PWMOUT + * platform device ep93xx-pwm.1 - PWMOUT1 (EGPIO14) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/pwm.h> + +#include <asm/div64.h> + +#include <mach/platform.h> /* for ep93xx_pwm_{acquire,release}_gpio() */ + +#define EP93XX_PWMx_TERM_COUNT 0x00 +#define EP93XX_PWMx_DUTY_CYCLE 0x04 +#define EP93XX_PWMx_ENABLE 0x08 +#define EP93XX_PWMx_INVERT 0x0c + +struct ep93xx_pwm { + void __iomem *base; + struct clk *clk; + struct pwm_chip chip; +}; + +static inline struct ep93xx_pwm *to_ep93xx_pwm(struct pwm_chip *chip) +{ + return container_of(chip, struct ep93xx_pwm, chip); +} + +static int ep93xx_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct platform_device *pdev = to_platform_device(chip->dev); + + return ep93xx_pwm_acquire_gpio(pdev); +} + +static void ep93xx_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct platform_device *pdev = to_platform_device(chip->dev); + + ep93xx_pwm_release_gpio(pdev); +} + +static int ep93xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns) +{ + struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip); + void __iomem *base = ep93xx_pwm->base; + unsigned long long c; + unsigned long period_cycles; + unsigned long duty_cycles; + unsigned long term; + int ret = 0; + + /* + * The clock needs to be enabled to access the PWM registers. + * Configuration can be changed at any time. + */ + if (!test_bit(PWMF_ENABLED, &pwm->flags)) { + ret = clk_enable(ep93xx_pwm->clk); + if (ret) + return ret; + } + + c = clk_get_rate(ep93xx_pwm->clk); + c *= period_ns; + do_div(c, 1000000000); + period_cycles = c; + + c = period_cycles; + c *= duty_ns; + do_div(c, period_ns); + duty_cycles = c; + + if (period_cycles < 0x10000 && duty_cycles < 0x10000) { + term = readw(base + EP93XX_PWMx_TERM_COUNT); + + /* Order is important if PWM is running */ + if (period_cycles > term) { + writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT); + writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE); + } else { + writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE); + writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT); + } + } else { + ret = -EINVAL; + } + + if (!test_bit(PWMF_ENABLED, &pwm->flags)) + clk_disable(ep93xx_pwm->clk); + + return ret; +} + +static int ep93xx_pwm_polarity(struct pwm_chip *chip, struct pwm_device *pwm, + enum pwm_polarity polarity) +{ + struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip); + int ret; + + /* + * The clock needs to be enabled to access the PWM registers. + * Polarity can only be changed when the PWM is disabled. 
+ */ + ret = clk_enable(ep93xx_pwm->clk); + if (ret) + return ret; + + if (polarity == PWM_POLARITY_INVERSED) + writew(0x1, ep93xx_pwm->base + EP93XX_PWMx_INVERT); + else + writew(0x0, ep93xx_pwm->base + EP93XX_PWMx_INVERT); + + clk_disable(ep93xx_pwm->clk); + + return 0; +} + +static int ep93xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip); + int ret; + + ret = clk_enable(ep93xx_pwm->clk); + if (ret) + return ret; + + writew(0x1, ep93xx_pwm->base + EP93XX_PWMx_ENABLE); + + return 0; +} + +static void ep93xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip); + + writew(0x0, ep93xx_pwm->base + EP93XX_PWMx_ENABLE); + clk_disable(ep93xx_pwm->clk); +} + +static const struct pwm_ops ep93xx_pwm_ops = { + .request = ep93xx_pwm_request, + .free = ep93xx_pwm_free, + .config = ep93xx_pwm_config, + .set_polarity = ep93xx_pwm_polarity, + .enable = ep93xx_pwm_enable, + .disable = ep93xx_pwm_disable, + .owner = THIS_MODULE, +}; + +static int ep93xx_pwm_probe(struct platform_device *pdev) +{ + struct ep93xx_pwm *ep93xx_pwm; + struct resource *res; + int ret; + + ep93xx_pwm = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_pwm), GFP_KERNEL); + if (!ep93xx_pwm) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ep93xx_pwm->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ep93xx_pwm->base)) + return PTR_ERR(ep93xx_pwm->base); + + ep93xx_pwm->clk = devm_clk_get(&pdev->dev, "pwm_clk"); + if (IS_ERR(ep93xx_pwm->clk)) + return PTR_ERR(ep93xx_pwm->clk); + + ep93xx_pwm->chip.dev = &pdev->dev; + ep93xx_pwm->chip.ops = &ep93xx_pwm_ops; + ep93xx_pwm->chip.base = -1; + ep93xx_pwm->chip.npwm = 1; + + ret = pwmchip_add(&ep93xx_pwm->chip); + if (ret < 0) + return ret; + + platform_set_drvdata(pdev, ep93xx_pwm); + return 0; +} + +static int ep93xx_pwm_remove(struct platform_device *pdev) +{ + struct ep93xx_pwm *ep93xx_pwm = platform_get_drvdata(pdev); + + return pwmchip_remove(&ep93xx_pwm->chip); +} + +static struct platform_driver ep93xx_pwm_driver = { + .driver = { + .name = "ep93xx-pwm", + }, + .probe = ep93xx_pwm_probe, + .remove = ep93xx_pwm_remove, +}; +module_platform_driver(ep93xx_pwm_driver); + +MODULE_DESCRIPTION("Cirrus Logic EP93xx PWM driver"); +MODULE_AUTHOR("Matthieu Crapet <mcrapet@gmail.com>, " + "H Hartley Sweeten <hsweeten@visionengravers.com>"); +MODULE_ALIAS("platform:ep93xx-pwm"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c index 2b7c4f88b461..cc4773344874 100644 --- a/drivers/pwm/pwm-imx.c +++ b/drivers/pwm/pwm-imx.c @@ -16,6 +16,7 @@ #include <linux/clk.h> #include <linux/io.h> #include <linux/pwm.h> +#include <linux/of.h> #include <linux/of_device.h> /* i.MX1 and i.MX21 share the same PWM function block: */ @@ -296,7 +297,7 @@ static struct platform_driver imx_pwm_driver = { .driver = { .name = "imx-pwm", .owner = THIS_MODULE, - .of_match_table = of_match_ptr(imx_pwm_dt_ids), + .of_match_table = imx_pwm_dt_ids, }, .probe = imx_pwm_probe, .remove = imx_pwm_remove, diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c index efac99e03d57..9dc0f9d42bfa 100644 --- a/drivers/pwm/pwm-lpc32xx.c +++ b/drivers/pwm/pwm-lpc32xx.c @@ -169,7 +169,7 @@ static struct platform_driver lpc32xx_pwm_driver = { .driver = { .name = "lpc32xx-pwm", .owner = THIS_MODULE, - .of_match_table = of_match_ptr(lpc32xx_pwm_dt_ids), + .of_match_table = lpc32xx_pwm_dt_ids, }, .probe = lpc32xx_pwm_probe, .remove = 
lpc32xx_pwm_remove, diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c index c2c5a4fd1b96..9475bc7a6f97 100644 --- a/drivers/pwm/pwm-mxs.c +++ b/drivers/pwm/pwm-mxs.c @@ -189,7 +189,7 @@ static struct platform_driver mxs_pwm_driver = { .driver = { .name = "mxs-pwm", .owner = THIS_MODULE, - .of_match_table = of_match_ptr(mxs_pwm_dt_ids), + .of_match_table = mxs_pwm_dt_ids, }, .probe = mxs_pwm_probe, .remove = mxs_pwm_remove, diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c index fcc8b9adde9f..b59639e0c029 100644 --- a/drivers/pwm/pwm-samsung.c +++ b/drivers/pwm/pwm-samsung.c @@ -18,6 +18,7 @@ #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/pwm.h> #include <linux/slab.h> @@ -224,8 +225,8 @@ static int pwm_samsung_request(struct pwm_chip *chip, struct pwm_device *pwm) static void pwm_samsung_free(struct pwm_chip *chip, struct pwm_device *pwm) { - pwm_set_chip_data(pwm, NULL); devm_kfree(chip->dev, pwm_get_chip_data(pwm)); + pwm_set_chip_data(pwm, NULL); } static int pwm_samsung_enable(struct pwm_chip *chip, struct pwm_device *pwm) diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c index c2e2e5852362..4e5c3d13d4f8 100644 --- a/drivers/pwm/pwm-tiecap.c +++ b/drivers/pwm/pwm-tiecap.c @@ -26,7 +26,6 @@ #include <linux/pm_runtime.h> #include <linux/pwm.h> #include <linux/of_device.h> -#include <linux/pinctrl/consumer.h> #include "pwm-tipwmss.h" @@ -208,11 +207,6 @@ static int ecap_pwm_probe(struct platform_device *pdev) struct clk *clk; struct ecap_pwm_chip *pc; u16 status; - struct pinctrl *pinctrl; - - pinctrl = devm_pinctrl_get_select_default(&pdev->dev); - if (IS_ERR(pinctrl)) - dev_warn(&pdev->dev, "unable to select pin group\n"); pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL); if (!pc) { diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c index 084f55246532..a4d8f519d965 100644 --- a/drivers/pwm/pwm-tiehrpwm.c +++ b/drivers/pwm/pwm-tiehrpwm.c @@ -26,7 +26,6 @@ #include <linux/clk.h> #include <linux/pm_runtime.h> #include <linux/of_device.h> -#include <linux/pinctrl/consumer.h> #include "pwm-tipwmss.h" @@ -439,11 +438,6 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev) struct clk *clk; struct ehrpwm_pwm_chip *pc; u16 status; - struct pinctrl *pinctrl; - - pinctrl = devm_pinctrl_get_select_default(&pdev->dev); - if (IS_ERR(pinctrl)) - dev_warn(&pdev->dev, "unable to select pin group\n"); pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL); if (!pc) { diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c index 29d1bba4804e..b964470025c5 100644 --- a/drivers/pwm/pwm-twl-led.c +++ b/drivers/pwm/pwm-twl-led.c @@ -21,6 +21,7 @@ */ #include <linux/module.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/pwm.h> #include <linux/i2c/twl.h> diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c index eef910580eae..b99a50e626a6 100644 --- a/drivers/pwm/pwm-twl.c +++ b/drivers/pwm/pwm-twl.c @@ -18,6 +18,7 @@ */ #include <linux/module.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/pwm.h> #include <linux/i2c/twl.h> diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c index 23f8d1ce877d..a00132e31ec7 100644 --- a/drivers/regulator/tps65910-regulator.c +++ b/drivers/regulator/tps65910-regulator.c @@ -906,7 +906,7 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic, } ret = tps65910_reg_write(pmic->mfd, 
sr_reg_add, 0); if (ret < 0) { - dev_err(mfd->dev, "Error in settting sr register\n"); + dev_err(mfd->dev, "Error in setting sr register\n"); return ret; } } diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 548209a9c43c..d0ab5019d885 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -118,22 +118,6 @@ static void scm_request_done(struct scm_request *scmrq) spin_unlock_irqrestore(&list_lock, flags); } -static int scm_open(struct block_device *blkdev, fmode_t mode) -{ - return scm_get_ref(); -} - -static void scm_release(struct gendisk *gendisk, fmode_t mode) -{ - scm_put_ref(); -} - -static const struct block_device_operations scm_blk_devops = { - .owner = THIS_MODULE, - .open = scm_open, - .release = scm_release, -}; - static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req) { return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT; @@ -256,7 +240,7 @@ static void scm_blk_request(struct request_queue *rq) atomic_inc(&bdev->queued_reqs); blk_start_request(req); - ret = scm_start_aob(scmrq->aob); + ret = eadm_start_aob(scmrq->aob); if (ret) { SCM_LOG(5, "no subchannel"); scm_request_requeue(scmrq); @@ -320,7 +304,7 @@ static void scm_blk_handle_error(struct scm_request *scmrq) } restart: - if (!scm_start_aob(scmrq->aob)) + if (!eadm_start_aob(scmrq->aob)) return; requeue: @@ -363,6 +347,10 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev) blk_run_queue(bdev->rq); } +static const struct block_device_operations scm_blk_devops = { + .owner = THIS_MODULE, +}; + int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) { struct request_queue *rq; diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c index c0d102e3a48b..27f930cd657f 100644 --- a/drivers/s390/block/scm_blk_cluster.c +++ b/drivers/s390/block/scm_blk_cluster.c @@ -187,7 +187,7 @@ bool scm_need_cluster_request(struct scm_request *scmrq) void scm_initiate_cluster_request(struct scm_request *scmrq) { scm_prepare_cluster_request(scmrq); - if (scm_start_aob(scmrq->aob)) + if (eadm_start_aob(scmrq->aob)) scm_request_requeue(scmrq); } diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 17821a026c9c..b69ab17f13fa 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -3,7 +3,8 @@ # obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ - sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o + sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \ + sclp_early.o obj-$(CONFIG_TN3270) += raw3270.o obj-$(CONFIG_TN3270_CONSOLE) += con3270.o diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index f93cc32eb818..71e974738014 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -564,6 +564,7 @@ static void __exit fs3270_exit(void) { raw3270_unregister_notifier(&fs3270_notifier); + device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0)); __unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270"); } diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 40d1406289ed..6fbe09686d18 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -99,6 +99,7 @@ struct init_sccb { } __attribute__((packed)); extern u64 sclp_facilities; + #define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL) #define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL) #define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL) @@ -179,6 +180,10 @@ void 
sclp_sdias_exit(void); extern int sclp_console_pages; extern int sclp_console_drop; extern unsigned long sclp_console_full; +extern u8 sclp_fac84; +extern unsigned long long sclp_rzm; +extern unsigned long long sclp_rnmax; +extern __initdata int sclp_early_read_info_sccb_valid; /* useful inlines */ diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 77df9cb00688..eaa21d542c5c 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -28,168 +28,6 @@ #include "sclp.h" -#define SCLP_CMDW_READ_SCP_INFO 0x00020001 -#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 - -struct read_info_sccb { - struct sccb_header header; /* 0-7 */ - u16 rnmax; /* 8-9 */ - u8 rnsize; /* 10 */ - u8 _reserved0[24 - 11]; /* 11-15 */ - u8 loadparm[8]; /* 24-31 */ - u8 _reserved1[48 - 32]; /* 32-47 */ - u64 facilities; /* 48-55 */ - u8 _reserved2[84 - 56]; /* 56-83 */ - u8 fac84; /* 84 */ - u8 fac85; /* 85 */ - u8 _reserved3[91 - 86]; /* 86-90 */ - u8 flags; /* 91 */ - u8 _reserved4[100 - 92]; /* 92-99 */ - u32 rnsize2; /* 100-103 */ - u64 rnmax2; /* 104-111 */ - u8 _reserved5[4096 - 112]; /* 112-4095 */ -} __attribute__((packed, aligned(PAGE_SIZE))); - -static struct init_sccb __initdata early_event_mask_sccb __aligned(PAGE_SIZE); -static struct read_info_sccb __initdata early_read_info_sccb; -static int __initdata early_read_info_sccb_valid; - -u64 sclp_facilities; -static u8 sclp_fac84; -static unsigned long long rzm; -static unsigned long long rnmax; - -static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) -{ - int rc; - - __ctl_set_bit(0, 9); - rc = sclp_service_call(cmd, sccb); - if (rc) - goto out; - __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | - PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT); - local_irq_disable(); -out: - /* Contents of the sccb might have changed. */ - barrier(); - __ctl_clear_bit(0, 9); - return rc; -} - -static void __init sclp_read_info_early(void) -{ - int rc; - int i; - struct read_info_sccb *sccb; - sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED, - SCLP_CMDW_READ_SCP_INFO}; - - sccb = &early_read_info_sccb; - for (i = 0; i < ARRAY_SIZE(commands); i++) { - do { - memset(sccb, 0, sizeof(*sccb)); - sccb->header.length = sizeof(*sccb); - sccb->header.function_code = 0x80; - sccb->header.control_mask[2] = 0x80; - rc = sclp_cmd_sync_early(commands[i], sccb); - } while (rc == -EBUSY); - - if (rc) - break; - if (sccb->header.response_code == 0x10) { - early_read_info_sccb_valid = 1; - break; - } - if (sccb->header.response_code != 0x1f0) - break; - } -} - -static void __init sclp_event_mask_early(void) -{ - struct init_sccb *sccb = &early_event_mask_sccb; - int rc; - - do { - memset(sccb, 0, sizeof(*sccb)); - sccb->header.length = sizeof(*sccb); - sccb->mask_length = sizeof(sccb_mask_t); - rc = sclp_cmd_sync_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb); - } while (rc == -EBUSY); -} - -void __init sclp_facilities_detect(void) -{ - struct read_info_sccb *sccb; - - sclp_read_info_early(); - if (!early_read_info_sccb_valid) - return; - - sccb = &early_read_info_sccb; - sclp_facilities = sccb->facilities; - sclp_fac84 = sccb->fac84; - if (sccb->fac85 & 0x02) - S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; - rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; - rzm = sccb->rnsize ? 
sccb->rnsize : sccb->rnsize2; - rzm <<= 20; - - sclp_event_mask_early(); -} - -bool __init sclp_has_linemode(void) -{ - struct init_sccb *sccb = &early_event_mask_sccb; - - if (sccb->header.response_code != 0x20) - return 0; - if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK))) - return 0; - if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))) - return 0; - return 1; -} - -bool __init sclp_has_vt220(void) -{ - struct init_sccb *sccb = &early_event_mask_sccb; - - if (sccb->header.response_code != 0x20) - return 0; - if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK) - return 1; - return 0; -} - -unsigned long long sclp_get_rnmax(void) -{ - return rnmax; -} - -unsigned long long sclp_get_rzm(void) -{ - return rzm; -} - -/* - * This function will be called after sclp_facilities_detect(), which gets - * called from early.c code. Therefore the sccb should have valid contents. - */ -void __init sclp_get_ipl_info(struct sclp_ipl_info *info) -{ - struct read_info_sccb *sccb; - - if (!early_read_info_sccb_valid) - return; - sccb = &early_read_info_sccb; - info->is_valid = 1; - if (sccb->flags & 0x2) - info->has_dump = 1; - memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN); -} - static void sclp_sync_callback(struct sclp_req *req, void *data) { struct completion *completion = data; @@ -356,14 +194,14 @@ struct assign_storage_sccb { int arch_get_memory_phys_device(unsigned long start_pfn) { - if (!rzm) + if (!sclp_rzm) return 0; - return PFN_PHYS(start_pfn) >> ilog2(rzm); + return PFN_PHYS(start_pfn) >> ilog2(sclp_rzm); } static unsigned long long rn2addr(u16 rn) { - return (unsigned long long) (rn - 1) * rzm; + return (unsigned long long) (rn - 1) * sclp_rzm; } static int do_assign_storage(sclp_cmdw_t cmd, u16 rn) @@ -404,7 +242,7 @@ static int sclp_assign_storage(u16 rn) if (rc) return rc; start = rn2addr(rn); - storage_key_init_range(start, start + rzm); + storage_key_init_range(start, start + sclp_rzm); return 0; } @@ -462,7 +300,7 @@ static int sclp_mem_change_state(unsigned long start, unsigned long size, istart = rn2addr(incr->rn); if (start + size - 1 < istart) break; - if (start > istart + rzm - 1) + if (start > istart + sclp_rzm - 1) continue; if (online) rc |= sclp_assign_storage(incr->rn); @@ -526,7 +364,7 @@ static void __init add_memory_merged(u16 rn) if (!first_rn) goto skip_add; start = rn2addr(first_rn); - size = (unsigned long long ) num * rzm; + size = (unsigned long long) num * sclp_rzm; if (start >= VMEM_MAX_PHYS) goto skip_add; if (start + size > VMEM_MAX_PHYS) @@ -574,7 +412,7 @@ static void __init insert_increment(u16 rn, int standby, int assigned) } if (!assigned) new_incr->rn = last_rn + 1; - if (new_incr->rn > rnmax) { + if (new_incr->rn > sclp_rnmax) { kfree(new_incr); return; } @@ -617,7 +455,7 @@ static int __init sclp_detect_standby_memory(void) if (OLDMEM_BASE) /* No standby memory in kdump mode */ return 0; - if (!early_read_info_sccb_valid) + if (!sclp_early_read_info_sccb_valid) return 0; if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) return 0; @@ -661,7 +499,7 @@ static int __init sclp_detect_standby_memory(void) } if (rc || list_empty(&sclp_mem_list)) goto out; - for (i = 1; i <= rnmax - assigned; i++) + for (i = 1; i <= sclp_rnmax - assigned; i++) insert_increment(0, 1, 0); rc = register_memory_notifier(&sclp_mem_nb); if (rc) diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c new file mode 100644 index 000000000000..f7aa080e9b28 --- /dev/null +++ b/drivers/s390/char/sclp_early.c @@ 
-0,0 +1,264 @@ +/* + * SCLP early driver + * + * Copyright IBM Corp. 2013 + */ + +#define KMSG_COMPONENT "sclp_early" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <asm/ctl_reg.h> +#include <asm/sclp.h> +#include <asm/ipl.h> +#include "sclp_sdias.h" +#include "sclp.h" + +#define SCLP_CMDW_READ_SCP_INFO 0x00020001 +#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 + +struct read_info_sccb { + struct sccb_header header; /* 0-7 */ + u16 rnmax; /* 8-9 */ + u8 rnsize; /* 10 */ + u8 _reserved0[24 - 11]; /* 11-15 */ + u8 loadparm[8]; /* 24-31 */ + u8 _reserved1[48 - 32]; /* 32-47 */ + u64 facilities; /* 48-55 */ + u8 _reserved2[84 - 56]; /* 56-83 */ + u8 fac84; /* 84 */ + u8 fac85; /* 85 */ + u8 _reserved3[91 - 86]; /* 86-90 */ + u8 flags; /* 91 */ + u8 _reserved4[100 - 92]; /* 92-99 */ + u32 rnsize2; /* 100-103 */ + u64 rnmax2; /* 104-111 */ + u8 _reserved5[4096 - 112]; /* 112-4095 */ +} __packed __aligned(PAGE_SIZE); + +static __initdata struct init_sccb early_event_mask_sccb __aligned(PAGE_SIZE); +static __initdata struct read_info_sccb early_read_info_sccb; +static __initdata char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE); +static unsigned long sclp_hsa_size; + +__initdata int sclp_early_read_info_sccb_valid; +u64 sclp_facilities; +u8 sclp_fac84; +unsigned long long sclp_rzm; +unsigned long long sclp_rnmax; + +static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) +{ + int rc; + + __ctl_set_bit(0, 9); + rc = sclp_service_call(cmd, sccb); + if (rc) + goto out; + __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | + PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT); + local_irq_disable(); +out: + /* Contents of the sccb might have changed. */ + barrier(); + __ctl_clear_bit(0, 9); + return rc; +} + +static void __init sclp_read_info_early(void) +{ + int rc; + int i; + struct read_info_sccb *sccb; + sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED, + SCLP_CMDW_READ_SCP_INFO}; + + sccb = &early_read_info_sccb; + for (i = 0; i < ARRAY_SIZE(commands); i++) { + do { + memset(sccb, 0, sizeof(*sccb)); + sccb->header.length = sizeof(*sccb); + sccb->header.function_code = 0x80; + sccb->header.control_mask[2] = 0x80; + rc = sclp_cmd_sync_early(commands[i], sccb); + } while (rc == -EBUSY); + + if (rc) + break; + if (sccb->header.response_code == 0x10) { + sclp_early_read_info_sccb_valid = 1; + break; + } + if (sccb->header.response_code != 0x1f0) + break; + } +} + +static void __init sclp_facilities_detect(void) +{ + struct read_info_sccb *sccb; + + sclp_read_info_early(); + if (!sclp_early_read_info_sccb_valid) + return; + + sccb = &early_read_info_sccb; + sclp_facilities = sccb->facilities; + sclp_fac84 = sccb->fac84; + if (sccb->fac85 & 0x02) + S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; + sclp_rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; + sclp_rzm = sccb->rnsize ? 
sccb->rnsize : sccb->rnsize2; + sclp_rzm <<= 20; +} + +bool __init sclp_has_linemode(void) +{ + struct init_sccb *sccb = &early_event_mask_sccb; + + if (sccb->header.response_code != 0x20) + return 0; + if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK))) + return 0; + if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))) + return 0; + return 1; +} + +bool __init sclp_has_vt220(void) +{ + struct init_sccb *sccb = &early_event_mask_sccb; + + if (sccb->header.response_code != 0x20) + return 0; + if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK) + return 1; + return 0; +} + +unsigned long long sclp_get_rnmax(void) +{ + return sclp_rnmax; +} + +unsigned long long sclp_get_rzm(void) +{ + return sclp_rzm; +} + +/* + * This function will be called after sclp_facilities_detect(), which gets + * called from early.c code. Therefore the sccb should have valid contents. + */ +void __init sclp_get_ipl_info(struct sclp_ipl_info *info) +{ + struct read_info_sccb *sccb; + + if (!sclp_early_read_info_sccb_valid) + return; + sccb = &early_read_info_sccb; + info->is_valid = 1; + if (sccb->flags & 0x2) + info->has_dump = 1; + memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN); +} + +static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb) +{ + int rc; + + do { + rc = sclp_cmd_sync_early(cmd, sccb); + } while (rc == -EBUSY); + + if (rc) + return -EIO; + if (((struct sccb_header *) sccb)->response_code != 0x0020) + return -EIO; + return 0; +} + +static void __init sccb_init_eq_size(struct sdias_sccb *sccb) +{ + memset(sccb, 0, sizeof(*sccb)); + + sccb->hdr.length = sizeof(*sccb); + sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf); + sccb->evbuf.hdr.type = EVTYP_SDIAS; + sccb->evbuf.event_qual = SDIAS_EQ_SIZE; + sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP; + sccb->evbuf.event_id = 4712; + sccb->evbuf.dbs = 1; +} + +static int __init sclp_set_event_mask(unsigned long receive_mask, + unsigned long send_mask) +{ + struct init_sccb *sccb = (void *) &sccb_early; + + memset(sccb, 0, sizeof(*sccb)); + sccb->header.length = sizeof(*sccb); + sccb->mask_length = sizeof(sccb_mask_t); + sccb->receive_mask = receive_mask; + sccb->send_mask = send_mask; + return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb); +} + +static long __init sclp_hsa_size_init(void) +{ + struct sdias_sccb *sccb = (void *) &sccb_early; + + sccb_init_eq_size(sccb); + if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb)) + return -EIO; + if (sccb->evbuf.blk_cnt != 0) + return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE; + return 0; +} + +static long __init sclp_hsa_copy_wait(void) +{ + struct sccb_header *sccb = (void *) &sccb_early; + + memset(sccb, 0, PAGE_SIZE); + sccb->length = PAGE_SIZE; + if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb)) + return -EIO; + return (((struct sdias_sccb *) sccb)->evbuf.blk_cnt - 1) * PAGE_SIZE; +} + +unsigned long sclp_get_hsa_size(void) +{ + return sclp_hsa_size; +} + +static void __init sclp_hsa_size_detect(void) +{ + long size; + + /* First try synchronous interface (LPAR) */ + if (sclp_set_event_mask(0, 0x40000010)) + return; + size = sclp_hsa_size_init(); + if (size < 0) + return; + if (size != 0) + goto out; + /* Then try asynchronous interface (z/VM) */ + if (sclp_set_event_mask(0x00000010, 0x40000010)) + return; + size = sclp_hsa_size_init(); + if (size < 0) + return; + size = sclp_hsa_copy_wait(); + if (size < 0) + return; +out: + sclp_hsa_size = size; +} + +void __init sclp_early_detect(void) +{ + sclp_facilities_detect(); + sclp_hsa_size_detect(); + 
sclp_set_event_mask(0, 0); +} diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c index b1032931a1c4..561a0414b352 100644 --- a/drivers/s390/char/sclp_sdias.c +++ b/drivers/s390/char/sclp_sdias.c @@ -1,7 +1,7 @@ /* - * Sclp "store data in absolut storage" + * SCLP "store data in absolute storage" * - * Copyright IBM Corp. 2003, 2007 + * Copyright IBM Corp. 2003, 2013 * Author(s): Michael Holzheu */ @@ -14,6 +14,7 @@ #include <asm/debug.h> #include <asm/ipl.h> +#include "sclp_sdias.h" #include "sclp.h" #include "sclp_rw.h" @@ -22,46 +23,12 @@ #define SDIAS_RETRIES 300 #define SDIAS_SLEEP_TICKS 50 -#define EQ_STORE_DATA 0x0 -#define EQ_SIZE 0x1 -#define DI_FCP_DUMP 0x0 -#define ASA_SIZE_32 0x0 -#define ASA_SIZE_64 0x1 -#define EVSTATE_ALL_STORED 0x0 -#define EVSTATE_NO_DATA 0x3 -#define EVSTATE_PART_STORED 0x10 - static struct debug_info *sdias_dbf; static struct sclp_register sclp_sdias_register = { .send_mask = EVTYP_SDIAS_MASK, }; -struct sdias_evbuf { - struct evbuf_header hdr; - u8 event_qual; - u8 data_id; - u64 reserved2; - u32 event_id; - u16 reserved3; - u8 asa_size; - u8 event_status; - u32 reserved4; - u32 blk_cnt; - u64 asa; - u32 reserved5; - u32 fbn; - u32 reserved6; - u32 lbn; - u16 reserved7; - u16 dbs; -} __attribute__((packed)); - -struct sdias_sccb { - struct sccb_header hdr; - struct sdias_evbuf evbuf; -} __attribute__((packed)); - static struct sdias_sccb sccb __attribute__((aligned(4096))); static struct sdias_evbuf sdias_evbuf; @@ -148,8 +115,8 @@ int sclp_sdias_blk_count(void) sccb.hdr.length = sizeof(sccb); sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); sccb.evbuf.hdr.type = EVTYP_SDIAS; - sccb.evbuf.event_qual = EQ_SIZE; - sccb.evbuf.data_id = DI_FCP_DUMP; + sccb.evbuf.event_qual = SDIAS_EQ_SIZE; + sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP; sccb.evbuf.event_id = 4712; sccb.evbuf.dbs = 1; @@ -208,13 +175,13 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); sccb.evbuf.hdr.type = EVTYP_SDIAS; sccb.evbuf.hdr.flags = 0; - sccb.evbuf.event_qual = EQ_STORE_DATA; - sccb.evbuf.data_id = DI_FCP_DUMP; + sccb.evbuf.event_qual = SDIAS_EQ_STORE_DATA; + sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP; sccb.evbuf.event_id = 4712; #ifdef CONFIG_64BIT - sccb.evbuf.asa_size = ASA_SIZE_64; + sccb.evbuf.asa_size = SDIAS_ASA_SIZE_64; #else - sccb.evbuf.asa_size = ASA_SIZE_32; + sccb.evbuf.asa_size = SDIAS_ASA_SIZE_32; #endif sccb.evbuf.event_status = 0; sccb.evbuf.blk_cnt = nr_blks; @@ -240,20 +207,19 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) } switch (sdias_evbuf.event_status) { - case EVSTATE_ALL_STORED: - TRACE("all stored\n"); - break; - case EVSTATE_PART_STORED: - TRACE("part stored: %i\n", sdias_evbuf.blk_cnt); - break; - case EVSTATE_NO_DATA: - TRACE("no data\n"); - /* fall through */ - default: - pr_err("Error from SCLP while copying hsa. " - "Event status = %x\n", - sdias_evbuf.event_status); - rc = -EIO; + case SDIAS_EVSTATE_ALL_STORED: + TRACE("all stored\n"); + break; + case SDIAS_EVSTATE_PART_STORED: + TRACE("part stored: %i\n", sdias_evbuf.blk_cnt); + break; + case SDIAS_EVSTATE_NO_DATA: + TRACE("no data\n"); + /* fall through */ + default: + pr_err("Error from SCLP while copying hsa. 
Event status = %x\n", + sdias_evbuf.event_status); + rc = -EIO; } out: mutex_unlock(&sdias_mutex); diff --git a/drivers/s390/char/sclp_sdias.h b/drivers/s390/char/sclp_sdias.h new file mode 100644 index 000000000000..f2431c414150 --- /dev/null +++ b/drivers/s390/char/sclp_sdias.h @@ -0,0 +1,46 @@ +/* + * SCLP "store data in absolute storage" + * + * Copyright IBM Corp. 2003, 2013 + */ + +#ifndef SCLP_SDIAS_H +#define SCLP_SDIAS_H + +#include "sclp.h" + +#define SDIAS_EQ_STORE_DATA 0x0 +#define SDIAS_EQ_SIZE 0x1 +#define SDIAS_DI_FCP_DUMP 0x0 +#define SDIAS_ASA_SIZE_32 0x0 +#define SDIAS_ASA_SIZE_64 0x1 +#define SDIAS_EVSTATE_ALL_STORED 0x0 +#define SDIAS_EVSTATE_NO_DATA 0x3 +#define SDIAS_EVSTATE_PART_STORED 0x10 + +struct sdias_evbuf { + struct evbuf_header hdr; + u8 event_qual; + u8 data_id; + u64 reserved2; + u32 event_id; + u16 reserved3; + u8 asa_size; + u8 event_status; + u32 reserved4; + u32 blk_cnt; + u64 asa; + u32 reserved5; + u32 fbn; + u32 reserved6; + u32 lbn; + u16 reserved7; + u16 dbs; +} __packed; + +struct sdias_sccb { + struct sccb_header hdr; + struct sdias_evbuf evbuf; +} __packed; + +#endif /* SCLP_SDIAS_H */ diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index ffb1fcf0bf5b..3d8e4d63f514 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -328,9 +328,9 @@ static ssize_t zcore_read(struct file *file, char __user *buf, size_t count, mem_offs = 0; /* Copy from HSA data */ - if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) { - size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE - - mem_start)); + if (*ppos < sclp_get_hsa_size() + HEADER_SIZE) { + size = min((count - hdr_count), + (size_t) (sclp_get_hsa_size() - mem_start)); rc = memcpy_hsa_user(buf + hdr_count, mem_start, size); if (rc) goto fail; @@ -490,7 +490,7 @@ static ssize_t zcore_hsa_read(struct file *filp, char __user *buf, static char str[18]; if (hsa_available) - snprintf(str, sizeof(str), "%lx\n", ZFCPDUMP_HSA_SIZE); + snprintf(str, sizeof(str), "%lx\n", sclp_get_hsa_size()); else snprintf(str, sizeof(str), "0\n"); return simple_read_from_buffer(buf, count, ppos, str, strlen(str)); @@ -584,17 +584,9 @@ static int __init sys_info_init(enum arch_id arch, unsigned long mem_end) static int __init check_sdias(void) { - int rc, act_hsa_size; - - rc = sclp_sdias_blk_count(); - if (rc < 0) { + if (!sclp_get_hsa_size()) { TRACE("Could not determine HSA size\n"); - return rc; - } - act_hsa_size = (rc - 1) * PAGE_SIZE; - if (act_hsa_size < ZFCPDUMP_HSA_SIZE) { - TRACE("HSA size too small: %i\n", act_hsa_size); - return -EINVAL; + return -ENODEV; } return 0; } @@ -662,7 +654,7 @@ static int __init zcore_reipl_init(void) ipl_block = (void *) __get_free_page(GFP_KERNEL); if (!ipl_block) return -ENOMEM; - if (ipib_info.ipib < ZFCPDUMP_HSA_SIZE) + if (ipib_info.ipib < sclp_get_hsa_size()) rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE); else rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE); diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c index aca7bfc113aa..3a2ee4a740b4 100644 --- a/drivers/s390/cio/eadm_sch.c +++ b/drivers/s390/cio/eadm_sch.c @@ -190,7 +190,7 @@ static struct subchannel *eadm_get_idle_sch(void) return NULL; } -static int eadm_start_aob(struct aob *aob) +int eadm_start_aob(struct aob *aob) { struct eadm_private *private; struct subchannel *sch; @@ -218,6 +218,7 @@ out_unlock: return ret; } +EXPORT_SYMBOL_GPL(eadm_start_aob); static int eadm_subchannel_probe(struct subchannel *sch) { @@ -380,11 +381,6 @@ static 
struct css_driver eadm_subchannel_driver = { .restore = eadm_subchannel_restore, }; -static struct eadm_ops eadm_ops = { - .eadm_start = eadm_start_aob, - .owner = THIS_MODULE, -}; - static int __init eadm_sch_init(void) { int ret; @@ -404,7 +400,6 @@ static int __init eadm_sch_init(void) if (ret) goto cleanup; - register_eadm_ops(&eadm_ops); return ret; cleanup: @@ -415,7 +410,6 @@ cleanup: static void __exit eadm_sch_exit(void) { - unregister_eadm_ops(&eadm_ops); css_driver_unregister(&eadm_subchannel_driver); isc_unregister(EADM_SCH_ISC); debug_unregister(eadm_debug); diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c index 46ec25632e8b..15268edc54ae 100644 --- a/drivers/s390/cio/scm.c +++ b/drivers/s390/cio/scm.c @@ -15,8 +15,6 @@ #include "chsc.h" static struct device *scm_root; -static struct eadm_ops *eadm_ops; -static DEFINE_MUTEX(eadm_ops_mutex); #define to_scm_dev(n) container_of(n, struct scm_device, dev) #define to_scm_drv(d) container_of(d, struct scm_driver, drv) @@ -73,49 +71,6 @@ void scm_driver_unregister(struct scm_driver *scmdrv) } EXPORT_SYMBOL_GPL(scm_driver_unregister); -int scm_get_ref(void) -{ - int ret = 0; - - mutex_lock(&eadm_ops_mutex); - if (!eadm_ops || !try_module_get(eadm_ops->owner)) - ret = -ENOENT; - mutex_unlock(&eadm_ops_mutex); - - return ret; -} -EXPORT_SYMBOL_GPL(scm_get_ref); - -void scm_put_ref(void) -{ - mutex_lock(&eadm_ops_mutex); - module_put(eadm_ops->owner); - mutex_unlock(&eadm_ops_mutex); -} -EXPORT_SYMBOL_GPL(scm_put_ref); - -void register_eadm_ops(struct eadm_ops *ops) -{ - mutex_lock(&eadm_ops_mutex); - eadm_ops = ops; - mutex_unlock(&eadm_ops_mutex); -} -EXPORT_SYMBOL_GPL(register_eadm_ops); - -void unregister_eadm_ops(struct eadm_ops *ops) -{ - mutex_lock(&eadm_ops_mutex); - eadm_ops = NULL; - mutex_unlock(&eadm_ops_mutex); -} -EXPORT_SYMBOL_GPL(unregister_eadm_ops); - -int scm_start_aob(struct aob *aob) -{ - return eadm_ops->eadm_start(aob); -} -EXPORT_SYMBOL_GPL(scm_start_aob); - void scm_irq_handler(struct aob *aob, int error) { struct aob_rq_header *aobrq = (void *) aob->request.data; diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 132a905b6bdb..0ca64484cfa3 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -344,7 +344,7 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len, /** * zfcp_dbf_san_req - trace event for issued SAN request - * @tag: indentifier for event + * @tag: identifier for event * @fsf_req: request containing issued CT data * d_id: destination ID */ @@ -361,7 +361,7 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id) /** * zfcp_dbf_san_res - trace event for received SAN request - * @tag: indentifier for event + * @tag: identifier for event * @fsf_req: request containing issued CT data */ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf) @@ -377,7 +377,7 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf) /** * zfcp_dbf_san_in_els - trace event for incoming ELS - * @tag: indentifier for event + * @tag: identifier for event * @fsf_req: request containing issued CT data */ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf) diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index d85ac1a9d2c0..fbcd48d0bfc3 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -511,7 +511,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) goto cleanup; } - if (fibsize > (dev->max_fib_size - sizeof(struct 
aac_fibhdr))) { + if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) || + (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) { rcode = -EINVAL; goto cleanup; } diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 33c52bc2c7b4..97fd450aff09 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -1035,7 +1035,6 @@ static void arcmsr_remove(struct pci_dev *pdev) pci_release_regions(pdev); scsi_host_put(host); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } static void arcmsr_shutdown(struct pci_dev *pdev) diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c index 15a629d8ed08..a795d81ef875 100644 --- a/drivers/scsi/atp870u.c +++ b/drivers/scsi/atp870u.c @@ -3144,8 +3144,6 @@ static void atp870u_remove (struct pci_dev *pdev) atp870u_free_tables(pshost); printk(KERN_INFO "scsi_host_put : %p\n",pshost); scsi_host_put(pshost); - printk(KERN_INFO "pci_set_drvdata : %p\n",pdev); - pci_set_drvdata(pdev, NULL); } MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 7591fa4e28bb..fc80a325a1e6 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -804,7 +804,6 @@ bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad) /* Disable PCIE Advanced Error Recovery (AER) */ pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } bfa_status_t diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 5be718c241c4..e4cf23df4b4f 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -126,7 +126,7 @@ static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code) /** * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification - * @ep: endpoint (transport indentifier) structure + * @ep: endpoint (transport identifier) structure * @action: action, ARM or DISARM. 
For now only ARM_CQE is used * * Arm'ing CQ will enable chip to generate global EQ events inorder to interrupt @@ -756,7 +756,7 @@ void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd) /** * bnx2i_send_conn_destroy - initiates iscsi connection teardown process * @hba: adapter structure pointer - * @ep: endpoint (transport indentifier) structure + * @ep: endpoint (transport identifier) structure * * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate * iscsi connection context clean-up process @@ -791,7 +791,7 @@ int bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) /** * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process * @hba: adapter structure pointer - * @ep: endpoint (transport indentifier) structure + * @ep: endpoint (transport identifier) structure * * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE */ @@ -851,7 +851,7 @@ static int bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba, /** * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation * @hba: adapter structure pointer - * @ep: endpoint (transport indentifier) structure + * @ep: endpoint (transport identifier) structure * * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE */ @@ -920,7 +920,7 @@ static int bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba, * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process * * @hba: adapter structure pointer - * @ep: endpoint (transport indentifier) structure + * @ep: endpoint (transport identifier) structure * * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE */ @@ -939,7 +939,7 @@ int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) /** * setup_qp_page_tables - iscsi QP page table setup function - * @ep: endpoint (transport indentifier) structure + * @ep: endpoint (transport identifier) structure * * Sets up page tables for SQ/RQ/CQ, 1G/sec (5706/5708/5709) devices requires * 64-bit address in big endian format. Whereas 10G/sec (57710) requires @@ -1046,7 +1046,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) /** * bnx2i_alloc_qp_resc - allocates required resources for QP. * @hba: adapter structure pointer - * @ep: endpoint (transport indentifier) structure + * @ep: endpoint (transport identifier) structure * * Allocate QP (transport layer for iSCSI connection) resources, DMA'able * memory for SQ/RQ/CQ and page tables. EP structure elements such @@ -1191,7 +1191,7 @@ mem_alloc_err: /** * bnx2i_free_qp_resc - free memory resources held by QP * @hba: adapter structure pointer - * @ep: endpoint (transport indentifier) structure + * @ep: endpoint (transport identifier) structure * * Free QP resources - SQ/RQ/CQ memory and page tables. 
*/ diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index fabeb88602ac..854dad7d5b03 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -596,7 +596,7 @@ void bnx2i_drop_session(struct iscsi_cls_session *cls_session) /** * bnx2i_ep_destroy_list_add - add an entry to EP destroy list * @hba: pointer to adapter instance - * @ep: pointer to endpoint (transport indentifier) structure + * @ep: pointer to endpoint (transport identifier) structure * * EP destroy queue manager */ @@ -613,7 +613,7 @@ static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba, * bnx2i_ep_destroy_list_del - add an entry to EP destroy list * * @hba: pointer to adapter instance - * @ep: pointer to endpoint (transport indentifier) structure + * @ep: pointer to endpoint (transport identifier) structure * * EP destroy queue manager */ @@ -630,7 +630,7 @@ static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba, /** * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list * @hba: pointer to adapter instance - * @ep: pointer to endpoint (transport indentifier) structure + * @ep: pointer to endpoint (transport identifier) structure * * pending conn offload completion queue manager */ @@ -646,7 +646,7 @@ static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba, /** * bnx2i_ep_ofld_list_del - add an entry to ep offload pending list * @hba: pointer to adapter instance - * @ep: pointer to endpoint (transport indentifier) structure + * @ep: pointer to endpoint (transport identifier) structure * * pending conn offload completion queue manager */ @@ -721,7 +721,7 @@ bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid) /** * bnx2i_ep_active_list_add - add an entry to ep active list * @hba: pointer to adapter instance - * @ep: pointer to endpoint (transport indentifier) structure + * @ep: pointer to endpoint (transport identifier) structure * * current active conn queue manager */ @@ -737,7 +737,7 @@ static void bnx2i_ep_active_list_add(struct bnx2i_hba *hba, /** * bnx2i_ep_active_list_del - deletes an entry to ep active list * @hba: pointer to adapter instance - * @ep: pointer to endpoint (transport indentifier) structure + * @ep: pointer to endpoint (transport identifier) structure * * current active conn queue manager */ @@ -1695,7 +1695,7 @@ no_nx2_route: /** * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources * @hba: pointer to adapter instance - * @ep: endpoint (transport indentifier) structure + * @ep: endpoint (transport identifier) structure * * destroys cm_sock structure and on chip iscsi context */ diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index 00346fe939d5..1aafc331ee63 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c @@ -1010,7 +1010,6 @@ err_lnode_exit: csio_hw_stop(hw); spin_unlock_irq(&hw->lock); csio_lnodes_unblock_request(hw); - pci_set_drvdata(hw->pdev, NULL); csio_lnodes_exit(hw, 0); csio_hw_free(hw); err_pci_exit: @@ -1044,7 +1043,6 @@ static void csio_remove_one(struct pci_dev *pdev) csio_lnodes_exit(hw, 0); csio_hw_free(hw); - pci_set_drvdata(pdev, NULL); csio_pci_exit(pdev, &bars); } diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 42e8624a9b9a..83d9bf6fa6ca 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -4861,7 +4861,6 @@ static void dc395x_remove_one(struct pci_dev *dev) adapter_uninit(acb); pci_disable_device(dev); scsi_host_put(scsi_host); - pci_set_drvdata(dev, NULL); } diff 
--git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index be09b101b4a1..33e4ec2bfe73 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -1005,7 +1005,6 @@ static void fnic_remove(struct pci_dev *pdev) fnic_iounmap(fnic); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); scsi_host_put(lp->host); } diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index 6d55b4e7e792..ee4fa40a50b1 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -594,8 +594,6 @@ static void gdth_pci_remove_one(struct pci_dev *pdev) { gdth_ha_str *ha = pci_get_drvdata(pdev); - pci_set_drvdata(pdev, NULL); - list_del(&ha->list); gdth_remove_one(ha); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index fb5a89815150..22f6432eb475 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -5017,7 +5017,6 @@ static void hpsa_remove_one(struct pci_dev *pdev) kfree(h->hba_inquiry_data); pci_disable_device(pdev); pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); kfree(h); } diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index ca6bf2af7ce8..68c94cc85c35 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -4581,8 +4581,6 @@ lpfc_disable_pci_dev(struct lpfc_hba *phba) /* Release PCI resource and disable PCI device */ pci_release_selected_regions(pdev, bars); pci_disable_device(pdev); - /* Null out PCI private reference to driver */ - pci_set_drvdata(pdev, NULL); return; } @@ -9429,7 +9427,6 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev) /* Disable interrupt */ lpfc_sli_disable_intr(phba); - pci_set_drvdata(pdev, NULL); scsi_host_put(shost); /* diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index 515c9629e9fe..d1a4b82836ea 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -534,7 +534,6 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) return 0; out_cmm_unreg: - pci_set_drvdata(pdev, NULL); megaraid_cmm_unregister(adapter); out_fini_mbox: megaraid_fini_mbox(adapter); @@ -594,11 +593,6 @@ megaraid_detach_one(struct pci_dev *pdev) // detach from the IO sub-system megaraid_io_detach(adapter); - // reset the device state in the PCI structure. We check this - // condition when we enter here. 
If the device state is NULL, - // that would mean the device has already been removed - pci_set_drvdata(pdev, NULL); - // Unregister from common management module // // FIXME: this must return success or failure for conditions if there diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 2cf9470dd11b..0a743a5d1647 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4451,7 +4451,6 @@ retry_irq_register: megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; megasas_mgmt_info.max_index--; - pci_set_drvdata(pdev, NULL); instance->instancet->disable_intr(instance); if (instance->msix_vectors) for (i = 0 ; i < instance->msix_vectors; i++) @@ -4807,8 +4806,6 @@ static void megasas_detach_one(struct pci_dev *pdev) } } - pci_set_drvdata(instance->pdev, NULL); - instance->instancet->disable_intr(instance); if (instance->msix_vectors) @@ -4850,8 +4847,6 @@ static void megasas_detach_one(struct pci_dev *pdev) instance->evt_detail, instance->evt_detail_h); scsi_host_put(host); - pci_set_drvdata(pdev, NULL); - pci_disable_device(pdev); return; diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 7b7381d7671f..5ff978be249d 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -657,7 +657,6 @@ static void mvs_pci_remove(struct pci_dev *pdev) tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet); #endif - pci_set_drvdata(pdev, NULL); sas_unregister_ha(sha); sas_remove_host(mvi->shost); scsi_remove_host(mvi->shost); diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 6b1b4e91e53f..6c1f223a8e1d 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -1411,7 +1411,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev, if (res) { del_timer(&task->slow_task->timer); - mv_printk("executing internel task failed:%d\n", res); + mv_printk("executing internal task failed:%d\n", res); goto ex_err; } diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c index c3601b57a80c..edbee8dc62c9 100644 --- a/drivers/scsi/mvumi.c +++ b/drivers/scsi/mvumi.c @@ -2583,7 +2583,6 @@ static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) return 0; fail_io_attach: - pci_set_drvdata(pdev, NULL); mhba->instancet->disable_intr(mhba); free_irq(mhba->pdev->irq, mhba); fail_init_irq: @@ -2618,7 +2617,6 @@ static void mvumi_detach_one(struct pci_dev *pdev) free_irq(mhba->pdev->irq, mhba); mvumi_release_fw(mhba); scsi_host_put(host); - pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); dev_dbg(&pdev->dev, "driver is removed!\n"); } diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index 5982a587babc..7d014b11df62 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c @@ -1615,7 +1615,7 @@ struct ncb { spinlock_t smp_lock; /* Lock for SMP threading */ /*---------------------------------------------------------------- - ** Chip and controller indentification. + ** Chip and controller identification. 
**---------------------------------------------------------------- */ int unit; /* Unit number */ diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 662bf13c42f0..34f5f5ffef05 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -909,7 +909,6 @@ static void pm8001_pci_remove(struct pci_dev *pdev) struct pm8001_hba_info *pm8001_ha; int i; pm8001_ha = sha->lldd_ha; - pci_set_drvdata(pdev, NULL); sas_unregister_ha(sha); sas_remove_host(pm8001_ha->shost); list_del(&pm8001_ha->list); diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 1eb7b0280a45..bd6f743d87a7 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -1512,7 +1512,8 @@ static int pmcraid_notify_aen( } result = - genlmsg_multicast(skb, 0, pmcraid_event_family.id, GFP_ATOMIC); + genlmsg_multicast(&pmcraid_event_family, skb, 0, + pmcraid_event_family.id, GFP_ATOMIC); /* If there are no listeners, genlmsg_multicast may return non-zero * value. @@ -6049,7 +6050,6 @@ out_release_regions: out_disable_device: atomic_dec(&pmcraid_adapter_count); - pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); return -ENODEV; } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index bcd57f699ebb..52be35e0300c 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3179,7 +3179,6 @@ qla2x00_remove_one(struct pci_dev *pdev) pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } static void diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 6dc3e99b7f9c..a28d5e624aab 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -7827,7 +7827,6 @@ static void qla4xxx_remove_adapter(struct pci_dev *pdev) pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } /** diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index f379c7f3034c..2700a5a09bd4 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -24,12 +24,15 @@ #include <linux/err.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/delay.h> #include <scsi/scsi.h> +#include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_srp.h> +#include "scsi_priv.h" #include "scsi_transport_srp_internal.h" struct srp_host_attrs { @@ -38,7 +41,7 @@ struct srp_host_attrs { #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data) #define SRP_HOST_ATTRS 0 -#define SRP_RPORT_ATTRS 3 +#define SRP_RPORT_ATTRS 8 struct srp_internal { struct scsi_transport_template t; @@ -54,6 +57,36 @@ struct srp_internal { #define dev_to_rport(d) container_of(d, struct srp_rport, dev) #define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent) +static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r) +{ + return dev_to_shost(r->dev.parent); +} + +/** + * srp_tmo_valid() - check timeout combination validity + * + * The combination of the timeout parameters must be such that SCSI commands + * are finished in a reasonable time. Hence do not allow the fast I/O fail + * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT. Furthermore, these + * parameters must be such that multipath can detect failed paths timely. + * Hence do not allow all three parameters to be disabled simultaneously. 
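A minimal sketch of how an initiator driver might apply new timeout values through this helper; the wrapper below and its name are hypothetical, while srp_tmo_valid() and the rport fields are the ones introduced by this patch:

	/* Hypothetical caller; mirrors the sysfs store handlers added in this patch. */
	static int example_set_rport_tmo(struct srp_rport *rport, int reconnect_delay,
					 int fast_io_fail_tmo, int dev_loss_tmo)
	{
		int ret;

		/* Reject combinations that could stall SCSI command completion. */
		ret = srp_tmo_valid(reconnect_delay, fast_io_fail_tmo, dev_loss_tmo);
		if (ret)
			return ret;

		rport->reconnect_delay = reconnect_delay;
		rport->fast_io_fail_tmo = fast_io_fail_tmo;
		rport->dev_loss_tmo = dev_loss_tmo;
		return 0;
	}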
+ */ +int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, int dev_loss_tmo) +{ + if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0) + return -EINVAL; + if (reconnect_delay == 0) + return -EINVAL; + if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT) + return -EINVAL; + if (dev_loss_tmo >= LONG_MAX / HZ) + return -EINVAL; + if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 && + fast_io_fail_tmo >= dev_loss_tmo) + return -EINVAL; + return 0; +} +EXPORT_SYMBOL_GPL(srp_tmo_valid); static int srp_host_setup(struct transport_container *tc, struct device *dev, struct device *cdev) @@ -134,10 +167,465 @@ static ssize_t store_srp_rport_delete(struct device *dev, static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete); +static ssize_t show_srp_rport_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + static const char *const state_name[] = { + [SRP_RPORT_RUNNING] = "running", + [SRP_RPORT_BLOCKED] = "blocked", + [SRP_RPORT_FAIL_FAST] = "fail-fast", + [SRP_RPORT_LOST] = "lost", + }; + struct srp_rport *rport = transport_class_to_srp_rport(dev); + enum srp_rport_state state = rport->state; + + return sprintf(buf, "%s\n", + (unsigned)state < ARRAY_SIZE(state_name) ? + state_name[state] : "???"); +} + +static DEVICE_ATTR(state, S_IRUGO, show_srp_rport_state, NULL); + +static ssize_t srp_show_tmo(char *buf, int tmo) +{ + return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n"); +} + +static int srp_parse_tmo(int *tmo, const char *buf) +{ + int res = 0; + + if (strncmp(buf, "off", 3) != 0) + res = kstrtoint(buf, 0, tmo); + else + *tmo = -1; + + return res; +} + +static ssize_t show_reconnect_delay(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return srp_show_tmo(buf, rport->reconnect_delay); +} + +static ssize_t store_reconnect_delay(struct device *dev, + struct device_attribute *attr, + const char *buf, const size_t count) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + int res, delay; + + res = srp_parse_tmo(&delay, buf); + if (res) + goto out; + res = srp_tmo_valid(delay, rport->fast_io_fail_tmo, + rport->dev_loss_tmo); + if (res) + goto out; + + if (rport->reconnect_delay <= 0 && delay > 0 && + rport->state != SRP_RPORT_RUNNING) { + queue_delayed_work(system_long_wq, &rport->reconnect_work, + delay * HZ); + } else if (delay <= 0) { + cancel_delayed_work(&rport->reconnect_work); + } + rport->reconnect_delay = delay; + res = count; + +out: + return res; +} + +static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, show_reconnect_delay, + store_reconnect_delay); + +static ssize_t show_failed_reconnects(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return sprintf(buf, "%d\n", rport->failed_reconnects); +} + +static DEVICE_ATTR(failed_reconnects, S_IRUGO, show_failed_reconnects, NULL); + +static ssize_t show_srp_rport_fast_io_fail_tmo(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return srp_show_tmo(buf, rport->fast_io_fail_tmo); +} + +static ssize_t store_srp_rport_fast_io_fail_tmo(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + int res; + int fast_io_fail_tmo; + + res = srp_parse_tmo(&fast_io_fail_tmo, buf); + if (res) + goto out; + res = 
srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo, + rport->dev_loss_tmo); + if (res) + goto out; + rport->fast_io_fail_tmo = fast_io_fail_tmo; + res = count; + +out: + return res; +} + +static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR, + show_srp_rport_fast_io_fail_tmo, + store_srp_rport_fast_io_fail_tmo); + +static ssize_t show_srp_rport_dev_loss_tmo(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return srp_show_tmo(buf, rport->dev_loss_tmo); +} + +static ssize_t store_srp_rport_dev_loss_tmo(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + int res; + int dev_loss_tmo; + + res = srp_parse_tmo(&dev_loss_tmo, buf); + if (res) + goto out; + res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo, + dev_loss_tmo); + if (res) + goto out; + rport->dev_loss_tmo = dev_loss_tmo; + res = count; + +out: + return res; +} + +static DEVICE_ATTR(dev_loss_tmo, S_IRUGO | S_IWUSR, + show_srp_rport_dev_loss_tmo, + store_srp_rport_dev_loss_tmo); + +static int srp_rport_set_state(struct srp_rport *rport, + enum srp_rport_state new_state) +{ + enum srp_rport_state old_state = rport->state; + + lockdep_assert_held(&rport->mutex); + + switch (new_state) { + case SRP_RPORT_RUNNING: + switch (old_state) { + case SRP_RPORT_LOST: + goto invalid; + default: + break; + } + break; + case SRP_RPORT_BLOCKED: + switch (old_state) { + case SRP_RPORT_RUNNING: + break; + default: + goto invalid; + } + break; + case SRP_RPORT_FAIL_FAST: + switch (old_state) { + case SRP_RPORT_LOST: + goto invalid; + default: + break; + } + break; + case SRP_RPORT_LOST: + break; + } + rport->state = new_state; + return 0; + +invalid: + return -EINVAL; +} + +/** + * srp_reconnect_work() - reconnect and schedule a new attempt if necessary + */ +static void srp_reconnect_work(struct work_struct *work) +{ + struct srp_rport *rport = container_of(to_delayed_work(work), + struct srp_rport, reconnect_work); + struct Scsi_Host *shost = rport_to_shost(rport); + int delay, res; + + res = srp_reconnect_rport(rport); + if (res != 0) { + shost_printk(KERN_ERR, shost, + "reconnect attempt %d failed (%d)\n", + ++rport->failed_reconnects, res); + delay = rport->reconnect_delay * + min(100, max(1, rport->failed_reconnects - 10)); + if (delay > 0) + queue_delayed_work(system_long_wq, + &rport->reconnect_work, delay * HZ); + } +} + +static void __rport_fail_io_fast(struct srp_rport *rport) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + struct srp_internal *i; + + lockdep_assert_held(&rport->mutex); + + if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST)) + return; + scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE); + + /* Involve the LLD if possible to terminate all I/O on the rport. 
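An LLD that provides the optional terminate_rport_io() callback is expected to complete every request it still owns; a hedged sketch, in which the target type and the example_finish_all_requests() helper are placeholders and only the callback signature and DID_TRANSPORT_FAILFAST come from the kernel:

	static void example_terminate_rport_io(struct srp_rport *rport)
	{
		struct Scsi_Host *shost = dev_to_shost(rport->dev.parent);
		struct example_target *target = shost_priv(shost);	/* placeholder type */

		/* Fail outstanding requests so the fast_io_fail path can finish
		 * even while the target remains unreachable. */
		example_finish_all_requests(target, DID_TRANSPORT_FAILFAST << 16);
	}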
*/ + i = to_srp_internal(shost->transportt); + if (i->f->terminate_rport_io) + i->f->terminate_rport_io(rport); +} + +/** + * rport_fast_io_fail_timedout() - fast I/O failure timeout handler + */ +static void rport_fast_io_fail_timedout(struct work_struct *work) +{ + struct srp_rport *rport = container_of(to_delayed_work(work), + struct srp_rport, fast_io_fail_work); + struct Scsi_Host *shost = rport_to_shost(rport); + + pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n", + dev_name(&rport->dev), dev_name(&shost->shost_gendev)); + + mutex_lock(&rport->mutex); + if (rport->state == SRP_RPORT_BLOCKED) + __rport_fail_io_fast(rport); + mutex_unlock(&rport->mutex); +} + +/** + * rport_dev_loss_timedout() - device loss timeout handler + */ +static void rport_dev_loss_timedout(struct work_struct *work) +{ + struct srp_rport *rport = container_of(to_delayed_work(work), + struct srp_rport, dev_loss_work); + struct Scsi_Host *shost = rport_to_shost(rport); + struct srp_internal *i = to_srp_internal(shost->transportt); + + pr_info("dev_loss_tmo expired for SRP %s / %s.\n", + dev_name(&rport->dev), dev_name(&shost->shost_gendev)); + + mutex_lock(&rport->mutex); + WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0); + scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE); + mutex_unlock(&rport->mutex); + + i->f->rport_delete(rport); +} + +static void __srp_start_tl_fail_timers(struct srp_rport *rport) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + int delay, fast_io_fail_tmo, dev_loss_tmo; + + lockdep_assert_held(&rport->mutex); + + if (!rport->deleted) { + delay = rport->reconnect_delay; + fast_io_fail_tmo = rport->fast_io_fail_tmo; + dev_loss_tmo = rport->dev_loss_tmo; + pr_debug("%s current state: %d\n", + dev_name(&shost->shost_gendev), rport->state); + + if (delay > 0) + queue_delayed_work(system_long_wq, + &rport->reconnect_work, + 1UL * delay * HZ); + if (fast_io_fail_tmo >= 0 && + srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) { + pr_debug("%s new state: %d\n", + dev_name(&shost->shost_gendev), + rport->state); + scsi_target_block(&shost->shost_gendev); + queue_delayed_work(system_long_wq, + &rport->fast_io_fail_work, + 1UL * fast_io_fail_tmo * HZ); + } + if (dev_loss_tmo >= 0) + queue_delayed_work(system_long_wq, + &rport->dev_loss_work, + 1UL * dev_loss_tmo * HZ); + } else { + pr_debug("%s has already been deleted\n", + dev_name(&shost->shost_gendev)); + srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST); + scsi_target_unblock(&shost->shost_gendev, + SDEV_TRANSPORT_OFFLINE); + } +} + +/** + * srp_start_tl_fail_timers() - start the transport layer failure timers + * + * Start the transport layer fast I/O failure and device loss timers. Do not + * modify a timer that was already started. 
+ */ +void srp_start_tl_fail_timers(struct srp_rport *rport) +{ + mutex_lock(&rport->mutex); + __srp_start_tl_fail_timers(rport); + mutex_unlock(&rport->mutex); +} +EXPORT_SYMBOL(srp_start_tl_fail_timers); + +/** + * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn() + */ +static int scsi_request_fn_active(struct Scsi_Host *shost) +{ + struct scsi_device *sdev; + struct request_queue *q; + int request_fn_active = 0; + + shost_for_each_device(sdev, shost) { + q = sdev->request_queue; + + spin_lock_irq(q->queue_lock); + request_fn_active += q->request_fn_active; + spin_unlock_irq(q->queue_lock); + } + + return request_fn_active; +} + +/** + * srp_reconnect_rport() - reconnect to an SRP target port + * + * Blocks SCSI command queueing before invoking reconnect() such that + * queuecommand() won't be invoked concurrently with reconnect() from outside + * the SCSI EH. This is important since a reconnect() implementation may + * reallocate resources needed by queuecommand(). + * + * Notes: + * - This function neither waits until outstanding requests have finished nor + * tries to abort these. It is the responsibility of the reconnect() + * function to finish outstanding commands before reconnecting to the target + * port. + * - It is the responsibility of the caller to ensure that the resources + * reallocated by the reconnect() function won't be used while this function + * is in progress. One possible strategy is to invoke this function from + * the context of the SCSI EH thread only. Another possible strategy is to + * lock the rport mutex inside each SCSI LLD callback that can be invoked by + * the SCSI EH (the scsi_host_template.eh_*() functions and also the + * scsi_host_template.queuecommand() function). + */ +int srp_reconnect_rport(struct srp_rport *rport) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + struct srp_internal *i = to_srp_internal(shost->transportt); + struct scsi_device *sdev; + int res; + + pr_debug("SCSI host %s\n", dev_name(&shost->shost_gendev)); + + res = mutex_lock_interruptible(&rport->mutex); + if (res) + goto out; + scsi_target_block(&shost->shost_gendev); + while (scsi_request_fn_active(shost)) + msleep(20); + res = i->f->reconnect(rport); + pr_debug("%s (state %d): transport.reconnect() returned %d\n", + dev_name(&shost->shost_gendev), rport->state, res); + if (res == 0) { + cancel_delayed_work(&rport->fast_io_fail_work); + cancel_delayed_work(&rport->dev_loss_work); + + rport->failed_reconnects = 0; + srp_rport_set_state(rport, SRP_RPORT_RUNNING); + scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING); + /* + * If the SCSI error handler has offlined one or more devices, + * invoking scsi_target_unblock() won't change the state of + * these devices into running so do that explicitly. + */ + spin_lock_irq(shost->host_lock); + __shost_for_each_device(sdev, shost) + if (sdev->sdev_state == SDEV_OFFLINE) + sdev->sdev_state = SDEV_RUNNING; + spin_unlock_irq(shost->host_lock); + } else if (rport->state == SRP_RPORT_RUNNING) { + /* + * srp_reconnect_rport() was invoked with fast_io_fail + * off. Mark the port as failed and start the TL failure + * timers if these had not yet been started. 
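One way to follow the "invoke from the SCSI EH thread only" strategy described in the notes above is to call srp_reconnect_rport() from the host reset handler; a hedged sketch in which the target type and host_to_target() helper are placeholders:

	static int example_eh_host_reset_handler(struct scsi_cmnd *scmd)
	{
		struct example_target *target = host_to_target(scmd->device->host);

		/* Runs on the SCSI EH thread, so queuecommand() is already quiesced. */
		return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
	}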
+ */ + __rport_fail_io_fast(rport); + scsi_target_unblock(&shost->shost_gendev, + SDEV_TRANSPORT_OFFLINE); + __srp_start_tl_fail_timers(rport); + } else if (rport->state != SRP_RPORT_BLOCKED) { + scsi_target_unblock(&shost->shost_gendev, + SDEV_TRANSPORT_OFFLINE); + } + mutex_unlock(&rport->mutex); + +out: + return res; +} +EXPORT_SYMBOL(srp_reconnect_rport); + +/** + * srp_timed_out() - SRP transport intercept of the SCSI timeout EH + * + * If a timeout occurs while an rport is in the blocked state, ask the SCSI + * EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core + * handle the timeout (BLK_EH_NOT_HANDLED). + * + * Note: This function is called from soft-IRQ context and with the request + * queue lock held. + */ +static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd) +{ + struct scsi_device *sdev = scmd->device; + struct Scsi_Host *shost = sdev->host; + struct srp_internal *i = to_srp_internal(shost->transportt); + + pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev)); + return i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? + BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; +} + static void srp_rport_release(struct device *dev) { struct srp_rport *rport = dev_to_rport(dev); + cancel_delayed_work_sync(&rport->reconnect_work); + cancel_delayed_work_sync(&rport->fast_io_fail_work); + cancel_delayed_work_sync(&rport->dev_loss_work); + put_device(dev->parent); kfree(rport); } @@ -185,6 +673,24 @@ static int srp_host_match(struct attribute_container *cont, struct device *dev) } /** + * srp_rport_get() - increment rport reference count + */ +void srp_rport_get(struct srp_rport *rport) +{ + get_device(&rport->dev); +} +EXPORT_SYMBOL(srp_rport_get); + +/** + * srp_rport_put() - decrement rport reference count + */ +void srp_rport_put(struct srp_rport *rport) +{ + put_device(&rport->dev); +} +EXPORT_SYMBOL(srp_rport_put); + +/** * srp_rport_add - add a SRP remote port to the device hierarchy * @shost: scsi host the remote port is connected to. * @ids: The port id for the remote port. @@ -196,12 +702,15 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost, { struct srp_rport *rport; struct device *parent = &shost->shost_gendev; + struct srp_internal *i = to_srp_internal(shost->transportt); int id, ret; rport = kzalloc(sizeof(*rport), GFP_KERNEL); if (!rport) return ERR_PTR(-ENOMEM); + mutex_init(&rport->mutex); + device_initialize(&rport->dev); rport->dev.parent = get_device(parent); @@ -210,6 +719,17 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost, memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id)); rport->roles = ids->roles; + if (i->f->reconnect) + rport->reconnect_delay = i->f->reconnect_delay ? + *i->f->reconnect_delay : 10; + INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work); + rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ? + *i->f->fast_io_fail_tmo : 15; + rport->dev_loss_tmo = i->f->dev_loss_tmo ? 
*i->f->dev_loss_tmo : 60; + INIT_DELAYED_WORK(&rport->fast_io_fail_work, + rport_fast_io_fail_timedout); + INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout); + id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id); dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id); @@ -259,6 +779,13 @@ void srp_rport_del(struct srp_rport *rport) transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); + + mutex_lock(&rport->mutex); + if (rport->state == SRP_RPORT_BLOCKED) + __rport_fail_io_fast(rport); + rport->deleted = true; + mutex_unlock(&rport->mutex); + put_device(dev); } EXPORT_SYMBOL_GPL(srp_rport_del); @@ -310,6 +837,8 @@ srp_attach_transport(struct srp_function_template *ft) if (!i) return NULL; + i->t.eh_timed_out = srp_timed_out; + i->t.tsk_mgmt_response = srp_tsk_mgmt_response; i->t.it_nexus_response = srp_it_nexus_response; @@ -327,6 +856,15 @@ srp_attach_transport(struct srp_function_template *ft) count = 0; i->rport_attrs[count++] = &dev_attr_port_id; i->rport_attrs[count++] = &dev_attr_roles; + if (ft->has_rport_state) { + i->rport_attrs[count++] = &dev_attr_state; + i->rport_attrs[count++] = &dev_attr_fast_io_fail_tmo; + i->rport_attrs[count++] = &dev_attr_dev_loss_tmo; + } + if (ft->reconnect) { + i->rport_attrs[count++] = &dev_attr_reconnect_delay; + i->rport_attrs[count++] = &dev_attr_failed_reconnects; + } if (ft->rport_delete) i->rport_attrs[count++] = &dev_attr_delete; i->rport_attrs[count++] = NULL; diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 325c31caa6e0..1aa4befcfbd0 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -1790,8 +1790,6 @@ static void stex_remove(struct pci_dev *pdev) scsi_remove_host(hba->host); - pci_set_drvdata(pdev, NULL); - stex_hba_stop(hba); stex_hba_free(hba); diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h index b80bf709f104..805369521df8 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.h +++ b/drivers/scsi/sym53c8xx_2/sym_glue.h @@ -174,7 +174,7 @@ struct sym_slcb { */ struct sym_shcb { /* - * Chip and controller indentification. + * Chip and controller identification. 
*/ int unit; char inst_name[16]; diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c index 11423615c2ea..b006cf789ba1 100644 --- a/drivers/scsi/tmscsim.c +++ b/drivers/scsi/tmscsim.c @@ -2553,7 +2553,6 @@ static void dc390_remove_one(struct pci_dev *dev) pci_disable_device(dev); scsi_host_put(scsi_host); - pci_set_drvdata(dev, NULL); } static struct pci_device_id tmscsim_pci_tbl[] = { diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index a823cf44e949..8b9531204c2b 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -132,7 +132,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev) pm_runtime_forbid(&pdev->dev); pm_runtime_get_noresume(&pdev->dev); ufshcd_remove(hba); - pci_set_drvdata(pdev, NULL); } /** diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 3bfaa66fa0d1..b9755ec0e812 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -1405,7 +1405,6 @@ out_release_resources: out_free_host: scsi_host_put(host); out_disable_device: - pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); return error; @@ -1445,7 +1444,6 @@ static void pvscsi_remove(struct pci_dev *pdev) scsi_host_put(host); - pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); } diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index b9f0192758d6..6d207afec8cb 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -150,7 +150,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) &dws->tx_sgl, 1, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); + DMA_PREP_INTERRUPT); txdesc->callback = dw_spi_dma_done; txdesc->callback_param = dws; @@ -173,7 +173,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) &dws->rx_sgl, 1, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); + DMA_PREP_INTERRUPT); rxdesc->callback = dw_spi_dma_done; rxdesc->callback_param = dws; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 8d85ddc46011..18cc625d887f 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -357,6 +357,19 @@ struct spi_device *spi_alloc_device(struct spi_master *master) } EXPORT_SYMBOL_GPL(spi_alloc_device); +static void spi_dev_set_name(struct spi_device *spi) +{ + struct acpi_device *adev = ACPI_COMPANION(&spi->dev); + + if (adev) { + dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev)); + return; + } + + dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), + spi->chip_select); +} + /** * spi_add_device - Add spi_device allocated with spi_alloc_device * @spi: spi_device to register @@ -383,9 +396,7 @@ int spi_add_device(struct spi_device *spi) } /* Set the bus ID string */ - dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), - spi->chip_select); - + spi_dev_set_name(spi); /* We need to make sure there's no other device with this * chipselect **BEFORE** we call setup(), else we'll trash @@ -1144,7 +1155,7 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, return AE_NO_MEMORY; } - ACPI_HANDLE_SET(&spi->dev, handle); + ACPI_COMPANION_SET(&spi->dev, adev); spi->irq = -1; INIT_LIST_HEAD(&resource_list); diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h index 9221a74efd18..93c7299e8353 100644 --- a/drivers/staging/iio/adc/ad7606.h +++ b/drivers/staging/iio/adc/ad7606.h @@ -42,7 +42,7 @@ struct ad7606_platform_data { /** * struct ad7606_chip_info - chip specifc information - * @name: indentification string for chip + * @name: identification 
string for chip * @int_vref_mv: the internal reference voltage * @channels: channel specification * @num_channels: number of channels diff --git a/drivers/staging/media/lirc/TODO b/drivers/staging/media/lirc/TODO index b6cb593f55c6..cbea5d84fed3 100644 --- a/drivers/staging/media/lirc/TODO +++ b/drivers/staging/media/lirc/TODO @@ -2,6 +2,11 @@ (see drivers/media/IR/mceusb.c vs. lirc_mceusb.c in lirc cvs for an example of a previously completed port). +- lirc_bt829 uses registers on a Mach64 VT, which has a separate kernel + framebuffer driver (atyfb) and userland X driver (mach64). It can't + simply be converted to a normal PCI driver, but ideally it should be + coordinated with the other drivers. + Please send patches to: Jarod Wilson <jarod@wilsonet.com> Greg Kroah-Hartman <greg@kroah.com> diff --git a/drivers/staging/media/lirc/lirc_bt829.c b/drivers/staging/media/lirc/lirc_bt829.c index fbbdce4c119f..30edc740ac25 100644 --- a/drivers/staging/media/lirc/lirc_bt829.c +++ b/drivers/staging/media/lirc/lirc_bt829.c @@ -63,7 +63,7 @@ static bool debug; } while (0) static int atir_minor; -static unsigned long pci_addr_phys; +static phys_addr_t pci_addr_phys; static unsigned char *pci_addr_lin; static struct lirc_driver atir_driver; @@ -78,11 +78,11 @@ static struct pci_dev *do_pci_probe(void) pci_addr_phys = 0; if (my_dev->resource[0].flags & IORESOURCE_MEM) { pci_addr_phys = my_dev->resource[0].start; - pr_info("memory at 0x%08X\n", - (unsigned int)pci_addr_phys); + pr_info("memory at %pa\n", &pci_addr_phys); } if (pci_addr_phys == 0) { pr_err("no memory resource ?\n"); + pci_dev_put(my_dev); return NULL; } } else { @@ -120,13 +120,20 @@ static void atir_set_use_dec(void *data) int init_module(void) { struct pci_dev *pdev; + int rc; pdev = do_pci_probe(); if (pdev == NULL) return -ENODEV; - if (!atir_init_start()) - return -ENODEV; + rc = pci_enable_device(pdev); + if (rc) + goto err_put_dev; + + if (!atir_init_start()) { + rc = -ENODEV; + goto err_disable; + } strcpy(atir_driver.name, "ATIR"); atir_driver.minor = -1; @@ -142,17 +149,31 @@ int init_module(void) atir_minor = lirc_register_driver(&atir_driver); if (atir_minor < 0) { pr_err("failed to register driver!\n"); - return atir_minor; + rc = atir_minor; + goto err_unmap; } dprintk("driver is registered on minor %d\n", atir_minor); return 0; + +err_unmap: + iounmap(pci_addr_lin); +err_disable: + pci_disable_device(pdev); +err_put_dev: + pci_dev_put(pdev); + return rc; } void cleanup_module(void) { + struct pci_dev *pdev = to_pci_dev(atir_driver.dev); + lirc_unregister_driver(atir_minor); + iounmap(pci_addr_lin); + pci_disable_device(pdev); + pci_dev_put(pdev); } diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c index f6bc4c91ab35..2e3a98575d47 100644 --- a/drivers/staging/media/lirc/lirc_serial.c +++ b/drivers/staging/media/lirc/lirc_serial.c @@ -707,7 +707,8 @@ static irqreturn_t irq_handler(int i, void *blah) pr_warn("ignoring spike: %d %d %lx %lx %lx %lx\n", dcd, sense, tv.tv_sec, lasttv.tv_sec, - tv.tv_usec, lasttv.tv_usec); + (unsigned long)tv.tv_usec, + (unsigned long)lasttv.tv_usec); continue; } @@ -719,7 +720,8 @@ static irqreturn_t irq_handler(int i, void *blah) pr_warn("%d %d %lx %lx %lx %lx\n", dcd, sense, tv.tv_sec, lasttv.tv_sec, - tv.tv_usec, lasttv.tv_usec); + (unsigned long)tv.tv_usec, + (unsigned long)lasttv.tv_usec); data = PULSE_MASK; } else if (deltv > 15) { data = PULSE_MASK; /* really long time */ @@ -728,7 +730,8 @@ static irqreturn_t irq_handler(int i, void *blah) 
pr_warn("AIEEEE: %d %d %lx %lx %lx %lx\n", dcd, sense, tv.tv_sec, lasttv.tv_sec, - tv.tv_usec, lasttv.tv_usec); + (unsigned long)tv.tv_usec, + (unsigned long)lasttv.tv_usec); /* * detecting pulse while this * MUST be a space! diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c index 11d5338b4f2f..0feeaadf29dc 100644 --- a/drivers/staging/media/lirc/lirc_zilog.c +++ b/drivers/staging/media/lirc/lirc_zilog.c @@ -61,6 +61,9 @@ #include <media/lirc_dev.h> #include <media/lirc.h> +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + struct IR; struct IR_rx { @@ -941,7 +944,14 @@ static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos) schedule(); set_current_state(TASK_INTERRUPTIBLE); } else { - unsigned char buf[rbuf->chunk_size]; + unsigned char buf[MAX_XFER_SIZE]; + + if (rbuf->chunk_size > sizeof(buf)) { + zilog_error("chunk_size is too big (%d)!\n", + rbuf->chunk_size); + ret = -EINVAL; + break; + } m = lirc_buffer_read(rbuf, buf); if (m == rbuf->chunk_size) { ret = copy_to_user((void *)outbuf+written, buf, diff --git a/drivers/staging/media/msi3101/Kconfig b/drivers/staging/media/msi3101/Kconfig index 76d5bbd4d93c..0c349c8595e4 100644 --- a/drivers/staging/media/msi3101/Kconfig +++ b/drivers/staging/media/msi3101/Kconfig @@ -1,4 +1,5 @@ config USB_MSI3101 tristate "Mirics MSi3101 SDR Dongle" depends on USB && VIDEO_DEV && VIDEO_V4L2 - select VIDEOBUF2_VMALLOC + select VIDEOBUF2_CORE + select VIDEOBUF2_VMALLOC diff --git a/drivers/staging/media/solo6x10/solo6x10-disp.c b/drivers/staging/media/solo6x10/solo6x10-disp.c index 32d9953bc36e..145295a5db72 100644 --- a/drivers/staging/media/solo6x10/solo6x10-disp.c +++ b/drivers/staging/media/solo6x10/solo6x10-disp.c @@ -176,18 +176,27 @@ static void solo_vout_config(struct solo_dev *solo_dev) static int solo_dma_vin_region(struct solo_dev *solo_dev, u32 off, u16 val, int reg_size) { - u16 buf[64]; - int i; - int ret = 0; + u16 *buf; + const int n = 64, size = n * sizeof(*buf); + int i, ret = 0; + + buf = kmalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; - for (i = 0; i < sizeof(buf) >> 1; i++) + for (i = 0; i < n; i++) buf[i] = cpu_to_le16(val); - for (i = 0; i < reg_size; i += sizeof(buf)) - ret |= solo_p2m_dma(solo_dev, 1, buf, - SOLO_MOTION_EXT_ADDR(solo_dev) + off + i, - sizeof(buf), 0, 0); + for (i = 0; i < reg_size; i += size) { + ret = solo_p2m_dma(solo_dev, 1, buf, + SOLO_MOTION_EXT_ADDR(solo_dev) + off + i, + size, 0, 0); + + if (ret) + break; + } + kfree(buf); return ret; } diff --git a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c index a4c589604b02..d582c5b84c14 100644 --- a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c +++ b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c @@ -95,38 +95,11 @@ static unsigned char vop_6110_pal_cif[] = { 0x01, 0x68, 0xce, 0x32, 0x28, 0x00, 0x00, 0x00, }; -struct vop_header { - /* VE_STATUS0 */ - u32 mpeg_size:20, sad_motion_flag:1, video_motion_flag:1, vop_type:2, - channel:5, source_fl:1, interlace:1, progressive:1; - - /* VE_STATUS1 */ - u32 vsize:8, hsize:8, last_queue:4, nop0:8, scale:4; - - /* VE_STATUS2 */ - u32 mpeg_off; - - /* VE_STATUS3 */ - u32 jpeg_off; - - /* VE_STATUS4 */ - u32 jpeg_size:20, interval:10, nop1:2; - - /* VE_STATUS5/6 */ - u32 sec, usec; - - /* VE_STATUS7/8/9 */ - u32 nop2[3]; - - /* VE_STATUS10 */ - u32 mpeg_size_alt:20, nop3:12; - - u32 end_nops[5]; -} __packed; +typedef __le32 vop_header[16]; struct 
solo_enc_buf { enum solo_enc_types type; - struct vop_header *vh; + const vop_header *vh; int motion; }; @@ -346,7 +319,7 @@ static int enc_get_mpeg_dma(struct solo_dev *solo_dev, dma_addr_t dma, /* Build a descriptor queue out of an SG list and send it to the P2M for * processing. */ static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip, - struct vb2_dma_sg_desc *vbuf, int off, int size, + struct sg_table *vbuf, int off, int size, unsigned int base, unsigned int base_size) { struct solo_dev *solo_dev = solo_enc->solo_dev; @@ -359,7 +332,7 @@ static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip, solo_enc->desc_count = 1; - for_each_sg(vbuf->sglist, sg, vbuf->num_pages, i) { + for_each_sg(vbuf->sgl, sg, vbuf->nents, i) { struct solo_p2m_desc *desc; dma_addr_t dma; int len; @@ -430,84 +403,145 @@ static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip, solo_enc->desc_count - 1); } +/* Extract values from VOP header - VE_STATUSxx */ +static inline int vop_interlaced(const vop_header *vh) +{ + return (__le32_to_cpu((*vh)[0]) >> 30) & 1; +} + +static inline u8 vop_channel(const vop_header *vh) +{ + return (__le32_to_cpu((*vh)[0]) >> 24) & 0x1F; +} + +static inline u8 vop_type(const vop_header *vh) +{ + return (__le32_to_cpu((*vh)[0]) >> 22) & 3; +} + +static inline u32 vop_mpeg_size(const vop_header *vh) +{ + return __le32_to_cpu((*vh)[0]) & 0xFFFFF; +} + +static inline u8 vop_hsize(const vop_header *vh) +{ + return (__le32_to_cpu((*vh)[1]) >> 8) & 0xFF; +} + +static inline u8 vop_vsize(const vop_header *vh) +{ + return __le32_to_cpu((*vh)[1]) & 0xFF; +} + +static inline u32 vop_mpeg_offset(const vop_header *vh) +{ + return __le32_to_cpu((*vh)[2]); +} + +static inline u32 vop_jpeg_offset(const vop_header *vh) +{ + return __le32_to_cpu((*vh)[3]); +} + +static inline u32 vop_jpeg_size(const vop_header *vh) +{ + return __le32_to_cpu((*vh)[4]) & 0xFFFFF; +} + +static inline u32 vop_sec(const vop_header *vh) +{ + return __le32_to_cpu((*vh)[5]); +} + +static inline u32 vop_usec(const vop_header *vh) +{ + return __le32_to_cpu((*vh)[6]); +} + static int solo_fill_jpeg(struct solo_enc_dev *solo_enc, - struct vb2_buffer *vb, struct vop_header *vh) + struct vb2_buffer *vb, const vop_header *vh) { struct solo_dev *solo_dev = solo_enc->solo_dev; - struct vb2_dma_sg_desc *vbuf = vb2_dma_sg_plane_desc(vb, 0); + struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0); int frame_size; int ret; vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME; - if (vb2_plane_size(vb, 0) < vh->jpeg_size + solo_enc->jpeg_len) + if (vb2_plane_size(vb, 0) < vop_jpeg_size(vh) + solo_enc->jpeg_len) return -EIO; - sg_copy_from_buffer(vbuf->sglist, vbuf->num_pages, - solo_enc->jpeg_header, - solo_enc->jpeg_len); - - frame_size = (vh->jpeg_size + solo_enc->jpeg_len + (DMA_ALIGN - 1)) + frame_size = (vop_jpeg_size(vh) + solo_enc->jpeg_len + (DMA_ALIGN - 1)) & ~(DMA_ALIGN - 1); - vb2_set_plane_payload(vb, 0, vh->jpeg_size + solo_enc->jpeg_len); + vb2_set_plane_payload(vb, 0, vop_jpeg_size(vh) + solo_enc->jpeg_len); - dma_map_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages, + /* may discard all previous data in vbuf->sgl */ + dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents, DMA_FROM_DEVICE); - ret = solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf, vh->jpeg_off, - frame_size, SOLO_JPEG_EXT_ADDR(solo_dev), - SOLO_JPEG_EXT_SIZE(solo_dev)); - dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages, + ret = solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf, + vop_jpeg_offset(vh) - 
SOLO_JPEG_EXT_ADDR(solo_dev), + frame_size, SOLO_JPEG_EXT_ADDR(solo_dev), + SOLO_JPEG_EXT_SIZE(solo_dev)); + dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents, DMA_FROM_DEVICE); + + /* add the header only after dma_unmap_sg() */ + sg_copy_from_buffer(vbuf->sgl, vbuf->nents, + solo_enc->jpeg_header, solo_enc->jpeg_len); + return ret; } static int solo_fill_mpeg(struct solo_enc_dev *solo_enc, - struct vb2_buffer *vb, struct vop_header *vh) + struct vb2_buffer *vb, const vop_header *vh) { struct solo_dev *solo_dev = solo_enc->solo_dev; - struct vb2_dma_sg_desc *vbuf = vb2_dma_sg_plane_desc(vb, 0); + struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0); int frame_off, frame_size; int skip = 0; int ret; - if (vb2_plane_size(vb, 0) < vh->mpeg_size) + if (vb2_plane_size(vb, 0) < vop_mpeg_size(vh)) return -EIO; /* If this is a key frame, add extra header */ - if (!vh->vop_type) { - sg_copy_from_buffer(vbuf->sglist, vbuf->num_pages, - solo_enc->vop, - solo_enc->vop_len); - + vb->v4l2_buf.flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME); + if (!vop_type(vh)) { skip = solo_enc->vop_len; - vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME; - vb2_set_plane_payload(vb, 0, vh->mpeg_size + solo_enc->vop_len); + vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh) + solo_enc->vop_len); } else { vb->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME; - vb2_set_plane_payload(vb, 0, vh->mpeg_size); + vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh)); } /* Now get the actual mpeg payload */ - frame_off = (vh->mpeg_off + sizeof(*vh)) + frame_off = (vop_mpeg_offset(vh) - SOLO_MP4E_EXT_ADDR(solo_dev) + sizeof(*vh)) % SOLO_MP4E_EXT_SIZE(solo_dev); - frame_size = (vh->mpeg_size + skip + (DMA_ALIGN - 1)) + frame_size = (vop_mpeg_size(vh) + skip + (DMA_ALIGN - 1)) & ~(DMA_ALIGN - 1); - dma_map_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages, + /* may discard all previous data in vbuf->sgl */ + dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents, DMA_FROM_DEVICE); ret = solo_send_desc(solo_enc, skip, vbuf, frame_off, frame_size, SOLO_MP4E_EXT_ADDR(solo_dev), SOLO_MP4E_EXT_SIZE(solo_dev)); - dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages, + dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents, DMA_FROM_DEVICE); + + /* add the header only after dma_unmap_sg() */ + if (!vop_type(vh)) + sg_copy_from_buffer(vbuf->sgl, vbuf->nents, + solo_enc->vop, solo_enc->vop_len); return ret; } static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc, struct vb2_buffer *vb, struct solo_enc_buf *enc_buf) { - struct vop_header *vh = enc_buf->vh; + const vop_header *vh = enc_buf->vh; int ret; /* Check for motion flags */ @@ -531,8 +565,8 @@ static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc, if (!ret) { vb->v4l2_buf.sequence = solo_enc->sequence++; - vb->v4l2_buf.timestamp.tv_sec = vh->sec; - vb->v4l2_buf.timestamp.tv_usec = vh->usec; + vb->v4l2_buf.timestamp.tv_sec = vop_sec(vh); + vb->v4l2_buf.timestamp.tv_usec = vop_usec(vh); } vb2_buffer_done(vb, ret ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); @@ -605,15 +639,13 @@ static void solo_handle_ring(struct solo_dev *solo_dev) /* FAIL... 
*/ if (enc_get_mpeg_dma(solo_dev, solo_dev->vh_dma, off, - sizeof(struct vop_header))) + sizeof(vop_header))) continue; - enc_buf.vh = (struct vop_header *)solo_dev->vh_buf; - enc_buf.vh->mpeg_off -= SOLO_MP4E_EXT_ADDR(solo_dev); - enc_buf.vh->jpeg_off -= SOLO_JPEG_EXT_ADDR(solo_dev); + enc_buf.vh = solo_dev->vh_buf; /* Sanity check */ - if (enc_buf.vh->mpeg_off != off) + if (vop_mpeg_offset(enc_buf.vh) != SOLO_MP4E_EXT_ADDR(solo_dev) + off) continue; if (solo_motion_detected(solo_enc)) @@ -1329,7 +1361,7 @@ int solo_enc_v4l2_init(struct solo_dev *solo_dev, unsigned nr) init_waitqueue_head(&solo_dev->ring_thread_wait); - solo_dev->vh_size = sizeof(struct vop_header); + solo_dev->vh_size = sizeof(vop_header); solo_dev->vh_buf = pci_alloc_consistent(solo_dev->pdev, solo_dev->vh_size, &solo_dev->vh_dma); diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h index 6f91d2e34b2a..f1bbb8cb74e6 100644 --- a/drivers/staging/media/solo6x10/solo6x10.h +++ b/drivers/staging/media/solo6x10/solo6x10.h @@ -94,7 +94,6 @@ #define SOLO_ENC_MODE_HD1 1 #define SOLO_ENC_MODE_D1 9 -#define SOLO_DEFAULT_GOP 30 #define SOLO_DEFAULT_QP 3 #ifndef V4L2_BUF_FLAG_MOTION_ON diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 03a567199bbe..f1d511a9475b 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -1608,15 +1608,17 @@ exit: EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name); #ifdef CONFIG_NET +static const struct genl_multicast_group thermal_event_mcgrps[] = { + { .name = THERMAL_GENL_MCAST_GROUP_NAME, }, +}; + static struct genl_family thermal_event_genl_family = { .id = GENL_ID_GENERATE, .name = THERMAL_GENL_FAMILY_NAME, .version = THERMAL_GENL_VERSION, .maxattr = THERMAL_GENL_ATTR_MAX, -}; - -static struct genl_multicast_group thermal_event_mcgrp = { - .name = THERMAL_GENL_MCAST_GROUP_NAME, + .mcgrps = thermal_event_mcgrps, + .n_mcgrps = ARRAY_SIZE(thermal_event_mcgrps), }; int thermal_generate_netlink_event(struct thermal_zone_device *tz, @@ -1677,7 +1679,8 @@ int thermal_generate_netlink_event(struct thermal_zone_device *tz, return result; } - result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC); + result = genlmsg_multicast(&thermal_event_genl_family, skb, 0, + 0, GFP_ATOMIC); if (result) dev_err(&tz->device, "Failed to send netlink event:%d", result); @@ -1687,17 +1690,7 @@ EXPORT_SYMBOL_GPL(thermal_generate_netlink_event); static int genetlink_init(void) { - int result; - - result = genl_register_family(&thermal_event_genl_family); - if (result) - return result; - - result = genl_register_mc_group(&thermal_event_genl_family, - &thermal_event_mcgrp); - if (result) - genl_unregister_family(&thermal_event_genl_family); - return result; + return genl_register_family(&thermal_event_genl_family); } static void genetlink_exit(void) diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 537750261aaa..7d8103cd3e2e 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -1433,7 +1433,7 @@ static void work_fn_rx(struct work_struct *work) desc = s->desc_rx[new]; if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { /* Handle incomplete DMA receive */ struct dma_chan *chan = s->chan_rx; struct shdma_desc *sh_desc = container_of(desc, diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig index db535b0aa172..fed7f68d025d 100644 --- a/drivers/usb/core/Kconfig +++ b/drivers/usb/core/Kconfig @@ 
-28,7 +28,7 @@ config USB_DEFAULT_PERSIST bool "Enable USB persist by default" default y help - Say N here if you don't want USB power session persistance + Say N here if you don't want USB power session persistence enabled by default. If you say N it will make suspended USB devices that lose power get reenumerated as if they had been unplugged, causing any mounted filesystems to be lost. The diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 06cec635e703..a7c04e24ca48 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -5501,6 +5501,6 @@ acpi_handle usb_get_hub_port_acpi_handle(struct usb_device *hdev, if (!hub) return NULL; - return DEVICE_ACPI_HANDLE(&hub->ports[port1 - 1]->dev); + return ACPI_HANDLE(&hub->ports[port1 - 1]->dev); } #endif diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c index 255c14464bf2..4e243c37f17f 100644 --- a/drivers/usb/core/usb-acpi.c +++ b/drivers/usb/core/usb-acpi.c @@ -173,7 +173,7 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle) } /* root hub's parent is the usb hcd. */ - parent_handle = DEVICE_ACPI_HANDLE(dev->parent); + parent_handle = ACPI_HANDLE(dev->parent); *handle = acpi_get_child(parent_handle, udev->portnum); if (!*handle) return -ENODEV; @@ -194,7 +194,7 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle) raw_port_num = usb_hcd_find_raw_port_number(hcd, port_num); - *handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev), + *handle = acpi_get_child(ACPI_HANDLE(&udev->dev), raw_port_num); if (!*handle) return -ENODEV; diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index 36db5d98dd2f..fb80d68f4d33 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -10,6 +10,8 @@ * published by the Free Software Foundation. 
*/ +#include <linux/gpio.h> +#include <linux/of_gpio.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> @@ -19,6 +21,7 @@ #include <linux/err.h> #include <linux/pwm.h> #include <linux/pwm_backlight.h> +#include <linux/regulator/consumer.h> #include <linux/slab.h> struct pwm_bl_data { @@ -27,6 +30,11 @@ struct pwm_bl_data { unsigned int period; unsigned int lth_brightness; unsigned int *levels; + bool enabled; + struct regulator *power_supply; + int enable_gpio; + unsigned long enable_gpio_flags; + unsigned int scale; int (*notify)(struct device *, int brightness); void (*notify_after)(struct device *, @@ -35,11 +43,65 @@ struct pwm_bl_data { void (*exit)(struct device *); }; +static void pwm_backlight_power_on(struct pwm_bl_data *pb, int brightness) +{ + int err; + + if (pb->enabled) + return; + + err = regulator_enable(pb->power_supply); + if (err < 0) + dev_err(pb->dev, "failed to enable power supply\n"); + + if (gpio_is_valid(pb->enable_gpio)) { + if (pb->enable_gpio_flags & PWM_BACKLIGHT_GPIO_ACTIVE_LOW) + gpio_set_value(pb->enable_gpio, 0); + else + gpio_set_value(pb->enable_gpio, 1); + } + + pwm_enable(pb->pwm); + pb->enabled = true; +} + +static void pwm_backlight_power_off(struct pwm_bl_data *pb) +{ + if (!pb->enabled) + return; + + pwm_config(pb->pwm, 0, pb->period); + pwm_disable(pb->pwm); + + if (gpio_is_valid(pb->enable_gpio)) { + if (pb->enable_gpio_flags & PWM_BACKLIGHT_GPIO_ACTIVE_LOW) + gpio_set_value(pb->enable_gpio, 1); + else + gpio_set_value(pb->enable_gpio, 0); + } + + regulator_disable(pb->power_supply); + pb->enabled = false; +} + +static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness) +{ + unsigned int lth = pb->lth_brightness; + int duty_cycle; + + if (pb->levels) + duty_cycle = pb->levels[brightness]; + else + duty_cycle = brightness; + + return (duty_cycle * (pb->period - lth) / pb->scale) + lth; +} + static int pwm_backlight_update_status(struct backlight_device *bl) { struct pwm_bl_data *pb = bl_get_data(bl); int brightness = bl->props.brightness; - int max = bl->props.max_brightness; + int duty_cycle; if (bl->props.power != FB_BLANK_UNBLANK || bl->props.fb_blank != FB_BLANK_UNBLANK || @@ -49,24 +111,12 @@ static int pwm_backlight_update_status(struct backlight_device *bl) if (pb->notify) brightness = pb->notify(pb->dev, brightness); - if (brightness == 0) { - pwm_config(pb->pwm, 0, pb->period); - pwm_disable(pb->pwm); - } else { - int duty_cycle; - - if (pb->levels) { - duty_cycle = pb->levels[brightness]; - max = pb->levels[max]; - } else { - duty_cycle = brightness; - } - - duty_cycle = pb->lth_brightness + - (duty_cycle * (pb->period - pb->lth_brightness) / max); + if (brightness > 0) { + duty_cycle = compute_duty_cycle(pb, brightness); pwm_config(pb->pwm, duty_cycle, pb->period); - pwm_enable(pb->pwm); - } + pwm_backlight_power_on(pb, brightness); + } else + pwm_backlight_power_off(pb); if (pb->notify_after) pb->notify_after(pb->dev, brightness); @@ -98,6 +148,7 @@ static int pwm_backlight_parse_dt(struct device *dev, struct platform_pwm_backlight_data *data) { struct device_node *node = dev->of_node; + enum of_gpio_flags flags; struct property *prop; int length; u32 value; @@ -138,11 +189,13 @@ static int pwm_backlight_parse_dt(struct device *dev, data->max_brightness--; } - /* - * TODO: Most users of this driver use a number of GPIOs to control - * backlight power. Support for specifying these needs to be - * added. 
- */ + data->enable_gpio = of_get_named_gpio_flags(node, "enable-gpios", 0, + &flags); + if (data->enable_gpio == -EPROBE_DEFER) + return -EPROBE_DEFER; + + if (gpio_is_valid(data->enable_gpio) && (flags & OF_GPIO_ACTIVE_LOW)) + data->enable_gpio_flags |= PWM_BACKLIGHT_GPIO_ACTIVE_LOW; return 0; } @@ -168,7 +221,6 @@ static int pwm_backlight_probe(struct platform_device *pdev) struct backlight_properties props; struct backlight_device *bl; struct pwm_bl_data *pb; - unsigned int max; int ret; if (!data) { @@ -195,16 +247,46 @@ static int pwm_backlight_probe(struct platform_device *pdev) } if (data->levels) { - max = data->levels[data->max_brightness]; + unsigned int i; + + for (i = 0; i <= data->max_brightness; i++) + if (data->levels[i] > pb->scale) + pb->scale = data->levels[i]; + pb->levels = data->levels; } else - max = data->max_brightness; + pb->scale = data->max_brightness; + pb->enable_gpio = data->enable_gpio; + pb->enable_gpio_flags = data->enable_gpio_flags; pb->notify = data->notify; pb->notify_after = data->notify_after; pb->check_fb = data->check_fb; pb->exit = data->exit; pb->dev = &pdev->dev; + pb->enabled = false; + + if (gpio_is_valid(pb->enable_gpio)) { + unsigned long flags; + + if (pb->enable_gpio_flags & PWM_BACKLIGHT_GPIO_ACTIVE_LOW) + flags = GPIOF_OUT_INIT_HIGH; + else + flags = GPIOF_OUT_INIT_LOW; + + ret = gpio_request_one(pb->enable_gpio, flags, "enable"); + if (ret < 0) { + dev_err(&pdev->dev, "failed to request GPIO#%d: %d\n", + pb->enable_gpio, ret); + goto err_alloc; + } + } + + pb->power_supply = devm_regulator_get(&pdev->dev, "power"); + if (IS_ERR(pb->power_supply)) { + ret = PTR_ERR(pb->power_supply); + goto err_gpio; + } pb->pwm = devm_pwm_get(&pdev->dev, NULL); if (IS_ERR(pb->pwm)) { @@ -214,7 +296,7 @@ static int pwm_backlight_probe(struct platform_device *pdev) if (IS_ERR(pb->pwm)) { dev_err(&pdev->dev, "unable to request legacy PWM\n"); ret = PTR_ERR(pb->pwm); - goto err_alloc; + goto err_gpio; } } @@ -229,7 +311,7 @@ static int pwm_backlight_probe(struct platform_device *pdev) pwm_set_period(pb->pwm, data->pwm_period_ns); pb->period = pwm_get_period(pb->pwm); - pb->lth_brightness = data->lth_brightness * (pb->period / max); + pb->lth_brightness = data->lth_brightness * (pb->period / pb->scale); memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; @@ -239,7 +321,7 @@ static int pwm_backlight_probe(struct platform_device *pdev) if (IS_ERR(bl)) { dev_err(&pdev->dev, "failed to register backlight\n"); ret = PTR_ERR(bl); - goto err_alloc; + goto err_gpio; } if (data->dft_brightness > data->max_brightness) { @@ -255,6 +337,9 @@ static int pwm_backlight_probe(struct platform_device *pdev) platform_set_drvdata(pdev, bl); return 0; +err_gpio: + if (gpio_is_valid(pb->enable_gpio)) + gpio_free(pb->enable_gpio); err_alloc: if (data->exit) data->exit(&pdev->dev); @@ -267,10 +352,11 @@ static int pwm_backlight_remove(struct platform_device *pdev) struct pwm_bl_data *pb = bl_get_data(bl); backlight_device_unregister(bl); - pwm_config(pb->pwm, 0, pb->period); - pwm_disable(pb->pwm); + pwm_backlight_power_off(pb); + if (pb->exit) pb->exit(&pdev->dev); + return 0; } @@ -282,10 +368,12 @@ static int pwm_backlight_suspend(struct device *dev) if (pb->notify) pb->notify(pb->dev, 0); - pwm_config(pb->pwm, 0, pb->period); - pwm_disable(pb->pwm); + + pwm_backlight_power_off(pb); + if (pb->notify_after) pb->notify_after(pb->dev, 0); + return 0; } @@ -294,12 +382,19 @@ static int pwm_backlight_resume(struct device *dev) struct backlight_device 
*bl = dev_get_drvdata(dev); backlight_update_status(bl); + return 0; } #endif -static SIMPLE_DEV_PM_OPS(pwm_backlight_pm_ops, pwm_backlight_suspend, - pwm_backlight_resume); +static const struct dev_pm_ops pwm_backlight_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .suspend = pwm_backlight_suspend, + .resume = pwm_backlight_resume, + .poweroff = pwm_backlight_suspend, + .restore = pwm_backlight_resume, +#endif +}; static struct platform_driver pwm_backlight_driver = { .driver = { @@ -317,4 +412,3 @@ module_platform_driver(pwm_backlight_driver); MODULE_DESCRIPTION("PWM based Backlight Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pwm-backlight"); - diff --git a/drivers/video/exynos/exynos_mipi_dsi.c b/drivers/video/exynos/exynos_mipi_dsi.c index 00b3a52c1d68..cee9602f9a7b 100644 --- a/drivers/video/exynos/exynos_mipi_dsi.c +++ b/drivers/video/exynos/exynos_mipi_dsi.c @@ -141,7 +141,6 @@ static int exynos_mipi_dsi_early_blank_mode(struct mipi_dsim_device *dsim, static int exynos_mipi_dsi_blank_mode(struct mipi_dsim_device *dsim, int power) { - struct platform_device *pdev = to_platform_device(dsim->dev); struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv; struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev; diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 6df632e0bb55..5be6e919f785 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -392,6 +392,25 @@ config RETU_WATCHDOG To compile this driver as a module, choose M here: the module will be called retu_wdt. +config MOXART_WDT + tristate "MOXART watchdog" + depends on ARCH_MOXART + help + Say Y here to include Watchdog timer support for the watchdog + existing on the MOXA ART SoC series platforms. + + To compile this driver as a module, choose M here: the + module will be called moxart_wdt. + +config SIRFSOC_WATCHDOG + tristate "SiRFSOC watchdog" + depends on ARCH_SIRF + select WATCHDOG_CORE + default y + help + Support for CSR SiRFprimaII and SiRFatlasVI watchdog. When + the watchdog triggers the system will be reset. + # AVR32 Architecture config AT32AP700X_WDT @@ -866,6 +885,7 @@ config VIA_WDT config W83627HF_WDT tristate "W83627HF/W83627DHG Watchdog Timer" depends on X86 + select WATCHDOG_CORE ---help--- This is the driver for the hardware watchdog on the W83627HF chipset as used in Advantech PC-9578 and Tyan S2721-533 motherboards @@ -1125,6 +1145,13 @@ config LANTIQ_WDT help Hardware driver for the Lantiq SoC Watchdog Timer. +config RALINK_WDT + tristate "Ralink SoC watchdog" + select WATCHDOG_CORE + depends on RALINK + help + Hardware driver for the Ralink SoC Watchdog Timer. 
+ # PARISC Architecture # POWERPC Architecture diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 8c7b8bcbbdc5..91bd95a64baf 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -55,6 +55,8 @@ obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o obj-$(CONFIG_UX500_WATCHDOG) += ux500_wdt.o obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o +obj-$(CONFIG_MOXART_WDT) += moxart_wdt.o +obj-$(CONFIG_SIRFSOC_WATCHDOG) += sirfsoc_wdt.o # AVR32 Architecture obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o @@ -134,6 +136,7 @@ obj-$(CONFIG_TXX9_WDT) += txx9wdt.o obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o +obj-$(CONFIG_RALINK_WDT) += rt2880_wdt.o # PARISC Architecture diff --git a/drivers/watchdog/acquirewdt.c b/drivers/watchdog/acquirewdt.c index 24a517777fa0..5cf1621def9c 100644 --- a/drivers/watchdog/acquirewdt.c +++ b/drivers/watchdog/acquirewdt.c @@ -60,8 +60,7 @@ #include <linux/types.h> /* For standard types (like size_t) */ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/panic/... */ -#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV - (WATCHDOG_MINOR) */ +#include <linux/miscdevice.h> /* For struct miscdevice */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/fs.h> /* For file operations */ #include <linux/ioport.h> /* For io-port access */ @@ -337,4 +336,3 @@ module_exit(acq_exit); MODULE_AUTHOR("David Woodhouse"); MODULE_DESCRIPTION("Acquire Inc. Single Board Computer Watchdog Timer driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/advantechwdt.c b/drivers/watchdog/advantechwdt.c index cc6702fc5268..a8961addc59c 100644 --- a/drivers/watchdog/advantechwdt.c +++ b/drivers/watchdog/advantechwdt.c @@ -345,4 +345,3 @@ module_exit(advwdt_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Marek Michalkiewicz <marekm@linux.org.pl>"); MODULE_DESCRIPTION("Advantech Single Board Computer WDT driver"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c index 41b84936a521..fbb7b94cabfd 100644 --- a/drivers/watchdog/alim1535_wdt.c +++ b/drivers/watchdog/alim1535_wdt.c @@ -452,4 +452,3 @@ module_exit(watchdog_exit); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("ALi M1535 PMU Watchdog Timer driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c index 5eee55012e33..12f0b762b528 100644 --- a/drivers/watchdog/alim7101_wdt.c +++ b/drivers/watchdog/alim7101_wdt.c @@ -425,4 +425,3 @@ MODULE_DEVICE_TABLE(pci, alim7101_pci_tbl); MODULE_AUTHOR("Steve Hill"); MODULE_DESCRIPTION("ALi M7101 PMU Computer Watchdog Timer driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c index b3709f9cf5be..3a996576343a 100644 --- a/drivers/watchdog/ar7_wdt.c +++ b/drivers/watchdog/ar7_wdt.c @@ -46,7 +46,6 @@ MODULE_AUTHOR("Nicolas Thill <nico@openwrt.org>"); MODULE_DESCRIPTION(LONGNAME); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); static int margin = 60; module_param(margin, int, 0); diff --git a/drivers/watchdog/at32ap700x_wdt.c b/drivers/watchdog/at32ap700x_wdt.c index b178e717ef09..afe7d17e6776 100644 --- a/drivers/watchdog/at32ap700x_wdt.c +++ b/drivers/watchdog/at32ap700x_wdt.c @@ 
-434,4 +434,3 @@ module_platform_driver_probe(at32_wdt_driver, at32_wdt_probe); MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>"); MODULE_DESCRIPTION("Watchdog driver for Atmel AT32AP700X"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c index 1c75260b987c..dee6cc21d270 100644 --- a/drivers/watchdog/at91rm9200_wdt.c +++ b/drivers/watchdog/at91rm9200_wdt.c @@ -269,7 +269,7 @@ static struct platform_driver at91wdt_driver = { .driver = { .name = "at91_wdt", .owner = THIS_MODULE, - .of_match_table = of_match_ptr(at91_wdt_dt_ids), + .of_match_table = at91_wdt_dt_ids, }, }; @@ -297,5 +297,4 @@ module_exit(at91_wdt_exit); MODULE_AUTHOR("Andrew Victor"); MODULE_DESCRIPTION("Watchdog driver for Atmel AT91RM9200"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:at91_wdt"); diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c index 37cb09b27b63..9fa1f69dac13 100644 --- a/drivers/watchdog/ath79_wdt.c +++ b/drivers/watchdog/ath79_wdt.c @@ -329,4 +329,3 @@ MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org"); MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRIVER_NAME); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c index 61566fc47f84..a6a2cebb2587 100644 --- a/drivers/watchdog/bcm2835_wdt.c +++ b/drivers/watchdog/bcm2835_wdt.c @@ -186,4 +186,3 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>"); MODULE_DESCRIPTION("Driver for Broadcom BCM2835 watchdog timer"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c index a14a58d9d110..4eb188b87f8e 100644 --- a/drivers/watchdog/bcm63xx_wdt.c +++ b/drivers/watchdog/bcm63xx_wdt.c @@ -317,5 +317,4 @@ MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>"); MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); MODULE_DESCRIPTION("Driver for the Broadcom BCM63xx SoC watchdog"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:bcm63xx-wdt"); diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c index 5d36d6fb4969..a3b6a5b30f9f 100644 --- a/drivers/watchdog/bfin_wdt.c +++ b/drivers/watchdog/bfin_wdt.c @@ -465,7 +465,6 @@ module_exit(bfin_wdt_exit); MODULE_AUTHOR("Michele d'Amico, Mike Frysinger <vapier@gentoo.org>"); MODULE_DESCRIPTION("Blackfin Watchdog Device Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); module_param(timeout, uint, 0); MODULE_PARM_DESC(timeout, diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c index f270bb7bc456..f7ae49edb518 100644 --- a/drivers/watchdog/cpu5wdt.c +++ b/drivers/watchdog/cpu5wdt.c @@ -289,7 +289,6 @@ MODULE_AUTHOR("Heiko Ronsdorf <hero@ihg.uni-duisburg.de>"); MODULE_DESCRIPTION("sma cpu5 watchdog driver"); MODULE_SUPPORTED_DEVICE("sma cpu5 watchdog"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); module_param(port, int, 0); MODULE_PARM_DESC(port, "base address of watchdog card, default is 0x91"); diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c index bead7740c86a..dd625cca1ae5 100644 --- a/drivers/watchdog/davinci_wdt.c +++ b/drivers/watchdog/davinci_wdt.c @@ -267,5 +267,4 @@ MODULE_PARM_DESC(heartbeat, __MODULE_STRING(DEFAULT_HEARTBEAT)); 
MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:watchdog"); diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c index e621098bf663..a46f5c7ee7ff 100644 --- a/drivers/watchdog/dw_wdt.c +++ b/drivers/watchdog/dw_wdt.c @@ -29,6 +29,7 @@ #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/of.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/spinlock.h> @@ -203,12 +204,12 @@ static long dw_wdt_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) switch (cmd) { case WDIOC_GETSUPPORT: - return copy_to_user((struct watchdog_info *)arg, &dw_wdt_ident, + return copy_to_user((void __user *)arg, &dw_wdt_ident, sizeof(dw_wdt_ident)) ? -EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: - return put_user(0, (int *)arg); + return put_user(0, (int __user *)arg); case WDIOC_KEEPALIVE: dw_wdt_set_next_heartbeat(); @@ -252,17 +253,17 @@ static int dw_wdt_release(struct inode *inode, struct file *filp) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int dw_wdt_suspend(struct device *dev) { - clk_disable(dw_wdt.clk); + clk_disable_unprepare(dw_wdt.clk); return 0; } static int dw_wdt_resume(struct device *dev) { - int err = clk_enable(dw_wdt.clk); + int err = clk_prepare_enable(dw_wdt.clk); if (err) return err; @@ -271,12 +272,9 @@ static int dw_wdt_resume(struct device *dev) return 0; } +#endif /* CONFIG_PM_SLEEP */ -static const struct dev_pm_ops dw_wdt_pm_ops = { - .suspend = dw_wdt_suspend, - .resume = dw_wdt_resume, -}; -#endif /* CONFIG_PM */ +static SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, dw_wdt_suspend, dw_wdt_resume); static const struct file_operations wdt_fops = { .owner = THIS_MODULE, @@ -309,7 +307,7 @@ static int dw_wdt_drv_probe(struct platform_device *pdev) if (IS_ERR(dw_wdt.clk)) return PTR_ERR(dw_wdt.clk); - ret = clk_enable(dw_wdt.clk); + ret = clk_prepare_enable(dw_wdt.clk); if (ret) return ret; @@ -326,7 +324,7 @@ static int dw_wdt_drv_probe(struct platform_device *pdev) return 0; out_disable_clk: - clk_disable(dw_wdt.clk); + clk_disable_unprepare(dw_wdt.clk); return ret; } @@ -335,20 +333,27 @@ static int dw_wdt_drv_remove(struct platform_device *pdev) { misc_deregister(&dw_wdt_miscdev); - clk_disable(dw_wdt.clk); + clk_disable_unprepare(dw_wdt.clk); return 0; } +#ifdef CONFIG_OF +static const struct of_device_id dw_wdt_of_match[] = { + { .compatible = "snps,dw-wdt", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, dw_wdt_of_match); +#endif + static struct platform_driver dw_wdt_driver = { .probe = dw_wdt_drv_probe, .remove = dw_wdt_drv_remove, .driver = { .name = "dw_wdt", .owner = THIS_MODULE, -#ifdef CONFIG_PM + .of_match_table = of_match_ptr(dw_wdt_of_match), .pm = &dw_wdt_pm_ops, -#endif /* CONFIG_PM */ }, }; @@ -357,4 +362,3 @@ module_platform_driver(dw_wdt_driver); MODULE_AUTHOR("Jamie Iles"); MODULE_DESCRIPTION("Synopsys DesignWare Watchdog Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c index e0574844c313..833e81311848 100644 --- a/drivers/watchdog/ep93xx_wdt.c +++ b/drivers/watchdog/ep93xx_wdt.c @@ -179,4 +179,3 @@ MODULE_AUTHOR("Ray Lehtiniemi <rayl@mail.com>," MODULE_DESCRIPTION("EP93xx Watchdog"); MODULE_LICENSE("GPL"); MODULE_VERSION(WDT_VERSION); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c index cd31b8a2a729..23ee53240c4c 100644 --- 
a/drivers/watchdog/eurotechwdt.c +++ b/drivers/watchdog/eurotechwdt.c @@ -477,4 +477,3 @@ module_exit(eurwdt_exit); MODULE_AUTHOR("Rodolfo Giometti"); MODULE_DESCRIPTION("Driver for Eurotech CPU-1220/1410 on board watchdog"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c index 3755833430dc..25beb30878d7 100644 --- a/drivers/watchdog/gef_wdt.c +++ b/drivers/watchdog/gef_wdt.c @@ -331,5 +331,4 @@ module_exit(gef_wdt_exit); MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>"); MODULE_DESCRIPTION("GE watchdog driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:gef_wdt"); diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c index fcd599d4e225..4a6ae84b42bc 100644 --- a/drivers/watchdog/geodewdt.c +++ b/drivers/watchdog/geodewdt.c @@ -297,4 +297,3 @@ module_exit(geodewdt_exit); MODULE_AUTHOR("Advanced Micro Devices, Inc"); MODULE_DESCRIPTION("Geode GX/LX Watchdog Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 19f3c3fc65f4..45b979d9dd13 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -881,7 +881,6 @@ MODULE_AUTHOR("Tom Mingarelli"); MODULE_DESCRIPTION("hp watchdog driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(HPWDT_VERSION); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); module_param(soft_margin, int, 0); MODULE_PARM_DESC(soft_margin, "Watchdog timeout in seconds"); diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c index 2b2ea13d03ea..a72fe9361ddf 100644 --- a/drivers/watchdog/i6300esb.c +++ b/drivers/watchdog/i6300esb.c @@ -497,4 +497,3 @@ module_pci_driver(esb_driver); MODULE_AUTHOR("Ross Biro and David Härdeman"); MODULE_DESCRIPTION("Watchdog driver for Intel 6300ESB chipsets"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index 6130321da387..04f8af65acfd 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -56,8 +56,6 @@ #include <linux/types.h> /* For standard types (like size_t) */ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/panic/... */ -#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV - (WATCHDOG_MINOR) */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/init.h> /* For __init/__exit/... 
*/ #include <linux/fs.h> /* For file operations */ @@ -394,7 +392,7 @@ static int iTCO_wdt_probe(struct platform_device *dev) { int ret = -ENODEV; unsigned long val32; - struct lpc_ich_info *ich_info = dev->dev.platform_data; + struct lpc_ich_info *ich_info = dev_get_platdata(&dev->dev); if (!ich_info) goto out; @@ -582,5 +580,4 @@ MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>"); MODULE_DESCRIPTION("Intel TCO WatchDog Timer Driver"); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/watchdog/ib700wdt.c b/drivers/watchdog/ib700wdt.c index eb6b5cc98ec6..7ae36690c449 100644 --- a/drivers/watchdog/ib700wdt.c +++ b/drivers/watchdog/ib700wdt.c @@ -382,6 +382,5 @@ module_exit(ibwdt_exit); MODULE_AUTHOR("Charles Howes <chowes@vsol.net>"); MODULE_DESCRIPTION("IB700 SBC watchdog driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); /* end of ib700wdt.c */ diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c index bc3fb8fe89ab..db0a34460e57 100644 --- a/drivers/watchdog/ibmasr.c +++ b/drivers/watchdog/ibmasr.c @@ -419,4 +419,3 @@ MODULE_PARM_DESC(nowayout, MODULE_DESCRIPTION("IBM Automatic Server Restart driver"); MODULE_AUTHOR("Andrey Panin"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/ie6xx_wdt.c b/drivers/watchdog/ie6xx_wdt.c index e24ef6a6e064..70a240297c6d 100644 --- a/drivers/watchdog/ie6xx_wdt.c +++ b/drivers/watchdog/ie6xx_wdt.c @@ -344,5 +344,4 @@ module_exit(ie6xx_wdt_exit); MODULE_AUTHOR("Alexander Stein <alexander.stein@systec-electronic.com>"); MODULE_DESCRIPTION("Intel Atom E6xx Watchdog Device Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c index 693ac3f4de5a..b4786bccc42c 100644 --- a/drivers/watchdog/imx2_wdt.c +++ b/drivers/watchdog/imx2_wdt.c @@ -322,6 +322,7 @@ static const struct of_device_id imx2_wdt_dt_ids[] = { { .compatible = "fsl,imx21-wdt", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, imx2_wdt_dt_ids); static struct platform_driver imx2_wdt_driver = { .remove = __exit_p(imx2_wdt_remove), @@ -338,5 +339,4 @@ module_platform_driver_probe(imx2_wdt_driver, imx2_wdt_probe); MODULE_AUTHOR("Wolfram Sang"); MODULE_DESCRIPTION("Watchdog driver for IMX2 and later"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/watchdog/indydog.c b/drivers/watchdog/indydog.c index 6d90f7a2ce22..1b5c25a47b87 100644 --- a/drivers/watchdog/indydog.c +++ b/drivers/watchdog/indydog.c @@ -214,4 +214,3 @@ module_exit(watchdog_exit); MODULE_AUTHOR("Guido Guenther <agx@sigxcpu.org>"); MODULE_DESCRIPTION("Hardware Watchdog Device for SGI IP22"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c index 8ced25613956..e13e65e996aa 100644 --- a/drivers/watchdog/intel_scu_watchdog.c +++ b/drivers/watchdog/intel_scu_watchdog.c @@ -564,5 +564,4 @@ module_exit(intel_scu_watchdog_exit); MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_VERSION(WDT_VER); diff --git a/drivers/watchdog/iop_wdt.c b/drivers/watchdog/iop_wdt.c index d964faf1a250..b16013ffacc2 100644 --- a/drivers/watchdog/iop_wdt.c +++ 
b/drivers/watchdog/iop_wdt.c @@ -259,4 +259,3 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); MODULE_AUTHOR("Curt E Bruns <curt.e.bruns@intel.com>"); MODULE_DESCRIPTION("iop watchdog timer driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c index f4cce6d66a55..41b3979a9d87 100644 --- a/drivers/watchdog/it8712f_wdt.c +++ b/drivers/watchdog/it8712f_wdt.c @@ -41,7 +41,6 @@ MODULE_AUTHOR("Jorge Boncompte - DTI2 <jorge@dti2.net>"); MODULE_DESCRIPTION("IT8712F Watchdog Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); static int max_units = 255; static int margin = 60; /* in seconds */ diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c index d3dcc6988b5f..e2bba68ae71e 100644 --- a/drivers/watchdog/it87_wdt.c +++ b/drivers/watchdog/it87_wdt.c @@ -772,4 +772,3 @@ module_exit(it87_wdt_exit); MODULE_AUTHOR("Oliver Schuster"); MODULE_DESCRIPTION("Hardware Watchdog Device Driver for IT87xx EC-LPC I/O"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c index 5580b4fff7fe..f20cc53ff719 100644 --- a/drivers/watchdog/ixp4xx_wdt.c +++ b/drivers/watchdog/ixp4xx_wdt.c @@ -208,5 +208,3 @@ module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); - diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c index d1afdf684c18..2de486a7eea1 100644 --- a/drivers/watchdog/jz4740_wdt.c +++ b/drivers/watchdog/jz4740_wdt.c @@ -222,5 +222,4 @@ module_platform_driver(jz4740_wdt_driver); MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>"); MODULE_DESCRIPTION("jz4740 Watchdog Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:jz4740-wdt"); diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c index 5c3d4df63e68..a1a3638c579c 100644 --- a/drivers/watchdog/kempld_wdt.c +++ b/drivers/watchdog/kempld_wdt.c @@ -67,7 +67,7 @@ enum { PRESCALER_12, }; -const u32 kempld_prescaler[] = { +static const u32 kempld_prescaler[] = { [PRESCALER_21] = (1 << 21) - 1, [PRESCALER_17] = (1 << 17) - 1, [PRESCALER_12] = (1 << 12) - 1, @@ -361,7 +361,7 @@ static long kempld_wdt_ioctl(struct watchdog_device *wdd, unsigned int cmd, ret = kempld_wdt_keepalive(wdd); break; case WDIOC_GETPRETIMEOUT: - ret = put_user(wdt_data->pretimeout, (int *)arg); + ret = put_user(wdt_data->pretimeout, (int __user *)arg); break; } @@ -578,4 +578,3 @@ module_platform_driver(kempld_wdt_driver); MODULE_DESCRIPTION("KEM PLD Watchdog Driver"); MODULE_AUTHOR("Michael Brunner <michael.brunner@kontron.com>"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c index dce9ecffd44a..40ca5594a336 100644 --- a/drivers/watchdog/ks8695_wdt.c +++ b/drivers/watchdog/ks8695_wdt.c @@ -323,5 +323,4 @@ module_exit(ks8695_wdt_exit); MODULE_AUTHOR("Andrew Victor"); MODULE_DESCRIPTION("Watchdog driver for KS8695"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:ks8695_wdt"); diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c index 088fd0c9d888..3b3148c764a3 100644 --- a/drivers/watchdog/lantiq_wdt.c +++ b/drivers/watchdog/lantiq_wdt.c @@ -249,4 +249,3 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped 
once started"); MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); MODULE_DESCRIPTION("Lantiq SoC Watchdog"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/m54xx_wdt.c b/drivers/watchdog/m54xx_wdt.c index 173494a681e6..da6fa2b68074 100644 --- a/drivers/watchdog/m54xx_wdt.c +++ b/drivers/watchdog/m54xx_wdt.c @@ -223,4 +223,3 @@ module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c index bf84f788e592..9826b59ef734 100644 --- a/drivers/watchdog/machzwd.c +++ b/drivers/watchdog/machzwd.c @@ -92,7 +92,6 @@ static unsigned short zf_readw(unsigned char port) MODULE_AUTHOR("Fernando Fuganti <fuganti@conectiva.com.br>"); MODULE_DESCRIPTION("MachZ ZF-Logic Watchdog driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c index cc9d328086ed..6d4f3998e1f6 100644 --- a/drivers/watchdog/max63xx_wdt.c +++ b/drivers/watchdog/max63xx_wdt.c @@ -258,4 +258,3 @@ MODULE_PARM_DESC(nodelay, "(max6373/74 only, default=0)"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/mixcomwd.c b/drivers/watchdog/mixcomwd.c index 97d62ee50341..be86ea359eee 100644 --- a/drivers/watchdog/mixcomwd.c +++ b/drivers/watchdog/mixcomwd.c @@ -315,4 +315,3 @@ MODULE_AUTHOR("Gergely Madarasz <gorgo@itc.hu>"); MODULE_DESCRIPTION("MixCom Watchdog driver"); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c new file mode 100644 index 000000000000..4166e4d116a8 --- /dev/null +++ b/drivers/watchdog/moxart_wdt.c @@ -0,0 +1,165 @@ +/* + * MOXA ART SoCs watchdog driver. + * + * Copyright (C) 2013 Jonas Jensen + * + * Jonas Jensen <jonas.jensen@gmail.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/watchdog.h> +#include <linux/moduleparam.h> + +#define REG_COUNT 0x4 +#define REG_MODE 0x8 +#define REG_ENABLE 0xC + +struct moxart_wdt_dev { + struct watchdog_device dev; + void __iomem *base; + unsigned int clock_frequency; +}; + +static int heartbeat; + +static int moxart_wdt_stop(struct watchdog_device *wdt_dev) +{ + struct moxart_wdt_dev *moxart_wdt = watchdog_get_drvdata(wdt_dev); + + writel(0, moxart_wdt->base + REG_ENABLE); + + return 0; +} + +static int moxart_wdt_start(struct watchdog_device *wdt_dev) +{ + struct moxart_wdt_dev *moxart_wdt = watchdog_get_drvdata(wdt_dev); + + writel(moxart_wdt->clock_frequency * wdt_dev->timeout, + moxart_wdt->base + REG_COUNT); + writel(0x5ab9, moxart_wdt->base + REG_MODE); + writel(0x03, moxart_wdt->base + REG_ENABLE); + + return 0; +} + +static int moxart_wdt_set_timeout(struct watchdog_device *wdt_dev, + unsigned int timeout) +{ + wdt_dev->timeout = timeout; + + return 0; +} + +static const struct watchdog_info moxart_wdt_info = { + .identity = "moxart-wdt", + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | + WDIOF_MAGICCLOSE, +}; + +static const struct watchdog_ops moxart_wdt_ops = { + .owner = THIS_MODULE, + .start = moxart_wdt_start, + .stop = moxart_wdt_stop, + .set_timeout = moxart_wdt_set_timeout, +}; + +static int moxart_wdt_probe(struct platform_device *pdev) +{ + struct moxart_wdt_dev *moxart_wdt; + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct resource *res; + struct clk *clk; + int err; + unsigned int max_timeout; + bool nowayout = WATCHDOG_NOWAYOUT; + + moxart_wdt = devm_kzalloc(dev, sizeof(*moxart_wdt), GFP_KERNEL); + if (!moxart_wdt) + return -ENOMEM; + + platform_set_drvdata(pdev, moxart_wdt); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + moxart_wdt->base = devm_ioremap_resource(dev, res); + if (IS_ERR(moxart_wdt->base)) + return PTR_ERR(moxart_wdt->base); + + clk = of_clk_get(node, 0); + if (IS_ERR(clk)) { + pr_err("%s: of_clk_get failed\n", __func__); + return PTR_ERR(clk); + } + + moxart_wdt->clock_frequency = clk_get_rate(clk); + if (moxart_wdt->clock_frequency == 0) { + pr_err("%s: incorrect clock frequency\n", __func__); + return -EINVAL; + } + + max_timeout = UINT_MAX / moxart_wdt->clock_frequency; + + moxart_wdt->dev.info = &moxart_wdt_info; + moxart_wdt->dev.ops = &moxart_wdt_ops; + moxart_wdt->dev.timeout = max_timeout; + moxart_wdt->dev.min_timeout = 1; + moxart_wdt->dev.max_timeout = max_timeout; + moxart_wdt->dev.parent = dev; + + watchdog_init_timeout(&moxart_wdt->dev, heartbeat, dev); + watchdog_set_nowayout(&moxart_wdt->dev, nowayout); + + watchdog_set_drvdata(&moxart_wdt->dev, moxart_wdt); + + err = watchdog_register_device(&moxart_wdt->dev); + if (err) + return err; + + dev_dbg(dev, "Watchdog enabled (heartbeat=%d sec, nowayout=%d)\n", + moxart_wdt->dev.timeout, nowayout); + + return 0; +} + +static int moxart_wdt_remove(struct platform_device *pdev) +{ + struct moxart_wdt_dev *moxart_wdt = platform_get_drvdata(pdev); + + moxart_wdt_stop(&moxart_wdt->dev); + watchdog_unregister_device(&moxart_wdt->dev); + + return 0; +} + +static const struct of_device_id moxart_watchdog_match[] = { + { .compatible = "moxa,moxart-watchdog" }, + { }, +}; + +static struct platform_driver moxart_wdt_driver = { + .probe = moxart_wdt_probe, + .remove = moxart_wdt_remove, + .driver = { + .name = 
"moxart-watchdog", + .owner = THIS_MODULE, + .of_match_table = moxart_watchdog_match, + }, +}; +module_platform_driver(moxart_wdt_driver); + +module_param(heartbeat, int, 0); +MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds"); + +MODULE_DESCRIPTION("MOXART watchdog driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>"); diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c index d0ebebae607c..d82152077fd9 100644 --- a/drivers/watchdog/mpc8xxx_wdt.c +++ b/drivers/watchdog/mpc8xxx_wdt.c @@ -330,4 +330,3 @@ MODULE_AUTHOR("Dave Updegraff, Kumar Gala"); MODULE_DESCRIPTION("Driver for watchdog timer in MPC8xx/MPC83xx/MPC86xx " "uProcessors"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c index b4341110ad4f..edb31ffd7927 100644 --- a/drivers/watchdog/mtx-1_wdt.c +++ b/drivers/watchdog/mtx-1_wdt.c @@ -257,5 +257,4 @@ module_platform_driver(mtx1_wdt_driver); MODULE_AUTHOR("Michael Stickel, Florian Fainelli"); MODULE_DESCRIPTION("Driver for the MTX-1 watchdog"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:mtx1-wdt"); diff --git a/drivers/watchdog/mv64x60_wdt.c b/drivers/watchdog/mv64x60_wdt.c index e4cf98019265..f9fa58409396 100644 --- a/drivers/watchdog/mv64x60_wdt.c +++ b/drivers/watchdog/mv64x60_wdt.c @@ -255,7 +255,7 @@ static struct miscdevice mv64x60_wdt_miscdev = { static int mv64x60_wdt_probe(struct platform_device *dev) { - struct mv64x60_wdt_pdata *pdata = dev->dev.platform_data; + struct mv64x60_wdt_pdata *pdata = dev_get_platdata(&dev->dev); struct resource *r; int timeout = 10; @@ -323,5 +323,4 @@ module_exit(mv64x60_wdt_exit); MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); MODULE_DESCRIPTION("MV64x60 watchdog driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:" MV64x60_WDT_NAME); diff --git a/drivers/watchdog/nuc900_wdt.c b/drivers/watchdog/nuc900_wdt.c index b15b6efd91a1..a0d893b0930e 100644 --- a/drivers/watchdog/nuc900_wdt.c +++ b/drivers/watchdog/nuc900_wdt.c @@ -307,5 +307,4 @@ module_platform_driver(nuc900wdt_driver); MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); MODULE_DESCRIPTION("Watchdog driver for NUC900"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:nuc900-wdt"); diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c index 59cf19eeea07..231e5b9d5c8e 100644 --- a/drivers/watchdog/nv_tco.c +++ b/drivers/watchdog/nv_tco.c @@ -513,4 +513,3 @@ module_exit(nv_tco_cleanup_module); MODULE_AUTHOR("Mike Waychison"); MODULE_DESCRIPTION("TCO timer driver for NV chipsets"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c index 4dd281f2c33f..fb57103c8ebc 100644 --- a/drivers/watchdog/of_xilinx_wdt.c +++ b/drivers/watchdog/of_xilinx_wdt.c @@ -405,4 +405,3 @@ module_platform_driver(xwdt_driver); MODULE_AUTHOR("Alejandro Cabrera <aldaya@gmail.com>"); MODULE_DESCRIPTION("Xilinx Watchdog driver"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c index af88ffd1068f..09cf0135e8ac 100644 --- a/drivers/watchdog/omap_wdt.c +++ b/drivers/watchdog/omap_wdt.c @@ -68,14 +68,14 @@ static void omap_wdt_reload(struct omap_wdt_dev *wdev) void __iomem *base = wdev->base; /* wait for posted write to complete */ - while 
((__raw_readl(base + OMAP_WATCHDOG_WPS)) & 0x08) + while ((readl_relaxed(base + OMAP_WATCHDOG_WPS)) & 0x08) cpu_relax(); wdev->wdt_trgr_pattern = ~wdev->wdt_trgr_pattern; - __raw_writel(wdev->wdt_trgr_pattern, (base + OMAP_WATCHDOG_TGR)); + writel_relaxed(wdev->wdt_trgr_pattern, (base + OMAP_WATCHDOG_TGR)); /* wait for posted write to complete */ - while ((__raw_readl(base + OMAP_WATCHDOG_WPS)) & 0x08) + while ((readl_relaxed(base + OMAP_WATCHDOG_WPS)) & 0x08) cpu_relax(); /* reloaded WCRR from WLDR */ } @@ -85,12 +85,12 @@ static void omap_wdt_enable(struct omap_wdt_dev *wdev) void __iomem *base = wdev->base; /* Sequence to enable the watchdog */ - __raw_writel(0xBBBB, base + OMAP_WATCHDOG_SPR); - while ((__raw_readl(base + OMAP_WATCHDOG_WPS)) & 0x10) + writel_relaxed(0xBBBB, base + OMAP_WATCHDOG_SPR); + while ((readl_relaxed(base + OMAP_WATCHDOG_WPS)) & 0x10) cpu_relax(); - __raw_writel(0x4444, base + OMAP_WATCHDOG_SPR); - while ((__raw_readl(base + OMAP_WATCHDOG_WPS)) & 0x10) + writel_relaxed(0x4444, base + OMAP_WATCHDOG_SPR); + while ((readl_relaxed(base + OMAP_WATCHDOG_WPS)) & 0x10) cpu_relax(); } @@ -99,12 +99,12 @@ static void omap_wdt_disable(struct omap_wdt_dev *wdev) void __iomem *base = wdev->base; /* sequence required to disable watchdog */ - __raw_writel(0xAAAA, base + OMAP_WATCHDOG_SPR); /* TIMER_MODE */ - while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x10) + writel_relaxed(0xAAAA, base + OMAP_WATCHDOG_SPR); /* TIMER_MODE */ + while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x10) cpu_relax(); - __raw_writel(0x5555, base + OMAP_WATCHDOG_SPR); /* TIMER_MODE */ - while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x10) + writel_relaxed(0x5555, base + OMAP_WATCHDOG_SPR); /* TIMER_MODE */ + while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x10) cpu_relax(); } @@ -115,11 +115,11 @@ static void omap_wdt_set_timer(struct omap_wdt_dev *wdev, void __iomem *base = wdev->base; /* just count up at 32 KHz */ - while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04) + while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x04) cpu_relax(); - __raw_writel(pre_margin, base + OMAP_WATCHDOG_LDR); - while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04) + writel_relaxed(pre_margin, base + OMAP_WATCHDOG_LDR); + while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x04) cpu_relax(); } @@ -135,11 +135,11 @@ static int omap_wdt_start(struct watchdog_device *wdog) pm_runtime_get_sync(wdev->dev); /* initialize prescaler */ - while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x01) + while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x01) cpu_relax(); - __raw_writel((1 << 5) | (PTV << 2), base + OMAP_WATCHDOG_CNTRL); - while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x01) + writel_relaxed((1 << 5) | (PTV << 2), base + OMAP_WATCHDOG_CNTRL); + while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x01) cpu_relax(); omap_wdt_set_timer(wdev, wdog->timeout); @@ -205,7 +205,7 @@ static const struct watchdog_ops omap_wdt_ops = { static int omap_wdt_probe(struct platform_device *pdev) { - struct omap_wd_timer_platform_data *pdata = pdev->dev.platform_data; + struct omap_wd_timer_platform_data *pdata = dev_get_platdata(&pdev->dev); struct watchdog_device *omap_wdt; struct resource *res, *mem; struct omap_wdt_dev *wdev; @@ -275,7 +275,7 @@ static int omap_wdt_probe(struct platform_device *pdev) } pr_info("OMAP Watchdog Timer Rev 0x%02x: initial timeout %d sec\n", - __raw_readl(wdev->base + OMAP_WATCHDOG_REV) & 0xFF, + readl_relaxed(wdev->base + OMAP_WATCHDOG_REV) & 0xFF, omap_wdt->timeout); pm_runtime_put_sync(wdev->dev); diff --git 
a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c index 4ea5fcccac02..44edca66d564 100644 --- a/drivers/watchdog/orion_wdt.c +++ b/drivers/watchdog/orion_wdt.c @@ -207,7 +207,7 @@ static struct platform_driver orion_wdt_driver = { .driver = { .owner = THIS_MODULE, .name = "orion_wdt", - .of_match_table = of_match_ptr(orion_wdt_of_match_table), + .of_match_table = orion_wdt_of_match_table, }, }; @@ -225,4 +225,3 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:orion_wdt"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c index 5afb89b48650..5211d56b3681 100644 --- a/drivers/watchdog/pc87413_wdt.c +++ b/drivers/watchdog/pc87413_wdt.c @@ -580,8 +580,6 @@ MODULE_AUTHOR("Sven Anders <anders@anduras.de>, " MODULE_DESCRIPTION("PC87413 WDT driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); - module_param(io, int, 0); MODULE_PARM_DESC(io, MODNAME " I/O port (default: " __MODULE_STRING(IO_DEFAULT) ")."); diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c index 33e49a7f889f..e936f15dc7c7 100644 --- a/drivers/watchdog/pcwd.c +++ b/drivers/watchdog/pcwd.c @@ -61,7 +61,7 @@ #include <linux/delay.h> /* For mdelay function */ #include <linux/timer.h> /* For timer related operations */ #include <linux/jiffies.h> /* For jiffies stuff */ -#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */ +#include <linux/miscdevice.h> /* For struct miscdevice */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/reboot.h> /* For kernel_power_off() */ #include <linux/init.h> /* For __init/__exit/... */ @@ -1011,5 +1011,3 @@ MODULE_AUTHOR("Ken Hollis <kenji@bitgate.com>, " MODULE_DESCRIPTION("Berkshire ISA-PC Watchdog driver"); MODULE_VERSION(WATCHDOG_VERSION); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); -MODULE_ALIAS_MISCDEV(TEMP_MINOR); diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c index 7890f84edf76..b4864f254b48 100644 --- a/drivers/watchdog/pcwd_pci.c +++ b/drivers/watchdog/pcwd_pci.c @@ -40,7 +40,7 @@ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/panic/... */ #include <linux/delay.h> /* For mdelay function */ -#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */ +#include <linux/miscdevice.h> /* For struct miscdevice */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/notifier.h> /* For notifier support */ #include <linux/reboot.h> /* For reboot_notifier stuff */ @@ -820,5 +820,3 @@ module_pci_driver(pcipcwd_driver); MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>"); MODULE_DESCRIPTION("Berkshire PCI-PC Watchdog driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); -MODULE_ALIAS_MISCDEV(TEMP_MINOR); diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c index 7b14d1847927..b731b5d129be 100644 --- a/drivers/watchdog/pcwd_usb.c +++ b/drivers/watchdog/pcwd_usb.c @@ -32,7 +32,7 @@ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/panic/... 
*/ #include <linux/delay.h> /* For mdelay function */ -#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */ +#include <linux/miscdevice.h> /* For struct miscdevice */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/notifier.h> /* For notifier support */ #include <linux/reboot.h> /* For reboot_notifier stuff */ @@ -72,8 +72,6 @@ do { \ MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE(DRIVER_LICENSE); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); -MODULE_ALIAS_MISCDEV(TEMP_MINOR); /* Module Parameters */ module_param(debug, int, 0); @@ -235,13 +233,17 @@ static int usb_pcwd_send_command(struct usb_pcwd_private *usb_pcwd, unsigned char cmd, unsigned char *msb, unsigned char *lsb) { int got_response, count; - unsigned char buf[6]; + unsigned char *buf; /* We will not send any commands if the USB PCWD device does * not exist */ if ((!usb_pcwd) || (!usb_pcwd->exists)) return -1; + buf = kmalloc(6, GFP_KERNEL); + if (buf == NULL) + return 0; + /* The USB PC Watchdog uses a 6 byte report format. * The board currently uses only 3 of the six bytes of the report. */ buf[0] = cmd; /* Byte 0 = CMD */ @@ -256,8 +258,8 @@ static int usb_pcwd_send_command(struct usb_pcwd_private *usb_pcwd, if (usb_control_msg(usb_pcwd->udev, usb_sndctrlpipe(usb_pcwd->udev, 0), HID_REQ_SET_REPORT, HID_DT_REPORT, - 0x0200, usb_pcwd->interface_number, buf, sizeof(buf), - USB_COMMAND_TIMEOUT) != sizeof(buf)) { + 0x0200, usb_pcwd->interface_number, buf, 6, + USB_COMMAND_TIMEOUT) != 6) { dbg("usb_pcwd_send_command: error in usb_control_msg for " "cmd 0x%x 0x%x 0x%x\n", cmd, *msb, *lsb); } @@ -277,6 +279,8 @@ static int usb_pcwd_send_command(struct usb_pcwd_private *usb_pcwd, *lsb = usb_pcwd->cmd_data_lsb; } + kfree(buf); + return got_response; } diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c index 329bc60ad7a2..0cdfee266690 100644 --- a/drivers/watchdog/pika_wdt.c +++ b/drivers/watchdog/pika_wdt.c @@ -299,5 +299,3 @@ module_exit(pikawdt_exit); MODULE_AUTHOR("Sean MacLennan <smaclennan@pikatech.com>"); MODULE_DESCRIPTION("PIKA FPGA based Watchdog Timer"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); - diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c index b30bd430f591..1bdcc313e1d9 100644 --- a/drivers/watchdog/pnx4008_wdt.c +++ b/drivers/watchdog/pnx4008_wdt.c @@ -233,5 +233,4 @@ MODULE_PARM_DESC(nowayout, "Set to 1 to keep watchdog running after device release"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:pnx4008-watchdog"); diff --git a/drivers/watchdog/pnx833x_wdt.c b/drivers/watchdog/pnx833x_wdt.c index 1b62a7dfcc95..882fdcb46ad1 100644 --- a/drivers/watchdog/pnx833x_wdt.c +++ b/drivers/watchdog/pnx833x_wdt.c @@ -278,4 +278,3 @@ module_exit(watchdog_exit); MODULE_AUTHOR("Daniel Laird/Andre McCurdy"); MODULE_DESCRIPTION("Hardware Watchdog Device for PNX833x"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c index 9cf6bc7a234f..71e78ef4b736 100644 --- a/drivers/watchdog/rc32434_wdt.c +++ b/drivers/watchdog/rc32434_wdt.c @@ -25,8 +25,7 @@ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/panic/... 
*/ #include <linux/fs.h> /* For file operations */ -#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV - (WATCHDOG_MINOR) */ +#include <linux/miscdevice.h> /* For struct miscdevice */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/init.h> /* For __init/__exit/... */ #include <linux/platform_device.h> /* For platform_driver framework */ @@ -329,4 +328,3 @@ MODULE_AUTHOR("Ondrej Zajicek <santiago@crfreenet.org>," "Florian Fainelli <florian@openwrt.org>"); MODULE_DESCRIPTION("Driver for the IDT RC32434 SoC watchdog"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c index b0f116c2fd53..082d06262959 100644 --- a/drivers/watchdog/rdc321x_wdt.c +++ b/drivers/watchdog/rdc321x_wdt.c @@ -231,7 +231,7 @@ static int rdc321x_wdt_probe(struct platform_device *pdev) struct resource *r; struct rdc321x_wdt_pdata *pdata; - pdata = pdev->dev.platform_data; + pdata = dev_get_platdata(&pdev->dev); if (!pdata) { dev_err(&pdev->dev, "no platform data supplied\n"); return -ENODEV; @@ -298,4 +298,3 @@ module_platform_driver(rdc321x_wdt_driver); MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); MODULE_DESCRIPTION("RDC321x watchdog driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c new file mode 100644 index 000000000000..53d37fea183e --- /dev/null +++ b/drivers/watchdog/rt2880_wdt.c @@ -0,0 +1,207 @@ +/* + * Ralink RT288x/RT3xxx/MT76xx built-in hardware watchdog timer + * + * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> + * + * This driver was based on: drivers/watchdog/softdog.c + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ + +#include <linux/clk.h> +#include <linux/reset.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/watchdog.h> +#include <linux/miscdevice.h> +#include <linux/moduleparam.h> +#include <linux/platform_device.h> + +#include <asm/mach-ralink/ralink_regs.h> + +#define SYSC_RSTSTAT 0x38 +#define WDT_RST_CAUSE BIT(1) + +#define RALINK_WDT_TIMEOUT 30 +#define RALINK_WDT_PRESCALE 65536 + +#define TIMER_REG_TMR1LOAD 0x00 +#define TIMER_REG_TMR1CTL 0x08 + +#define TMRSTAT_TMR1RST BIT(5) + +#define TMR1CTL_ENABLE BIT(7) +#define TMR1CTL_MODE_SHIFT 4 +#define TMR1CTL_MODE_MASK 0x3 +#define TMR1CTL_MODE_FREE_RUNNING 0x0 +#define TMR1CTL_MODE_PERIODIC 0x1 +#define TMR1CTL_MODE_TIMEOUT 0x2 +#define TMR1CTL_MODE_WDT 0x3 +#define TMR1CTL_PRESCALE_MASK 0xf +#define TMR1CTL_PRESCALE_65536 0xf + +static struct clk *rt288x_wdt_clk; +static unsigned long rt288x_wdt_freq; +static void __iomem *rt288x_wdt_base; + +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, + "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +static inline void rt_wdt_w32(unsigned reg, u32 val) +{ + iowrite32(val, rt288x_wdt_base + reg); +} + +static inline u32 rt_wdt_r32(unsigned reg) +{ + return ioread32(rt288x_wdt_base + reg); +} + +static int rt288x_wdt_ping(struct watchdog_device *w) +{ + rt_wdt_w32(TIMER_REG_TMR1LOAD, w->timeout * rt288x_wdt_freq); + + return 0; +} + +static int rt288x_wdt_start(struct watchdog_device *w) +{ + u32 t; + + t = rt_wdt_r32(TIMER_REG_TMR1CTL); + t &= ~(TMR1CTL_MODE_MASK << TMR1CTL_MODE_SHIFT | + TMR1CTL_PRESCALE_MASK); + t |= (TMR1CTL_MODE_WDT << TMR1CTL_MODE_SHIFT | + TMR1CTL_PRESCALE_65536); + rt_wdt_w32(TIMER_REG_TMR1CTL, t); + + rt288x_wdt_ping(w); + + t = rt_wdt_r32(TIMER_REG_TMR1CTL); + t |= TMR1CTL_ENABLE; + rt_wdt_w32(TIMER_REG_TMR1CTL, t); + + return 0; +} + +static int rt288x_wdt_stop(struct watchdog_device *w) +{ + u32 t; + + rt288x_wdt_ping(w); + + t = rt_wdt_r32(TIMER_REG_TMR1CTL); + t &= ~TMR1CTL_ENABLE; + rt_wdt_w32(TIMER_REG_TMR1CTL, t); + + return 0; +} + +static int rt288x_wdt_set_timeout(struct watchdog_device *w, unsigned int t) +{ + w->timeout = t; + rt288x_wdt_ping(w); + + return 0; +} + +static int rt288x_wdt_bootcause(void) +{ + if (rt_sysc_r32(SYSC_RSTSTAT) & WDT_RST_CAUSE) + return WDIOF_CARDRESET; + + return 0; +} + +static struct watchdog_info rt288x_wdt_info = { + .identity = "Ralink Watchdog", + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, +}; + +static struct watchdog_ops rt288x_wdt_ops = { + .owner = THIS_MODULE, + .start = rt288x_wdt_start, + .stop = rt288x_wdt_stop, + .ping = rt288x_wdt_ping, + .set_timeout = rt288x_wdt_set_timeout, +}; + +static struct watchdog_device rt288x_wdt_dev = { + .info = &rt288x_wdt_info, + .ops = &rt288x_wdt_ops, + .min_timeout = 1, +}; + +static int rt288x_wdt_probe(struct platform_device *pdev) +{ + struct resource *res; + int ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rt288x_wdt_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(rt288x_wdt_base)) + return PTR_ERR(rt288x_wdt_base); + + rt288x_wdt_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(rt288x_wdt_clk)) + return PTR_ERR(rt288x_wdt_clk); + + device_reset(&pdev->dev); + + rt288x_wdt_freq = clk_get_rate(rt288x_wdt_clk) / RALINK_WDT_PRESCALE; + + rt288x_wdt_dev.dev = &pdev->dev; + rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause(); + + rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq); + 
rt288x_wdt_dev.timeout = rt288x_wdt_dev.max_timeout; + + watchdog_set_nowayout(&rt288x_wdt_dev, nowayout); + + ret = watchdog_register_device(&rt288x_wdt_dev); + if (!ret) + dev_info(&pdev->dev, "Initialized\n"); + + return 0; +} + +static int rt288x_wdt_remove(struct platform_device *pdev) +{ + watchdog_unregister_device(&rt288x_wdt_dev); + + return 0; +} + +static void rt288x_wdt_shutdown(struct platform_device *pdev) +{ + rt288x_wdt_stop(&rt288x_wdt_dev); +} + +static const struct of_device_id rt288x_wdt_match[] = { + { .compatible = "ralink,rt2880-wdt" }, + {}, +}; +MODULE_DEVICE_TABLE(of, rt288x_wdt_match); + +static struct platform_driver rt288x_wdt_driver = { + .probe = rt288x_wdt_probe, + .remove = rt288x_wdt_remove, + .shutdown = rt288x_wdt_shutdown, + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + .of_match_table = rt288x_wdt_match, + }, +}; + +module_platform_driver(rt288x_wdt_driver); + +MODULE_DESCRIPTION("MediaTek/Ralink RT288x/RT3xxx hardware watchdog driver"); +MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index 23aad7c6bf5d..7d8fd041ee25 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c @@ -29,7 +29,6 @@ #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/timer.h> -#include <linux/miscdevice.h> /* for MODULE_ALIAS_MISCDEV */ #include <linux/watchdog.h> #include <linux/init.h> #include <linux/platform_device.h> @@ -539,5 +538,4 @@ MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>, " "Dimitry Andric <dimitry.andric@tomtom.com>"); MODULE_DESCRIPTION("S3C2410 Watchdog Device Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:s3c2410-wdt"); diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c index ccd6b29e21bf..e1d39a1e9628 100644 --- a/drivers/watchdog/sa1100_wdt.c +++ b/drivers/watchdog/sa1100_wdt.c @@ -193,4 +193,3 @@ module_param(margin, int, 0); MODULE_PARM_DESC(margin, "Watchdog margin in seconds (default 60s)"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c index ea5d84a1fdad..3abae50773b8 100644 --- a/drivers/watchdog/sb_wdog.c +++ b/drivers/watchdog/sb_wdog.c @@ -341,7 +341,6 @@ MODULE_PARM_DESC(timeout, "Watchdog timeout in microseconds (max/default 8388607 or 8.3ish secs)"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); /* * example code that can be put in a platform code area to utilize the diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c index 63632ec87c7e..2eef58a0cf05 100644 --- a/drivers/watchdog/sbc60xxwdt.c +++ b/drivers/watchdog/sbc60xxwdt.c @@ -387,4 +387,3 @@ module_exit(sbc60xxwdt_unload); MODULE_AUTHOR("Jakob Oestergaard <jakob@unthought.net>"); MODULE_DESCRIPTION("60xx Single Board Computer Watchdog Timer driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/sbc7240_wdt.c b/drivers/watchdog/sbc7240_wdt.c index 719edc8fdeb3..5f268add17ce 100644 --- a/drivers/watchdog/sbc7240_wdt.c +++ b/drivers/watchdog/sbc7240_wdt.c @@ -309,5 +309,3 @@ MODULE_AUTHOR("Gilles Gigan"); MODULE_DESCRIPTION("Watchdog device driver for single board" " computers EPIC Nano 7240 from iEi"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); - diff --git a/drivers/watchdog/sbc8360.c b/drivers/watchdog/sbc8360.c index d4781e05f017..da60560ca446 100644 --- 
a/drivers/watchdog/sbc8360.c +++ b/drivers/watchdog/sbc8360.c @@ -404,6 +404,5 @@ MODULE_AUTHOR("Ian E. Morgan <imorgan@webcon.ca>"); MODULE_DESCRIPTION("SBC8360 watchdog driver"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.01"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); /* end of sbc8360.c */ diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c index 0c3e9f66ef77..a1c502e0d8ec 100644 --- a/drivers/watchdog/sbc_epx_c3.c +++ b/drivers/watchdog/sbc_epx_c3.c @@ -220,4 +220,3 @@ MODULE_DESCRIPTION("Hardware Watchdog Device for Winsystems EPX-C3 SBC. " "so only use it if you are *sure* you are running on this specific " "SBC system from Winsystems! It writes to IO ports 0x1ee and 0x1ef!"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c index 90d5527ca886..a517d8bae757 100644 --- a/drivers/watchdog/sbc_fitpc2_wdt.c +++ b/drivers/watchdog/sbc_fitpc2_wdt.c @@ -263,5 +263,3 @@ module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); - diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c index 3fb83b0c28c2..3b9fff9dcf65 100644 --- a/drivers/watchdog/sc1200wdt.c +++ b/drivers/watchdog/sc1200wdt.c @@ -476,4 +476,3 @@ MODULE_AUTHOR("Zwane Mwaikambo <zwane@commfireservices.com>"); MODULE_DESCRIPTION( "Driver for National Semiconductor PC87307/PC97307 watchdog component"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c index 707e027e5002..f353e18b1a82 100644 --- a/drivers/watchdog/sc520_wdt.c +++ b/drivers/watchdog/sc520_wdt.c @@ -433,4 +433,3 @@ MODULE_AUTHOR("Scott and Bill Jennings"); MODULE_DESCRIPTION( "Driver for watchdog timer in AMD \"Elan\" SC520 uProcessor"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c index af7b136b1874..b96127ea3de1 100644 --- a/drivers/watchdog/sch311x_wdt.c +++ b/drivers/watchdog/sch311x_wdt.c @@ -26,8 +26,7 @@ #include <linux/types.h> /* For standard types (like size_t) */ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/... */ -#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV - (WATCHDOG_MINOR) */ +#include <linux/miscdevice.h> /* For struct miscdevice */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/init.h> /* For __init/__exit/... 
*/ #include <linux/fs.h> /* For file operations */ @@ -545,5 +544,3 @@ module_exit(sch311x_wdt_exit); MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>"); MODULE_DESCRIPTION("SMSC SCH311x WatchDog Timer Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); - diff --git a/drivers/watchdog/scx200_wdt.c b/drivers/watchdog/scx200_wdt.c index 8ae7c282d465..836377cf9271 100644 --- a/drivers/watchdog/scx200_wdt.c +++ b/drivers/watchdog/scx200_wdt.c @@ -37,7 +37,6 @@ MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); MODULE_DESCRIPTION("NatSemi SCx200 Watchdog Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); static int margin = 60; /* in seconds */ module_param(margin, int, 0); diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c index 5bca79457768..f9b8e06f3558 100644 --- a/drivers/watchdog/shwdt.c +++ b/drivers/watchdog/shwdt.c @@ -343,7 +343,6 @@ MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>"); MODULE_DESCRIPTION("SuperH watchdog driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); module_param(clock_division_ratio, int, 0); MODULE_PARM_DESC(clock_division_ratio, diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c new file mode 100644 index 000000000000..ced3edc95957 --- /dev/null +++ b/drivers/watchdog/sirfsoc_wdt.c @@ -0,0 +1,226 @@ +/* + * Watchdog driver for CSR SiRFprimaII and SiRFatlasVI + * + * Copyright (c) 2013 Cambridge Silicon Radio Limited, a CSR plc group company. + * + * Licensed under GPLv2 or later. + */ + +#include <linux/module.h> +#include <linux/watchdog.h> +#include <linux/platform_device.h> +#include <linux/moduleparam.h> +#include <linux/of.h> +#include <linux/io.h> +#include <linux/uaccess.h> + +#define CLOCK_FREQ 1000000 + +#define SIRFSOC_TIMER_COUNTER_LO 0x0000 +#define SIRFSOC_TIMER_MATCH_0 0x0008 +#define SIRFSOC_TIMER_INT_EN 0x0024 +#define SIRFSOC_TIMER_WATCHDOG_EN 0x0028 +#define SIRFSOC_TIMER_LATCH 0x0030 +#define SIRFSOC_TIMER_LATCHED_LO 0x0034 + +#define SIRFSOC_TIMER_WDT_INDEX 5 + +#define SIRFSOC_WDT_MIN_TIMEOUT 30 /* 30 secs */ +#define SIRFSOC_WDT_MAX_TIMEOUT (10 * 60) /* 10 mins */ +#define SIRFSOC_WDT_DEFAULT_TIMEOUT 30 /* 30 secs */ + +static unsigned int timeout = SIRFSOC_WDT_DEFAULT_TIMEOUT; +static bool nowayout = WATCHDOG_NOWAYOUT; + +module_param(timeout, uint, 0); +module_param(nowayout, bool, 0); + +MODULE_PARM_DESC(timeout, "Default watchdog timeout (in seconds)"); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +static unsigned int sirfsoc_wdt_gettimeleft(struct watchdog_device *wdd) +{ + u32 counter, match; + void __iomem *wdt_base; + int time_left; + + wdt_base = watchdog_get_drvdata(wdd); + counter = readl(wdt_base + SIRFSOC_TIMER_COUNTER_LO); + match = readl(wdt_base + + SIRFSOC_TIMER_MATCH_0 + (SIRFSOC_TIMER_WDT_INDEX << 2)); + + time_left = match - counter; + + return time_left / CLOCK_FREQ; +} + +static int sirfsoc_wdt_updatetimeout(struct watchdog_device *wdd) +{ + u32 counter, timeout_ticks; + void __iomem *wdt_base; + + timeout_ticks = wdd->timeout * CLOCK_FREQ; + wdt_base = watchdog_get_drvdata(wdd); + + /* Enable the latch before reading the LATCH_LO register */ + writel(1, wdt_base + SIRFSOC_TIMER_LATCH); + + /* Set the TO value */ + counter = readl(wdt_base + SIRFSOC_TIMER_LATCHED_LO); + + counter += timeout_ticks; + + writel(counter, wdt_base + + SIRFSOC_TIMER_MATCH_0 + (SIRFSOC_TIMER_WDT_INDEX << 2)); + + return 
0; +} + +static int sirfsoc_wdt_enable(struct watchdog_device *wdd) +{ + void __iomem *wdt_base = watchdog_get_drvdata(wdd); + sirfsoc_wdt_updatetimeout(wdd); + + /* + * NOTE: If interrupt is not enabled + * then WD-Reset doesn't get generated at all. + */ + writel(readl(wdt_base + SIRFSOC_TIMER_INT_EN) + | (1 << SIRFSOC_TIMER_WDT_INDEX), + wdt_base + SIRFSOC_TIMER_INT_EN); + writel(1, wdt_base + SIRFSOC_TIMER_WATCHDOG_EN); + + return 0; +} + +static int sirfsoc_wdt_disable(struct watchdog_device *wdd) +{ + void __iomem *wdt_base = watchdog_get_drvdata(wdd); + + writel(0, wdt_base + SIRFSOC_TIMER_WATCHDOG_EN); + writel(readl(wdt_base + SIRFSOC_TIMER_INT_EN) + & (~(1 << SIRFSOC_TIMER_WDT_INDEX)), + wdt_base + SIRFSOC_TIMER_INT_EN); + + return 0; +} + +static int sirfsoc_wdt_settimeout(struct watchdog_device *wdd, unsigned int to) +{ + wdd->timeout = to; + sirfsoc_wdt_updatetimeout(wdd); + + return 0; +} + +#define OPTIONS (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE) + +static const struct watchdog_info sirfsoc_wdt_ident = { + .options = OPTIONS, + .firmware_version = 0, + .identity = "SiRFSOC Watchdog", +}; + +static struct watchdog_ops sirfsoc_wdt_ops = { + .owner = THIS_MODULE, + .start = sirfsoc_wdt_enable, + .stop = sirfsoc_wdt_disable, + .get_timeleft = sirfsoc_wdt_gettimeleft, + .ping = sirfsoc_wdt_updatetimeout, + .set_timeout = sirfsoc_wdt_settimeout, +}; + +static struct watchdog_device sirfsoc_wdd = { + .info = &sirfsoc_wdt_ident, + .ops = &sirfsoc_wdt_ops, + .timeout = SIRFSOC_WDT_DEFAULT_TIMEOUT, + .min_timeout = SIRFSOC_WDT_MIN_TIMEOUT, + .max_timeout = SIRFSOC_WDT_MAX_TIMEOUT, +}; + +static int sirfsoc_wdt_probe(struct platform_device *pdev) +{ + struct resource *res; + int ret; + void __iomem *base; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + watchdog_set_drvdata(&sirfsoc_wdd, base); + + watchdog_init_timeout(&sirfsoc_wdd, timeout, &pdev->dev); + watchdog_set_nowayout(&sirfsoc_wdd, nowayout); + + ret = watchdog_register_device(&sirfsoc_wdd); + if (ret) + return ret; + + platform_set_drvdata(pdev, &sirfsoc_wdd); + + return 0; +} + +static void sirfsoc_wdt_shutdown(struct platform_device *pdev) +{ + struct watchdog_device *wdd = platform_get_drvdata(pdev); + + sirfsoc_wdt_disable(wdd); +} + +static int sirfsoc_wdt_remove(struct platform_device *pdev) +{ + sirfsoc_wdt_shutdown(pdev); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sirfsoc_wdt_suspend(struct device *dev) +{ + return 0; +} + +static int sirfsoc_wdt_resume(struct device *dev) +{ + struct watchdog_device *wdd = dev_get_drvdata(dev); + + /* + * NOTE: Since timer controller registers settings are saved + * and restored back by the timer-prima2.c, so we need not + * update WD settings except refreshing timeout. 
+ */ + sirfsoc_wdt_updatetimeout(wdd); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(sirfsoc_wdt_pm_ops, + sirfsoc_wdt_suspend, sirfsoc_wdt_resume); + +static const struct of_device_id sirfsoc_wdt_of_match[] = { + { .compatible = "sirf,prima2-tick"}, + {}, +}; +MODULE_DEVICE_TABLE(of, sirfsoc_wdt_of_match); + +static struct platform_driver sirfsoc_wdt_driver = { + .driver = { + .name = "sirfsoc-wdt", + .owner = THIS_MODULE, + .pm = &sirfsoc_wdt_pm_ops, + .of_match_table = of_match_ptr(sirfsoc_wdt_of_match), + }, + .probe = sirfsoc_wdt_probe, + .remove = sirfsoc_wdt_remove, + .shutdown = sirfsoc_wdt_shutdown, +}; +module_platform_driver(sirfsoc_wdt_driver); + +MODULE_DESCRIPTION("SiRF SoC watchdog driver"); +MODULE_AUTHOR("Xianglong Du <Xianglong.Du@csr.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:sirfsoc-wdt"); diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c index 6d665f9c1d58..445ea1ad1fa9 100644 --- a/drivers/watchdog/smsc37b787_wdt.c +++ b/drivers/watchdog/smsc37b787_wdt.c @@ -603,8 +603,6 @@ MODULE_DESCRIPTION("Driver for SMsC 37B787 watchdog component (Version " VERSION ")"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); - #ifdef SMSC_SUPPORT_MINUTES module_param(unit, int, 0); MODULE_PARM_DESC(unit, diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c index b68b1e519d53..ef2638fee4a8 100644 --- a/drivers/watchdog/softdog.c +++ b/drivers/watchdog/softdog.c @@ -207,4 +207,3 @@ module_exit(watchdog_exit); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("Software Watchdog Device Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c index 0e9d8c479c35..ce63a1bbf395 100644 --- a/drivers/watchdog/sp5100_tco.c +++ b/drivers/watchdog/sp5100_tco.c @@ -580,4 +580,3 @@ module_exit(sp5100_tco_cleanup_module); MODULE_AUTHOR("Priyanka Gupta"); MODULE_DESCRIPTION("TCO timer driver for SP5100/SB800 chipset"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c index 58df98aec122..3f786ce0a6f2 100644 --- a/drivers/watchdog/sp805_wdt.c +++ b/drivers/watchdog/sp805_wdt.c @@ -268,7 +268,6 @@ static int sp805_wdt_remove(struct amba_device *adev) struct sp805_wdt *wdt = amba_get_drvdata(adev); watchdog_unregister_device(&wdt->wdd); - amba_set_drvdata(adev, NULL); watchdog_set_drvdata(&wdt->wdd, NULL); return 0; diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c index c97e98dcde62..d667f6b51d35 100644 --- a/drivers/watchdog/stmp3xxx_rtc_wdt.c +++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c @@ -30,7 +30,7 @@ MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat period in seconds from 1 to " static int wdt_start(struct watchdog_device *wdd) { struct device *dev = watchdog_get_drvdata(wdd); - struct stmp3xxx_wdt_pdata *pdata = dev->platform_data; + struct stmp3xxx_wdt_pdata *pdata = dev_get_platdata(dev); pdata->wdt_set_timeout(dev->parent, wdd->timeout * WDOG_TICK_RATE); return 0; @@ -39,7 +39,7 @@ static int wdt_start(struct watchdog_device *wdd) static int wdt_stop(struct watchdog_device *wdd) { struct device *dev = watchdog_get_drvdata(wdd); - struct stmp3xxx_wdt_pdata *pdata = dev->platform_data; + struct stmp3xxx_wdt_pdata *pdata = dev_get_platdata(dev); pdata->wdt_set_timeout(dev->parent, 0); return 0; @@ -108,4 +108,3 @@ module_platform_driver(stmp3xxx_wdt_driver); MODULE_DESCRIPTION("STMP3XXX RTC Watchdog Driver"); 
MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c index f6caa77151c7..76332d893e12 100644 --- a/drivers/watchdog/sunxi_wdt.c +++ b/drivers/watchdog/sunxi_wdt.c @@ -217,7 +217,7 @@ static struct platform_driver sunxi_wdt_driver = { .driver = { .owner = THIS_MODULE, .name = DRV_NAME, - .of_match_table = of_match_ptr(sunxi_wdt_dt_ids) + .of_match_table = sunxi_wdt_dt_ids, }, }; diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c index c9b0c627fe7e..09d4831aa61f 100644 --- a/drivers/watchdog/ts72xx_wdt.c +++ b/drivers/watchdog/ts72xx_wdt.c @@ -192,7 +192,7 @@ static int ts72xx_wdt_open(struct inode *inode, struct file *file) dev_err(&wdt->pdev->dev, "failed to convert timeout (%d) to register value\n", timeout); - return -EINVAL; + return regval; } if (mutex_lock_interruptible(&wdt->lock)) @@ -305,7 +305,8 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd, switch (cmd) { case WDIOC_GETSUPPORT: - error = copy_to_user(argp, &winfo, sizeof(winfo)); + if (copy_to_user(argp, &winfo, sizeof(winfo))) + error = -EFAULT; break; case WDIOC_GETSTATUS: @@ -320,10 +321,9 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd, case WDIOC_SETOPTIONS: { int options; - if (get_user(options, p)) { - error = -EFAULT; + error = get_user(options, p); + if (error) break; - } error = -EINVAL; @@ -341,30 +341,26 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd, case WDIOC_SETTIMEOUT: { int new_timeout; + int regval; - if (get_user(new_timeout, p)) { - error = -EFAULT; - } else { - int regval; - - regval = timeout_to_regval(new_timeout); - if (regval < 0) { - error = -EINVAL; - } else { - ts72xx_wdt_stop(wdt); - wdt->regval = regval; - ts72xx_wdt_start(wdt); - } - } + error = get_user(new_timeout, p); if (error) break; + regval = timeout_to_regval(new_timeout); + if (regval < 0) { + error = regval; + break; + } + ts72xx_wdt_stop(wdt); + wdt->regval = regval; + ts72xx_wdt_start(wdt); + /*FALLTHROUGH*/ } case WDIOC_GETTIMEOUT: - if (put_user(regval_to_timeout(wdt->regval), p)) - error = -EFAULT; + error = put_user(regval_to_timeout(wdt->regval), p); break; default: diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c index 88f23c5cfddb..0fd0e8ae62a8 100644 --- a/drivers/watchdog/txx9wdt.c +++ b/drivers/watchdog/txx9wdt.c @@ -176,5 +176,4 @@ module_platform_driver_probe(txx9wdt_driver, txx9wdt_probe); MODULE_DESCRIPTION("TXx9 Watchdog Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:txx9wdt"); diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c index a614d84121c3..e029b5768f2c 100644 --- a/drivers/watchdog/ux500_wdt.c +++ b/drivers/watchdog/ux500_wdt.c @@ -88,7 +88,7 @@ static struct watchdog_device ux500_wdt = { static int ux500_wdt_probe(struct platform_device *pdev) { int ret; - struct ux500_wdt_data *pdata = pdev->dev.platform_data; + struct ux500_wdt_data *pdata = dev_get_platdata(&pdev->dev); if (pdata) { if (pdata->timeout > 0) @@ -167,5 +167,4 @@ module_platform_driver(ux500_wdt_driver); MODULE_AUTHOR("Jonas Aaberg <jonas.aberg@stericsson.com>"); MODULE_DESCRIPTION("Ux500 Watchdog Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:ux500_wdt"); diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c index 92f1326f0cfc..e24b21082874 100644 --- 
a/drivers/watchdog/w83627hf_wdt.c +++ b/drivers/watchdog/w83627hf_wdt.c @@ -1,6 +1,9 @@ /* * w83627hf/thf WDT driver * + * (c) Copyright 2013 Guenter Roeck + * converted to watchdog infrastructure + * * (c) Copyright 2007 Vlad Drukker <vlad@storewiz.com> * added support for W83627THF. * @@ -31,31 +34,22 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> -#include <linux/miscdevice.h> #include <linux/watchdog.h> -#include <linux/fs.h> #include <linux/ioport.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/init.h> -#include <linux/spinlock.h> #include <linux/io.h> -#include <linux/uaccess.h> - #define WATCHDOG_NAME "w83627hf/thf/hg/dhg WDT" #define WATCHDOG_TIMEOUT 60 /* 60 sec default timeout */ -static unsigned long wdt_is_open; -static char expect_close; -static DEFINE_SPINLOCK(io_lock); - /* You must set this - there is no sane way to probe for this board. */ static int wdt_io = 0x2E; module_param(wdt_io, int, 0); MODULE_PARM_DESC(wdt_io, "w83627hf/thf WDT io port (default 0x2E)"); -static int timeout = WATCHDOG_TIMEOUT; /* in seconds */ +static int timeout; /* in seconds */ module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. 1 <= timeout <= 255, default=" @@ -76,236 +70,147 @@ MODULE_PARM_DESC(nowayout, (same as EFER) */ #define WDT_EFDR (WDT_EFIR+1) /* Extended Function Data Register */ -static void w83627hf_select_wd_register(void) +#define W83627HF_LD_WDT 0x08 + +static void superio_outb(int reg, int val) { - unsigned char c; + outb(reg, WDT_EFER); + outb(val, WDT_EFDR); +} + +static inline int superio_inb(int reg) +{ + outb(reg, WDT_EFER); + return inb(WDT_EFDR); +} + +static int superio_enter(void) +{ + if (!request_muxed_region(wdt_io, 2, WATCHDOG_NAME)) + return -EBUSY; + outb_p(0x87, WDT_EFER); /* Enter extended function mode */ outb_p(0x87, WDT_EFER); /* Again according to manual */ - outb(0x20, WDT_EFER); /* check chip version */ - c = inb(WDT_EFDR); - if (c == 0x82) { /* W83627THF */ - outb_p(0x2b, WDT_EFER); /* select GPIO3 */ - c = ((inb_p(WDT_EFDR) & 0xf7) | 0x04); /* select WDT0 */ - outb_p(0x2b, WDT_EFER); - outb_p(c, WDT_EFDR); /* set GPIO3 to WDT0 */ - } else if (c == 0x88 || c == 0xa0) { /* W83627EHF / W83627DHG */ - outb_p(0x2d, WDT_EFER); /* select GPIO5 */ - c = inb_p(WDT_EFDR) & ~0x01; /* PIN77 -> WDT0# */ - outb_p(0x2d, WDT_EFER); - outb_p(c, WDT_EFDR); /* set GPIO5 to WDT0 */ - } + return 0; +} - outb_p(0x07, WDT_EFER); /* point to logical device number reg */ - outb_p(0x08, WDT_EFDR); /* select logical device 8 (GPIO2) */ - outb_p(0x30, WDT_EFER); /* select CR30 */ - outb_p(0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */ +static void superio_select(int ld) +{ + superio_outb(0x07, ld); } -static void w83627hf_unselect_wd_register(void) +static void superio_exit(void) { outb_p(0xAA, WDT_EFER); /* Leave extended function mode */ + release_region(wdt_io, 2); } /* tyan motherboards seem to set F5 to 0x4C ? * So explicitly init to appropriate value. 
*/ -static void w83627hf_init(void) +static int w83627hf_init(struct watchdog_device *wdog) { + int ret; unsigned char t; - w83627hf_select_wd_register(); + ret = superio_enter(); + if (ret) + return ret; + + superio_select(W83627HF_LD_WDT); + t = superio_inb(0x20); /* check chip version */ + if (t == 0x82) { /* W83627THF */ + t = (superio_inb(0x2b) & 0xf7); + superio_outb(0x2b, t | 0x04); /* set GPIO3 to WDT0 */ + } else if (t == 0x88 || t == 0xa0) { /* W83627EHF / W83627DHG */ + t = superio_inb(0x2d); + superio_outb(0x2d, t & ~0x01); /* set GPIO5 to WDT0 */ + } + + /* set CR30 bit 0 to activate GPIO2 */ + t = superio_inb(0x30); + if (!(t & 0x01)) + superio_outb(0x30, t | 0x01); - outb_p(0xF6, WDT_EFER); /* Select CRF6 */ - t = inb_p(WDT_EFDR); /* read CRF6 */ + t = superio_inb(0xF6); if (t != 0) { pr_info("Watchdog already running. Resetting timeout to %d sec\n", - timeout); - outb_p(timeout, WDT_EFDR); /* Write back to CRF6 */ + wdog->timeout); + superio_outb(0xF6, wdog->timeout); } - outb_p(0xF5, WDT_EFER); /* Select CRF5 */ - t = inb_p(WDT_EFDR); /* read CRF5 */ - t &= ~0x0C; /* set second mode & disable keyboard - turning off watchdog */ - t |= 0x02; /* enable the WDTO# output low pulse - to the KBRST# pin (PIN60) */ - outb_p(t, WDT_EFDR); /* Write back to CRF5 */ - - outb_p(0xF7, WDT_EFER); /* Select CRF7 */ - t = inb_p(WDT_EFDR); /* read CRF7 */ - t &= ~0xC0; /* disable keyboard & mouse turning off - watchdog */ - outb_p(t, WDT_EFDR); /* Write back to CRF7 */ - - w83627hf_unselect_wd_register(); -} - -static void wdt_set_time(int timeout) -{ - spin_lock(&io_lock); - - w83627hf_select_wd_register(); + /* set second mode & disable keyboard turning off watchdog */ + t = superio_inb(0xF5) & ~0x0C; + /* enable the WDTO# output low pulse to the KBRST# pin */ + t |= 0x02; + superio_outb(0xF5, t); - outb_p(0xF6, WDT_EFER); /* Select CRF6 */ - outb_p(timeout, WDT_EFDR); /* Write Timeout counter to CRF6 */ + /* disable keyboard & mouse turning off watchdog */ + t = superio_inb(0xF7) & ~0xC0; + superio_outb(0xF7, t); - w83627hf_unselect_wd_register(); + superio_exit(); - spin_unlock(&io_lock); -} - -static int wdt_ping(void) -{ - wdt_set_time(timeout); return 0; } -static int wdt_disable(void) +static int wdt_set_time(unsigned int timeout) { - wdt_set_time(0); - return 0; -} + int ret; + + ret = superio_enter(); + if (ret) + return ret; + + superio_select(W83627HF_LD_WDT); + superio_outb(0xF6, timeout); + superio_exit(); -static int wdt_set_heartbeat(int t) -{ - if (t < 1 || t > 255) - return -EINVAL; - timeout = t; return 0; } -static int wdt_get_time(void) +static int wdt_start(struct watchdog_device *wdog) { - int timeleft; - - spin_lock(&io_lock); - - w83627hf_select_wd_register(); - - outb_p(0xF6, WDT_EFER); /* Select CRF6 */ - timeleft = inb_p(WDT_EFDR); /* Read Timeout counter to CRF6 */ - - w83627hf_unselect_wd_register(); - - spin_unlock(&io_lock); - - return timeleft; + return wdt_set_time(wdog->timeout); } -static ssize_t wdt_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) +static int wdt_stop(struct watchdog_device *wdog) { - if (count) { - if (!nowayout) { - size_t i; - - expect_close = 0; - - for (i = 0; i != count; i++) { - char c; - if (get_user(c, buf + i)) - return -EFAULT; - if (c == 'V') - expect_close = 42; - } - } - wdt_ping(); - } - return count; + return wdt_set_time(0); } -static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +static int wdt_set_timeout(struct watchdog_device *wdog, unsigned int timeout) { - void 
__user *argp = (void __user *)arg; - int __user *p = argp; - int timeval; - static const struct watchdog_info ident = { - .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | - WDIOF_MAGICCLOSE, - .firmware_version = 1, - .identity = "W83627HF WDT", - }; - - switch (cmd) { - case WDIOC_GETSUPPORT: - if (copy_to_user(argp, &ident, sizeof(ident))) - return -EFAULT; - break; - case WDIOC_GETSTATUS: - case WDIOC_GETBOOTSTATUS: - return put_user(0, p); - case WDIOC_SETOPTIONS: - { - int options, retval = -EINVAL; - - if (get_user(options, p)) - return -EFAULT; - if (options & WDIOS_DISABLECARD) { - wdt_disable(); - retval = 0; - } - if (options & WDIOS_ENABLECARD) { - wdt_ping(); - retval = 0; - } - return retval; - } - case WDIOC_KEEPALIVE: - wdt_ping(); - break; - case WDIOC_SETTIMEOUT: - if (get_user(timeval, p)) - return -EFAULT; - if (wdt_set_heartbeat(timeval)) - return -EINVAL; - wdt_ping(); - /* Fall */ - case WDIOC_GETTIMEOUT: - return put_user(timeout, p); - case WDIOC_GETTIMELEFT: - timeval = wdt_get_time(); - return put_user(timeval, p); - default: - return -ENOTTY; - } + wdog->timeout = timeout; + return 0; } -static int wdt_open(struct inode *inode, struct file *file) +static unsigned int wdt_get_time(struct watchdog_device *wdog) { - if (test_and_set_bit(0, &wdt_is_open)) - return -EBUSY; - /* - * Activate - */ + unsigned int timeleft; + int ret; - wdt_ping(); - return nonseekable_open(inode, file); -} + ret = superio_enter(); + if (ret) + return 0; -static int wdt_close(struct inode *inode, struct file *file) -{ - if (expect_close == 42) - wdt_disable(); - else { - pr_crit("Unexpected close, not stopping watchdog!\n"); - wdt_ping(); - } - expect_close = 0; - clear_bit(0, &wdt_is_open); - return 0; + superio_select(W83627HF_LD_WDT); + timeleft = superio_inb(0xF6); + superio_exit(); + + return timeleft; } /* * Notifier for system down */ - static int wdt_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) - wdt_disable(); /* Turn the WDT off */ + wdt_set_time(0); /* Turn the WDT off */ return NOTIFY_DONE; } @@ -314,19 +219,25 @@ static int wdt_notify_sys(struct notifier_block *this, unsigned long code, * Kernel Interfaces */ -static const struct file_operations wdt_fops = { - .owner = THIS_MODULE, - .llseek = no_llseek, - .write = wdt_write, - .unlocked_ioctl = wdt_ioctl, - .open = wdt_open, - .release = wdt_close, +static struct watchdog_info wdt_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, + .identity = "W83627HF Watchdog", }; -static struct miscdevice wdt_miscdev = { - .minor = WATCHDOG_MINOR, - .name = "watchdog", - .fops = &wdt_fops, +static struct watchdog_ops wdt_ops = { + .owner = THIS_MODULE, + .start = wdt_start, + .stop = wdt_stop, + .set_timeout = wdt_set_timeout, + .get_timeleft = wdt_get_time, +}; + +static struct watchdog_device wdt_dev = { + .info = &wdt_info, + .ops = &wdt_ops, + .timeout = WATCHDOG_TIMEOUT, + .min_timeout = 1, + .max_timeout = 255, }; /* @@ -344,50 +255,39 @@ static int __init wdt_init(void) pr_info("WDT driver for the Winbond(TM) W83627HF/THF/HG/DHG Super I/O chip initialising\n"); - if (wdt_set_heartbeat(timeout)) { - wdt_set_heartbeat(WATCHDOG_TIMEOUT); - pr_info("timeout value must be 1 <= timeout <= 255, using %d\n", - WATCHDOG_TIMEOUT); - } + watchdog_init_timeout(&wdt_dev, timeout, NULL); + watchdog_set_nowayout(&wdt_dev, nowayout); - if (!request_region(wdt_io, 1, WATCHDOG_NAME)) { - pr_err("I/O address 0x%04x already in use\n", wdt_io); - 
ret = -EIO; - goto out; + ret = w83627hf_init(&wdt_dev); + if (ret) { + pr_err("failed to initialize watchdog (err=%d)\n", ret); + return ret; } - w83627hf_init(); - ret = register_reboot_notifier(&wdt_notifier); if (ret != 0) { pr_err("cannot register reboot notifier (err=%d)\n", ret); - goto unreg_regions; + return ret; } - ret = misc_register(&wdt_miscdev); - if (ret != 0) { - pr_err("cannot register miscdev on minor=%d (err=%d)\n", - WATCHDOG_MINOR, ret); + ret = watchdog_register_device(&wdt_dev); + if (ret) goto unreg_reboot; - } pr_info("initialized. timeout=%d sec (nowayout=%d)\n", - timeout, nowayout); + wdt_dev.timeout, nowayout); -out: return ret; + unreg_reboot: unregister_reboot_notifier(&wdt_notifier); -unreg_regions: - release_region(wdt_io, 1); - goto out; + return ret; } static void __exit wdt_exit(void) { - misc_deregister(&wdt_miscdev); + watchdog_unregister_device(&wdt_dev); unregister_reboot_notifier(&wdt_notifier); - release_region(wdt_io, 1); } module_init(wdt_init); @@ -396,4 +296,3 @@ module_exit(wdt_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pádraig Brady <P@draigBrady.com>"); MODULE_DESCRIPTION("w83627hf/thf WDT driver"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/w83697hf_wdt.c b/drivers/watchdog/w83697hf_wdt.c index cd9f3c1e1af4..aaf2995d37f4 100644 --- a/drivers/watchdog/w83697hf_wdt.c +++ b/drivers/watchdog/w83697hf_wdt.c @@ -458,4 +458,3 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Marcus Junker <junker@anduras.de>, " "Samuel Tardieu <sam@rfc1149.net>"); MODULE_DESCRIPTION("w83697hf/hg WDT driver"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c index 274be0bfaf24..ff58cb74671f 100644 --- a/drivers/watchdog/w83697ug_wdt.c +++ b/drivers/watchdog/w83697ug_wdt.c @@ -395,4 +395,3 @@ module_exit(wdt_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Flemming Frandsen <ff@nrvissing.net>"); MODULE_DESCRIPTION("w83697ug/uf WDT driver"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c index 7874ae06232b..f0483c75ed32 100644 --- a/drivers/watchdog/w83877f_wdt.c +++ b/drivers/watchdog/w83877f_wdt.c @@ -406,4 +406,3 @@ module_exit(w83877f_wdt_unload); MODULE_AUTHOR("Scott and Bill Jennings"); MODULE_DESCRIPTION("Driver for watchdog timer in w83877f chip"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c index 5d2c902825c2..91bf55a20024 100644 --- a/drivers/watchdog/w83977f_wdt.c +++ b/drivers/watchdog/w83977f_wdt.c @@ -527,4 +527,3 @@ module_exit(w83977f_wdt_exit); MODULE_AUTHOR("Jose Goncalves <jose.goncalves@inov.pt>"); MODULE_DESCRIPTION("Driver for watchdog timer in W83977F I/O chip"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c index 25aba6e00a23..db0da7ea4fd8 100644 --- a/drivers/watchdog/wafer5823wdt.c +++ b/drivers/watchdog/wafer5823wdt.c @@ -322,6 +322,5 @@ module_exit(wafwdt_exit); MODULE_AUTHOR("Justin Cormack"); MODULE_DESCRIPTION("ICP Wafer 5823 Single Board Computer WDT driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); /* end of wafer5823wdt.c */ diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c index 05d18b4c661b..461336c4519f 100644 --- a/drivers/watchdog/watchdog_core.c +++ b/drivers/watchdog/watchdog_core.c @@ -77,7 +77,7 @@ int watchdog_init_timeout(struct 
watchdog_device *wdd, watchdog_check_min_max_timeout(wdd); - /* try to get the tiemout module parameter first */ + /* try to get the timeout module parameter first */ if (!watchdog_timeout_invalid(wdd, timeout_parm)) { wdd->timeout = timeout_parm; return ret; diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c index 3045debd5411..0240c60d14e3 100644 --- a/drivers/watchdog/wdrtas.c +++ b/drivers/watchdog/wdrtas.c @@ -48,8 +48,6 @@ MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>"); MODULE_DESCRIPTION("RTAS watchdog driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); -MODULE_ALIAS_MISCDEV(TEMP_MINOR); static bool wdrtas_nowayout = WATCHDOG_NOWAYOUT; static atomic_t wdrtas_miscdev_open = ATOMIC_INIT(0); diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c index ee4333c01109..e0206b5b7d89 100644 --- a/drivers/watchdog/wdt.c +++ b/drivers/watchdog/wdt.c @@ -664,6 +664,4 @@ module_exit(wdt_exit); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("Driver for ISA ICS watchdog cards (WDT500/501)"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); -MODULE_ALIAS_MISCDEV(TEMP_MINOR); MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c index 5eec74053882..7355ddd0b207 100644 --- a/drivers/watchdog/wdt285.c +++ b/drivers/watchdog/wdt285.c @@ -224,7 +224,6 @@ static void __exit footbridge_watchdog_exit(void) MODULE_AUTHOR("Phil Blundell <pb@nexus.co.uk>"); MODULE_DESCRIPTION("Footbridge watchdog driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); module_param(soft_margin, int, 0); MODULE_PARM_DESC(soft_margin, "Watchdog timeout in seconds"); diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c index 65a402344933..a8e6f87f60c9 100644 --- a/drivers/watchdog/wdt977.c +++ b/drivers/watchdog/wdt977.c @@ -507,4 +507,3 @@ module_exit(wd977_exit); MODULE_AUTHOR("Woody Suwalski <woodys@xandros.com>"); MODULE_DESCRIPTION("W83977AF Watchdog driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c index 36a54c0e32dd..ee89ba4dea63 100644 --- a/drivers/watchdog/wdt_pci.c +++ b/drivers/watchdog/wdt_pci.c @@ -744,5 +744,3 @@ module_pci_driver(wdtpci_driver); MODULE_AUTHOR("JP Nollmann, Alan Cox"); MODULE_DESCRIPTION("Driver for the ICS PCI-WDT500/501 watchdog cards"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); -MODULE_ALIAS_MISCDEV(TEMP_MINOR); diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c index d4e47eda4182..e243bd01c774 100644 --- a/drivers/watchdog/wm831x_wdt.c +++ b/drivers/watchdog/wm831x_wdt.c @@ -184,7 +184,7 @@ static const struct watchdog_ops wm831x_wdt_ops = { static int wm831x_wdt_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); - struct wm831x_pdata *chip_pdata; + struct wm831x_pdata *chip_pdata = dev_get_platdata(pdev->dev.parent); struct wm831x_watchdog_pdata *pdata; struct wm831x_wdt_drvdata *driver_data; struct watchdog_device *wm831x_wdt; @@ -231,12 +231,10 @@ static int wm831x_wdt_probe(struct platform_device *pdev) wm831x_wdt->timeout = wm831x_wdt_cfgs[i].time; /* Apply any configuration */ - if (pdev->dev.parent->platform_data) { - chip_pdata = pdev->dev.parent->platform_data; + if (chip_pdata) pdata = chip_pdata->watchdog; - } else { + else pdata = NULL; - } if (pdata) { reg &= ~(WM831X_WDOG_SECACT_MASK | WM831X_WDOG_PRIMACT_MASK | diff --git a/drivers/watchdog/xen_wdt.c b/drivers/watchdog/xen_wdt.c index 
92ad33d0cb71..7a42dffd39e5 100644 --- a/drivers/watchdog/xen_wdt.c +++ b/drivers/watchdog/xen_wdt.c @@ -362,4 +362,3 @@ MODULE_AUTHOR("Jan Beulich <jbeulich@novell.com>"); MODULE_DESCRIPTION("Xen WatchDog Timer Driver"); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c index d15f6e80479f..188825122aae 100644 --- a/drivers/xen/pci.c +++ b/drivers/xen/pci.c @@ -59,12 +59,12 @@ static int xen_add_device(struct device *dev) add.flags = XEN_PCI_DEV_EXTFN; #ifdef CONFIG_ACPI - handle = DEVICE_ACPI_HANDLE(&pci_dev->dev); + handle = ACPI_HANDLE(&pci_dev->dev); if (!handle && pci_dev->bus->bridge) - handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge); + handle = ACPI_HANDLE(pci_dev->bus->bridge); #ifdef CONFIG_PCI_IOV if (!handle && pci_dev->is_virtfn) - handle = DEVICE_ACPI_HANDLE(physfn->bus->bridge); + handle = ACPI_HANDLE(physfn->bus->bridge); #endif if (handle) { acpi_status status;