author    | Linus Walleij <linus.walleij@linaro.org> | 2015-08-13 15:42:55 +0300
committer | Linus Walleij <linus.walleij@linaro.org> | 2015-08-13 15:42:55 +0300
commit    | 5f6f02cd49d61e9856ff2c337578316a1a1b3f88 (patch)
tree      | fd2e6961e8f609af41dd4a282008e1bc52cfa7c7 /drivers
parent    | 3afa129a9de0957d72165cf08a54e5c69938011c (diff)
parent    | cbfe8fa6cd672011c755c3cd85c9ffd4e2d10a6f (diff)
download  | linux-5f6f02cd49d61e9856ff2c337578316a1a1b3f88.tar.xz
Merge tag 'v4.2-rc4' into devel
Linux 4.2-rc4
Diffstat (limited to 'drivers')
357 files changed, 3310 insertions, 2364 deletions
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 8244f013f210..f1c966e05078 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -193,6 +193,7 @@ static bool acpi_decode_space(struct resource_win *win, u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16; bool wp = addr->info.mem.write_protect; u64 len = attr->address_length; + u64 start, end, offset = 0; struct resource *res = &win->res; /* @@ -204,9 +205,6 @@ static bool acpi_decode_space(struct resource_win *win, pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n", addr->min_address_fixed, addr->max_address_fixed, len); - res->start = attr->minimum; - res->end = attr->maximum; - /* * For bridges that translate addresses across the bridge, * translation_offset is the offset that must be added to the @@ -214,12 +212,22 @@ static bool acpi_decode_space(struct resource_win *win, * primary side. Non-bridge devices must list 0 for all Address * Translation offset bits. */ - if (addr->producer_consumer == ACPI_PRODUCER) { - res->start += attr->translation_offset; - res->end += attr->translation_offset; - } else if (attr->translation_offset) { + if (addr->producer_consumer == ACPI_PRODUCER) + offset = attr->translation_offset; + else if (attr->translation_offset) pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n", attr->translation_offset); + start = attr->minimum + offset; + end = attr->maximum + offset; + + win->offset = offset; + res->start = start; + res->end = end; + if (sizeof(resource_size_t) < sizeof(u64) && + (offset != win->offset || start != res->start || end != res->end)) { + pr_warn("acpi resource window ([%#llx-%#llx] ignored, not CPU addressable)\n", + attr->minimum, attr->maximum); + return false; } switch (addr->resource_type) { @@ -236,8 +244,6 @@ static bool acpi_decode_space(struct resource_win *win, return false; } - win->offset = attr->translation_offset; - if (addr->producer_consumer == ACPI_PRODUCER) res->flags |= IORESOURCE_WINDOW; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index e83fc3d0da9c..db5d9f79a247 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2478,6 +2478,10 @@ int ata_dev_configure(struct ata_device *dev) dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, dev->max_sectors); + if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024) + dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024, + dev->max_sectors); + if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) dev->max_sectors = ATA_MAX_SECTORS_LBA48; @@ -4146,6 +4150,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, + /* + * Causes silent data corruption with higher max sects. 
+ * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com + */ + { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 }, + /* Devices we expect to fail diagnostics */ /* Devices where NCQ should be avoided */ @@ -4174,9 +4184,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, - /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ + /* drives which fail FPDMA_AA activation (some may freeze afterwards) */ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, + { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA }, /* Blacklist entries taken from Silicon Image 3124/3132 Windows driver .inf file - also several Linux problem reports */ @@ -4229,7 +4240,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, - { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | + { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, @@ -4238,6 +4249,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, + /* devices that don't properly handle TRIM commands */ + { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, }, + /* * As defined, the DRAT (Deterministic Read After Trim) and RZAT * (Return Zero After Trim) flags in the ATA Command Set are @@ -4501,7 +4515,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev) else /* In the ancient relic department - skip all of this */ return 0; - err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); + /* On some disks, this command causes spin-up, so we need longer timeout */ + err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000); DPRINTK("EXIT, err_mask=%x\n", err_mask); return err_mask; diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index 7ccc084bf1df..85aa76116a30 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c @@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap) ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA; } + } else if (vendor == 0x11ab && devid == 0x4140) { + /* Marvell 4140 quirks */ + ata_for_each_link(link, ap, EDGE) { + /* port 4 is for SEMB device and it doesn't like SRST */ + if (link->pmp == 4) + link->flags |= ATA_LFLAG_DISABLED; + } } } diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 3131adcc1f87..641a61a59e89 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2568,7 +2568,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf) rbuf[14] = (lowest_aligned >> 8) & 0x3f; rbuf[15] = lowest_aligned; - if (ata_id_has_trim(args->id)) { + if (ata_id_has_trim(args->id) && + !(dev->horkage & ATA_HORKAGE_NOTRIM)) { rbuf[14] |= 0x80; /* LBPME */ if (ata_id_has_zero_after_trim(args->id) && diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index d6c37bcd416d..e2d94972962d 100644 --- a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c @@ -569,6 +569,8 @@ show_ata_dev_trim(struct device *dev, if (!ata_id_has_trim(ata_dev->id)) mode = "unsupported"; + else if (ata_dev->horkage & 
ATA_HORKAGE_NOTRIM) + mode = "forced_unsupported"; else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) mode = "forced_unqueued"; else if (ata_fpdma_dsm_supported(ata_dev)) diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index a9b0c820f2eb..5d9ee99c2148 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c @@ -4,7 +4,7 @@ * Arasan Compact Flash host controller source file * * Copyright (C) 2011 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any @@ -968,7 +968,7 @@ static struct platform_driver arasan_cf_driver = { module_platform_driver(arasan_cf_driver); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); MODULE_DESCRIPTION("Arasan ATA Compact Flash driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 69de41a87b74..3177b245d2bd 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -240,19 +240,19 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) while ((entry = llist_del_all(&cq->list)) != NULL) { entry = llist_reverse_order(entry); do { + struct request_queue *q = NULL; + cmd = container_of(entry, struct nullb_cmd, ll_list); entry = entry->next; + if (cmd->rq) + q = cmd->rq->q; end_cmd(cmd); - if (cmd->rq) { - struct request_queue *q = cmd->rq->q; - - if (!q->mq_ops && blk_queue_stopped(q)) { - spin_lock(q->queue_lock); - if (blk_queue_stopped(q)) - blk_start_queue(q); - spin_unlock(q->queue_lock); - } + if (q && !q->mq_ops && blk_queue_stopped(q)) { + spin_lock(q->queue_lock); + if (blk_queue_stopped(q)) + blk_start_queue(q); + spin_unlock(q->queue_lock); } } while (entry); } diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index d1d6141920d3..7920c2741b47 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -2108,8 +2108,17 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid) goto out_free_disk; add_disk(ns->disk); - if (ns->ms) - revalidate_disk(ns->disk); + if (ns->ms) { + struct block_device *bd = bdget_disk(ns->disk, 0); + if (!bd) + return; + if (blkdev_get(bd, FMODE_READ, NULL)) { + bdput(bd); + return; + } + blkdev_reread_part(bd); + blkdev_put(bd, FMODE_READ); + } return; out_free_disk: kfree(disk); diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 1e1a4323a71f..9ceb8ac68fdc 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -472,12 +472,11 @@ int btbcm_setup_apple(struct hci_dev *hdev) /* Read Verbose Config Version Info */ skb = btbcm_read_verbose_config(hdev); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1], - get_unaligned_le16(skb->data + 5)); - kfree_skb(skb); + if (!IS_ERR(skb)) { + BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1], + get_unaligned_le16(skb->data + 5)); + kfree_skb(skb); + } set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 283f00a7f036..1082d4bb016a 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -129,8 +129,9 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev, device_initialize(&chip->dev); - chip->cdev.owner = chip->pdev->driver->owner; 
cdev_init(&chip->cdev, &tpm_fops); + chip->cdev.owner = chip->pdev->driver->owner; + chip->cdev.kobj.parent = &chip->dev.kobj; return chip; } diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index 44f9d20c19ac..1267322595da 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -233,6 +233,14 @@ static int crb_acpi_add(struct acpi_device *device) return -ENODEV; } + /* At least some versions of AMI BIOS have a bug that TPM2 table has + * zero address for the control area and therefore we must fail. + */ + if (!buf->control_area_pa) { + dev_err(dev, "TPM2 ACPI table has a zero address for the control area\n"); + return -EINVAL; + } + if (buf->hdr.length < sizeof(struct acpi_tpm2)) { dev_err(dev, "TPM2 ACPI table has wrong size"); return -EINVAL; diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c index bdfb4421c643..f271c350ef94 100644 --- a/drivers/clk/spear/clk-aux-synth.c +++ b/drivers/clk/spear/clk-aux-synth.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c index dffd4ce6c8b5..58d678b5b40a 100644 --- a/drivers/clk/spear/clk-frac-synth.c +++ b/drivers/clk/spear/clk-frac-synth.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c index 1afc18c4effc..1a722e99e76e 100644 --- a/drivers/clk/spear/clk-gpt-synth.c +++ b/drivers/clk/spear/clk-gpt-synth.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c index 1b9b65bca51e..5ebddc528145 100644 --- a/drivers/clk/spear/clk-vco-pll.c +++ b/drivers/clk/spear/clk-vco-pll.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/clk/spear/clk.c b/drivers/clk/spear/clk.c index 628b6d5ed3d9..157fe099ea6a 100644 --- a/drivers/clk/spear/clk.c +++ b/drivers/clk/spear/clk.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. 
This program is licensed "as is" without any diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h index 931737677dfa..9834944f08b1 100644 --- a/drivers/clk/spear/clk.h +++ b/drivers/clk/spear/clk.h @@ -2,7 +2,7 @@ * Clock framework definitions for SPEAr platform * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c index 4daa5977793a..222ce108b41a 100644 --- a/drivers/clk/spear/spear1310_clock.c +++ b/drivers/clk/spear/spear1310_clock.c @@ -4,7 +4,7 @@ * SPEAr1310 machine clock framework source file * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c index 5a5c6648308d..973c9d3fbcf8 100644 --- a/drivers/clk/spear/spear1340_clock.c +++ b/drivers/clk/spear/spear1340_clock.c @@ -4,7 +4,7 @@ * SPEAr1340 machine clock framework source file * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c index bb5f387774e2..404a55edd613 100644 --- a/drivers/clk/spear/spear3xx_clock.c +++ b/drivers/clk/spear/spear3xx_clock.c @@ -2,7 +2,7 @@ * SPEAr3xx machines clock framework source file * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c index 4f649c9cb094..231061fa73a4 100644 --- a/drivers/clk/spear/spear6xx_clock.c +++ b/drivers/clk/spear/spear6xx_clock.c @@ -2,7 +2,7 @@ * SPEAr6xx machines clock framework source file * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index b612411655f9..26063afb3eba 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -169,6 +169,15 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy) } EXPORT_SYMBOL_GPL(get_governor_parent_kobj); +struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) +{ + struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); + + return policy && !policy_is_inactive(policy) ? 
+ policy->freq_table : NULL; +} +EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table); + static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) { u64 idle_time; @@ -1132,6 +1141,7 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu) down_write(&policy->rwsem); policy->cpu = cpu; + policy->governor = NULL; up_write(&policy->rwsem); } diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index df14766a8e06..dfbbf981ed56 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -297,15 +297,6 @@ int cpufreq_table_validate_and_show(struct cpufreq_policy *policy, } EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show); -struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu); - -struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) -{ - struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); - return policy ? policy->freq_table : NULL; -} -EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table); - MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>"); MODULE_DESCRIPTION("CPUfreq frequency table helpers"); MODULE_LICENSE("GPL"); diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index e8e2775c3821..48b7228563ad 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -112,7 +112,12 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv, static void enter_freeze_proper(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index) { - tick_freeze(); + /* + * trace_suspend_resume() called by tick_freeze() for the last CPU + * executing it contains RCU usage regarded as invalid in the idle + * context, so tell RCU about that. + */ + RCU_NONIDLE(tick_freeze()); /* * The state used here cannot be a "coupled" one, because the "coupled" * cpuidle mechanism enables interrupts and doing that with timekeeping @@ -122,7 +127,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv, WARN_ON(!irqs_disabled()); /* * timekeeping_resume() that will be called by tick_unfreeze() for the - * last CPU executing it calls functions containing RCU read-side + * first CPU executing it calls functions containing RCU read-side * critical sections, so tell RCU about that. 
*/ RCU_NONIDLE(tick_unfreeze()); diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c index 67f80813a06f..e4311ce0cd78 100644 --- a/drivers/crypto/nx/nx-aes-ccm.c +++ b/drivers/crypto/nx/nx-aes-ccm.c @@ -494,8 +494,9 @@ out: static int ccm4309_aes_nx_encrypt(struct aead_request *req) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_gcm_rctx *rctx = aead_request_ctx(req); struct blkcipher_desc desc; - u8 *iv = nx_ctx->priv.ccm.iv; + u8 *iv = rctx->iv; iv[0] = 3; memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); @@ -525,8 +526,9 @@ static int ccm_aes_nx_encrypt(struct aead_request *req) static int ccm4309_aes_nx_decrypt(struct aead_request *req) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_gcm_rctx *rctx = aead_request_ctx(req); struct blkcipher_desc desc; - u8 *iv = nx_ctx->priv.ccm.iv; + u8 *iv = rctx->iv; iv[0] = 3; memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c index 2617cd4d54dd..dd7e9f3f5b6b 100644 --- a/drivers/crypto/nx/nx-aes-ctr.c +++ b/drivers/crypto/nx/nx-aes-ctr.c @@ -72,7 +72,7 @@ static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm, if (key_len < CTR_RFC3686_NONCE_SIZE) return -EINVAL; - memcpy(nx_ctx->priv.ctr.iv, + memcpy(nx_ctx->priv.ctr.nonce, in_key + key_len - CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE); @@ -131,14 +131,15 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc, unsigned int nbytes) { struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); - u8 *iv = nx_ctx->priv.ctr.iv; + u8 iv[16]; + memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE); memcpy(iv + CTR_RFC3686_NONCE_SIZE, desc->info, CTR_RFC3686_IV_SIZE); iv[12] = iv[13] = iv[14] = 0; iv[15] = 1; - desc->info = nx_ctx->priv.ctr.iv; + desc->info = iv; return ctr_aes_nx_crypt(desc, dst, src, nbytes); } diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index 08ac6d48688c..92c993f08213 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -317,6 +317,7 @@ out: static int gcm_aes_nx_crypt(struct aead_request *req, int enc) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_gcm_rctx *rctx = aead_request_ctx(req); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct blkcipher_desc desc; unsigned int nbytes = req->cryptlen; @@ -326,7 +327,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) spin_lock_irqsave(&nx_ctx->lock, irq_flags); - desc.info = nx_ctx->priv.gcm.iv; + desc.info = rctx->iv; /* initialize the counter */ *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1; @@ -424,8 +425,8 @@ out: static int gcm_aes_nx_encrypt(struct aead_request *req) { - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); - char *iv = nx_ctx->priv.gcm.iv; + struct nx_gcm_rctx *rctx = aead_request_ctx(req); + char *iv = rctx->iv; memcpy(iv, req->iv, 12); @@ -434,8 +435,8 @@ static int gcm_aes_nx_encrypt(struct aead_request *req) static int gcm_aes_nx_decrypt(struct aead_request *req) { - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); - char *iv = nx_ctx->priv.gcm.iv; + struct nx_gcm_rctx *rctx = aead_request_ctx(req); + char *iv = rctx->iv; memcpy(iv, req->iv, 12); @@ -445,7 +446,8 @@ static int gcm_aes_nx_decrypt(struct aead_request *req) static int gcm4106_aes_nx_encrypt(struct aead_request *req) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); - char *iv = nx_ctx->priv.gcm.iv; + struct nx_gcm_rctx *rctx = aead_request_ctx(req); 
+ char *iv = rctx->iv; char *nonce = nx_ctx->priv.gcm.nonce; memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); @@ -457,7 +459,8 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req) static int gcm4106_aes_nx_decrypt(struct aead_request *req) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); - char *iv = nx_ctx->priv.gcm.iv; + struct nx_gcm_rctx *rctx = aead_request_ctx(req); + char *iv = rctx->iv; char *nonce = nx_ctx->priv.gcm.nonce; memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c index 8c2faffab4a3..c2f7d4befb55 100644 --- a/drivers/crypto/nx/nx-aes-xcbc.c +++ b/drivers/crypto/nx/nx-aes-xcbc.c @@ -42,6 +42,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc, unsigned int key_len) { struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; switch (key_len) { case AES_KEYSIZE_128: @@ -51,7 +52,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc, return -EINVAL; } - memcpy(nx_ctx->priv.xcbc.key, in_key, key_len); + memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len); return 0; } @@ -148,32 +149,29 @@ out: return rc; } -static int nx_xcbc_init(struct shash_desc *desc) +static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm) { - struct xcbc_state *sctx = shash_desc_ctx(desc); - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; - struct nx_sg *out_sg; - int len; + int err; - nx_ctx_init(nx_ctx, HCOP_FC_AES); + err = nx_crypto_ctx_aes_xcbc_init(tfm); + if (err) + return err; - memset(sctx, 0, sizeof *sctx); + nx_ctx_init(nx_ctx, HCOP_FC_AES); NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC; - memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE); - memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key); - - len = AES_BLOCK_SIZE; - out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, - &len, nx_ctx->ap->sglen); + return 0; +} - if (len != AES_BLOCK_SIZE) - return -EINVAL; +static int nx_xcbc_init(struct shash_desc *desc) +{ + struct xcbc_state *sctx = shash_desc_ctx(desc); - nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + memset(sctx, 0, sizeof *sctx); return 0; } @@ -186,6 +184,7 @@ static int nx_xcbc_update(struct shash_desc *desc, struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_sg *in_sg; + struct nx_sg *out_sg; u32 to_process = 0, leftover, total; unsigned int max_sg_len; unsigned long irq_flags; @@ -213,6 +212,17 @@ static int nx_xcbc_update(struct shash_desc *desc, max_sg_len = min_t(u64, max_sg_len, nx_ctx->ap->databytelen/NX_PAGE_SIZE); + data_len = AES_BLOCK_SIZE; + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, + &len, nx_ctx->ap->sglen); + + if (data_len != AES_BLOCK_SIZE) { + rc = -EINVAL; + goto out; + } + + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + do { to_process = total - to_process; to_process = to_process & ~(AES_BLOCK_SIZE - 1); @@ -235,8 +245,10 @@ static int nx_xcbc_update(struct shash_desc *desc, (u8 *) sctx->buffer, &data_len, max_sg_len); - if (data_len != sctx->count) - return -EINVAL; + if (data_len != sctx->count) { + rc = -EINVAL; + goto out; + } } data_len = to_process - sctx->count; @@ -245,8 +257,10 @@ static int nx_xcbc_update(struct shash_desc *desc, &data_len, max_sg_len); - if (data_len != 
to_process - sctx->count) - return -EINVAL; + if (data_len != to_process - sctx->count) { + rc = -EINVAL; + goto out; + } nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); @@ -325,15 +339,19 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out) in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer, &len, nx_ctx->ap->sglen); - if (len != sctx->count) - return -EINVAL; + if (len != sctx->count) { + rc = -EINVAL; + goto out; + } len = AES_BLOCK_SIZE; out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, nx_ctx->ap->sglen); - if (len != AES_BLOCK_SIZE) - return -EINVAL; + if (len != AES_BLOCK_SIZE) { + rc = -EINVAL; + goto out; + } nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); @@ -372,7 +390,7 @@ struct shash_alg nx_shash_aes_xcbc_alg = { .cra_blocksize = AES_BLOCK_SIZE, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct nx_crypto_ctx), - .cra_init = nx_crypto_ctx_aes_xcbc_init, + .cra_init = nx_crypto_ctx_aes_xcbc_init2, .cra_exit = nx_crypto_ctx_exit, } }; diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c index 4e91bdb83c59..08f8d5cd6334 100644 --- a/drivers/crypto/nx/nx-sha256.c +++ b/drivers/crypto/nx/nx-sha256.c @@ -29,34 +29,28 @@ #include "nx.h" -static int nx_sha256_init(struct shash_desc *desc) +static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm) { - struct sha256_state *sctx = shash_desc_ctx(desc); - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); - struct nx_sg *out_sg; - int len; - u32 max_sg_len; + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + int err; - nx_ctx_init(nx_ctx, HCOP_FC_SHA); + err = nx_crypto_ctx_sha_init(tfm); + if (err) + return err; - memset(sctx, 0, sizeof *sctx); + nx_ctx_init(nx_ctx, HCOP_FC_SHA); nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256]; NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256); - max_sg_len = min_t(u64, nx_ctx->ap->sglen, - nx_driver.of.max_sg_len/sizeof(struct nx_sg)); - max_sg_len = min_t(u64, max_sg_len, - nx_ctx->ap->databytelen/NX_PAGE_SIZE); + return 0; +} - len = SHA256_DIGEST_SIZE; - out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, - &len, max_sg_len); - nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); +static int nx_sha256_init(struct shash_desc *desc) { + struct sha256_state *sctx = shash_desc_ctx(desc); - if (len != SHA256_DIGEST_SIZE) - return -EINVAL; + memset(sctx, 0, sizeof *sctx); sctx->state[0] = __cpu_to_be32(SHA256_H0); sctx->state[1] = __cpu_to_be32(SHA256_H1); @@ -78,6 +72,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *in_sg; + struct nx_sg *out_sg; u64 to_process = 0, leftover, total; unsigned long irq_flags; int rc = 0; @@ -108,6 +103,16 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, max_sg_len = min_t(u64, max_sg_len, nx_ctx->ap->databytelen/NX_PAGE_SIZE); + data_len = SHA256_DIGEST_SIZE; + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, + &data_len, max_sg_len); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + + if (data_len != SHA256_DIGEST_SIZE) { + rc = -EINVAL; + goto out; + } + do { /* * to_process: the SHA256_BLOCK_SIZE data chunk to process in @@ -282,7 +287,7 @@ struct shash_alg nx_shash_sha256_alg = { .cra_blocksize = SHA256_BLOCK_SIZE, .cra_module = THIS_MODULE, 
.cra_ctxsize = sizeof(struct nx_crypto_ctx), - .cra_init = nx_crypto_ctx_sha_init, + .cra_init = nx_crypto_ctx_sha256_init, .cra_exit = nx_crypto_ctx_exit, } }; diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c index e6a58d2ee628..aff0fe58eac0 100644 --- a/drivers/crypto/nx/nx-sha512.c +++ b/drivers/crypto/nx/nx-sha512.c @@ -28,34 +28,29 @@ #include "nx.h" -static int nx_sha512_init(struct shash_desc *desc) +static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm) { - struct sha512_state *sctx = shash_desc_ctx(desc); - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); - struct nx_sg *out_sg; - int len; - u32 max_sg_len; + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + int err; - nx_ctx_init(nx_ctx, HCOP_FC_SHA); + err = nx_crypto_ctx_sha_init(tfm); + if (err) + return err; - memset(sctx, 0, sizeof *sctx); + nx_ctx_init(nx_ctx, HCOP_FC_SHA); nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512]; NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512); - max_sg_len = min_t(u64, nx_ctx->ap->sglen, - nx_driver.of.max_sg_len/sizeof(struct nx_sg)); - max_sg_len = min_t(u64, max_sg_len, - nx_ctx->ap->databytelen/NX_PAGE_SIZE); + return 0; +} - len = SHA512_DIGEST_SIZE; - out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, - &len, max_sg_len); - nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); +static int nx_sha512_init(struct shash_desc *desc) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); - if (len != SHA512_DIGEST_SIZE) - return -EINVAL; + memset(sctx, 0, sizeof *sctx); sctx->state[0] = __cpu_to_be64(SHA512_H0); sctx->state[1] = __cpu_to_be64(SHA512_H1); @@ -77,6 +72,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *in_sg; + struct nx_sg *out_sg; u64 to_process, leftover = 0, total; unsigned long irq_flags; int rc = 0; @@ -107,6 +103,16 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, max_sg_len = min_t(u64, max_sg_len, nx_ctx->ap->databytelen/NX_PAGE_SIZE); + data_len = SHA512_DIGEST_SIZE; + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, + &data_len, max_sg_len); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + + if (data_len != SHA512_DIGEST_SIZE) { + rc = -EINVAL; + goto out; + } + do { /* * to_process: the SHA512_BLOCK_SIZE data chunk to process in @@ -288,7 +294,7 @@ struct shash_alg nx_shash_sha512_alg = { .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct nx_crypto_ctx), - .cra_init = nx_crypto_ctx_sha_init, + .cra_init = nx_crypto_ctx_sha512_init, .cra_exit = nx_crypto_ctx_exit, } }; diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index f6198f29a4a8..436971343ff7 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -713,12 +713,15 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode) /* entry points from the crypto tfm initializers */ int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm) { + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + sizeof(struct nx_ccm_rctx)); return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, NX_MODE_AES_CCM); } int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm) { + crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx)); return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES, NX_MODE_AES_GCM); } diff --git a/drivers/crypto/nx/nx.h 
b/drivers/crypto/nx/nx.h index de3ea8738146..cdff03a42ae7 100644 --- a/drivers/crypto/nx/nx.h +++ b/drivers/crypto/nx/nx.h @@ -2,6 +2,8 @@ #ifndef __NX_H__ #define __NX_H__ +#include <crypto/ctr.h> + #define NX_NAME "nx-crypto" #define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver" #define NX_VERSION "1.0" @@ -91,8 +93,11 @@ struct nx_crypto_driver { #define NX_GCM4106_NONCE_LEN (4) #define NX_GCM_CTR_OFFSET (12) -struct nx_gcm_priv { +struct nx_gcm_rctx { u8 iv[16]; +}; + +struct nx_gcm_priv { u8 iauth_tag[16]; u8 nonce[NX_GCM4106_NONCE_LEN]; }; @@ -100,8 +105,11 @@ struct nx_gcm_priv { #define NX_CCM_AES_KEY_LEN (16) #define NX_CCM4309_AES_KEY_LEN (19) #define NX_CCM4309_NONCE_LEN (3) -struct nx_ccm_priv { +struct nx_ccm_rctx { u8 iv[16]; +}; + +struct nx_ccm_priv { u8 b0[16]; u8 iauth_tag[16]; u8 oauth_tag[16]; @@ -113,7 +121,7 @@ struct nx_xcbc_priv { }; struct nx_ctr_priv { - u8 iv[16]; + u8 nonce[CTR_RFC3686_NONCE_SIZE]; }; struct nx_crypto_ctx { diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 46307098f8ba..0a70e46d5416 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -536,9 +536,6 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) dmaengine_terminate_all(dd->dma_lch_in); dmaengine_terminate_all(dd->dma_lch_out); - dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); - dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE); - return err; } diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 1022c2e1a2b0..cf1c87fa1edd 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1746,4 +1746,4 @@ EXPORT_SYMBOL_GPL(dw_dma_enable); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 4fd9961d552e..d42537425438 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -305,10 +305,17 @@ const char *cper_mem_err_unpack(struct trace_seq *p, return ret; } -static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem) +static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem, + int len) { struct cper_mem_err_compact cmem; + /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */ + if (len == sizeof(struct cper_sec_mem_err_old) && + (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) { + pr_err(FW_WARN "valid bits set for fields beyond structure\n"); + return; + } if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS) printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status); if (mem->validation_bits & CPER_MEM_VALID_PA) @@ -405,8 +412,10 @@ static void cper_estatus_print_section( } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) { struct cper_sec_mem_err *mem_err = (void *)(gdata + 1); printk("%s""section_type: memory error\n", newpfx); - if (gdata->error_data_length >= sizeof(*mem_err)) - cper_print_mem(newpfx, mem_err); + if (gdata->error_data_length >= + sizeof(struct cper_sec_mem_err_old)) + cper_print_mem(newpfx, mem_err, + gdata->error_data_length); else goto err_section_too_small; } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) { diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c index 113dc07fbfbe..9ea86d2ac054 100644 --- a/drivers/gpio/gpio-brcmstb.c +++ b/drivers/gpio/gpio-brcmstb.c @@ 
-292,6 +292,15 @@ static int brcmstb_gpio_remove(struct platform_device *pdev) struct brcmstb_gpio_bank *bank; int ret = 0; + if (!priv) { + dev_err(&pdev->dev, "called %s without drvdata!\n", __func__); + return -EFAULT; + } + + /* + * You can lose return values below, but we report all errors, and it's + * more important to actually perform all of the steps. + */ list_for_each(pos, &priv->bank_list) { bank = list_entry(pos, struct brcmstb_gpio_bank, node); ret = bgpio_remove(&bank->bgc); @@ -414,6 +423,8 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; + platform_set_drvdata(pdev, priv); + INIT_LIST_HEAD(&priv->bank_list); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); reg_base = devm_ioremap_resource(dev, res); @@ -434,7 +445,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) priv->parent_irq = -ENOENT; } - INIT_LIST_HEAD(&priv->bank_list); if (brcmstb_gpio_sanity_check_banks(dev, np, res)) return -EINVAL; @@ -515,8 +525,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n", num_banks, priv->gpio_base, gpio_base - 1); - platform_set_drvdata(pdev, priv); - return 0; fail: diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c index aed4ca9338bc..7d3c90e9da71 100644 --- a/drivers/gpio/gpio-max732x.c +++ b/drivers/gpio/gpio-max732x.c @@ -603,6 +603,7 @@ static int max732x_setup_gpio(struct max732x_chip *chip, gc->base = gpio_start; gc->ngpio = port; gc->label = chip->client->name; + gc->dev = &chip->client->dev; gc->owner = THIS_MODULE; return port; diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index a2ff0eec5b36..dba67b230e98 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -500,8 +500,10 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type) raw_spin_lock_irqsave(&bank->lock, flags); retval = omap_set_gpio_triggering(bank, offset, type); - if (retval) + if (retval) { + spin_unlock_irqrestore(&bank->lock, flags); goto error; + } omap_gpio_init_irq(bank, offset); if (!omap_gpio_is_input(bank, offset)) { raw_spin_unlock_irqrestore(&bank->lock, flags); @@ -1185,6 +1187,7 @@ static int omap_gpio_probe(struct platform_device *pdev) bank->irq = res->start; bank->dev = dev; bank->chip.dev = dev; + bank->chip.owner = THIS_MODULE; bank->dbck_flag = pdata->dbck_flag; bank->stride = pdata->bank_stride; bank->width = pdata->bank_width; diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index d233eb3b8132..50caeb1ee350 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -570,6 +570,10 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, "could not connect irqchip to gpiochip\n"); return ret; } + + gpiochip_set_chained_irqchip(&chip->gpio_chip, + &pca953x_irq_chip, + client->irq, NULL); } return 0; diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c index 77fe5d3cb105..d5284dfe01fe 100644 --- a/drivers/gpio/gpio-xilinx.c +++ b/drivers/gpio/gpio-xilinx.c @@ -220,9 +220,9 @@ static void xgpio_save_regs(struct of_mm_gpio_chip *mm_gc) if (!chip->gpio_width[1]) return; - xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET + XGPIO_TRI_OFFSET, + xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET + XGPIO_CHANNEL_OFFSET, chip->gpio_state[1]); - xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET + XGPIO_TRI_OFFSET, + xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET + XGPIO_CHANNEL_OFFSET, chip->gpio_dir[1]); } 
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index bb3aacad4d3b..27348e7cb705 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -757,6 +757,7 @@ static int zynq_gpio_remove(struct platform_device *pdev) gpiochip_remove(&gpio->chip); clk_disable_unprepare(gpio->clk); device_set_wakeup_capable(&pdev->dev, 0); + pm_runtime_disable(&pdev->dev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 01657830b470..e9fde72cf038 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1614,6 +1614,9 @@ struct amdgpu_uvd { #define AMDGPU_MAX_VCE_HANDLES 16 #define AMDGPU_VCE_FIRMWARE_OFFSET 256 +#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0) +#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1) + struct amdgpu_vce { struct amdgpu_bo *vcpu_bo; uint64_t gpu_addr; @@ -1626,6 +1629,7 @@ struct amdgpu_vce { const struct firmware *fw; /* VCE firmware */ struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; struct amdgpu_irq_src irq; + unsigned harvest_config; }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index d63135bf29c0..1f040d85ac47 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -669,6 +669,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, static int amdgpu_cs_dependencies(struct amdgpu_device *adev, struct amdgpu_cs_parser *p) { + struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct amdgpu_ib *ib; int i, j, r; @@ -694,6 +695,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, for (j = 0; j < num_deps; ++j) { struct amdgpu_fence *fence; struct amdgpu_ring *ring; + struct amdgpu_ctx *ctx; r = amdgpu_cs_get_ring(adev, deps[j].ip_type, deps[j].ip_instance, @@ -701,14 +703,21 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, if (r) return r; + ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id); + if (ctx == NULL) + return -EINVAL; + r = amdgpu_fence_recreate(ring, p->filp, deps[j].handle, &fence); - if (r) + if (r) { + amdgpu_ctx_put(ctx); return r; + } amdgpu_sync_fence(&ib->sync, fence); amdgpu_fence_unref(&fence); + amdgpu_ctx_put(ctx); } } @@ -808,12 +817,16 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance, wait->in.ring, &ring); - if (r) + if (r) { + amdgpu_ctx_put(ctx); return r; + } r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence); - if (r) + if (r) { + amdgpu_ctx_put(ctx); return r; + } r = fence_wait_timeout(&fence->base, true, timeout); amdgpu_fence_unref(&fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ba46be361c9b..d79009b65867 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1207,10 +1207,15 @@ static int amdgpu_early_init(struct amdgpu_device *adev) } else { if (adev->ip_blocks[i].funcs->early_init) { r = adev->ip_blocks[i].funcs->early_init((void *)adev); - if (r) + if (r == -ENOENT) + adev->ip_block_enabled[i] = false; + else if (r) return r; + else + adev->ip_block_enabled[i] = true; + } else { + adev->ip_block_enabled[i] = true; } - adev->ip_block_enabled[i] = true; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 5533434c7a8f..31ad444c6386 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -459,6 +459,7 @@ static 
int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap)); dev_info.vram_type = adev->mc.vram_type; dev_info.vram_bit_width = adev->mc.vram_width; + dev_info.vce_harvest_config = adev->vce.harvest_config; return copy_to_user(out, &dev_info, min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index f75a31df30bd..ace870afc7d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -494,29 +494,67 @@ static void cz_dpm_fini(struct amdgpu_device *adev) amdgpu_free_extended_power_table(adev); } +#define ixSMUSVI_NB_CURRENTVID 0xD8230044 +#define CURRENT_NB_VID_MASK 0xff000000 +#define CURRENT_NB_VID__SHIFT 24 +#define ixSMUSVI_GFX_CURRENTVID 0xD8230048 +#define CURRENT_GFX_VID_MASK 0xff000000 +#define CURRENT_GFX_VID__SHIFT 24 + static void cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, struct seq_file *m) { + struct cz_power_info *pi = cz_get_pi(adev); struct amdgpu_clock_voltage_dependency_table *table = &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - u32 current_index = - (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; - u32 sclk, tmp; - u16 vddc; - - if (current_index >= NUM_SCLK_LEVELS) { - seq_printf(m, "invalid dpm profile %d\n", current_index); + struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = + &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; + struct amdgpu_vce_clock_voltage_dependency_table *vce_table = + &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX), + TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX); + u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2), + TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX); + u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2), + TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX); + u32 sclk, vclk, dclk, ecclk, tmp; + u16 vddnb, vddgfx; + + if (sclk_index >= NUM_SCLK_LEVELS) { + seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index); } else { - sclk = table->entries[current_index].clk; - tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & - SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> - SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; - vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); - seq_printf(m, "power level %d sclk: %u vddc: %u\n", - current_index, sclk, vddc); + sclk = table->entries[sclk_index].clk; + seq_printf(m, "%u sclk: %u\n", sclk_index, sclk); + } + + tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) & + CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT; + vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); + tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) & + CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; + vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); + seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx); + + seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? 
"dis" : "en"); + if (!pi->uvd_power_gated) { + if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { + seq_printf(m, "invalid uvd dpm level %d\n", uvd_index); + } else { + vclk = uvd_table->entries[uvd_index].vclk; + dclk = uvd_table->entries[uvd_index].dclk; + seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk); + } + } + + seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en"); + if (!pi->vce_power_gated) { + if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) { + seq_printf(m, "invalid vce dpm level %d\n", vce_index); + } else { + ecclk = vce_table->entries[vce_index].ecclk; + seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk); + } } } @@ -1679,25 +1717,31 @@ static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev) if (ret) return ret; - DRM_INFO("DPM unforce state min=%d, max=%d.\n", - pi->sclk_dpm.soft_min_clk, - pi->sclk_dpm.soft_max_clk); + DRM_DEBUG("DPM unforce state min=%d, max=%d.\n", + pi->sclk_dpm.soft_min_clk, + pi->sclk_dpm.soft_max_clk); return 0; } static int cz_dpm_force_dpm_level(struct amdgpu_device *adev, - enum amdgpu_dpm_forced_level level) + enum amdgpu_dpm_forced_level level) { int ret = 0; switch (level) { case AMDGPU_DPM_FORCED_LEVEL_HIGH: + ret = cz_dpm_unforce_dpm_levels(adev); + if (ret) + return ret; ret = cz_dpm_force_highest(adev); if (ret) return ret; break; case AMDGPU_DPM_FORCED_LEVEL_LOW: + ret = cz_dpm_unforce_dpm_levels(adev); + if (ret) + return ret; ret = cz_dpm_force_lowest(adev); if (ret) return ret; @@ -1711,6 +1755,8 @@ static int cz_dpm_force_dpm_level(struct amdgpu_device *adev, break; } + adev->pm.dpm.forced_level = level; + return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 6e77964f1b64..e70a26f587a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2632,6 +2632,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + unsigned type; switch (mode) { case DRM_MODE_DPMS_ON: @@ -2640,6 +2641,9 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v10_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v10_0_vga_enable(crtc, false); + /* Make sure VBLANK interrupt is still enabled */ + type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); + amdgpu_irq_update(adev, &adev->crtc_irq, type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); dce_v10_0_crtc_load_lut(crtc); break; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 7f7abb0e0be5..dcb402ee048a 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2631,6 +2631,7 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + unsigned type; switch (mode) { case DRM_MODE_DPMS_ON: @@ -2639,6 +2640,9 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v11_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v11_0_vga_enable(crtc, false); + /* Make sure VBLANK interrupt is still enabled */ + type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); + amdgpu_irq_update(adev, &adev->crtc_irq, type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); 
dce_v11_0_crtc_load_lut(crtc); break; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 08387dfd98a7..cc050a329c49 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2566,6 +2566,7 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + unsigned type; switch (mode) { case DRM_MODE_DPMS_ON: @@ -2574,6 +2575,9 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v8_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v8_0_vga_enable(crtc, false); + /* Make sure VBLANK interrupt is still enabled */ + type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); + amdgpu_irq_update(adev, &adev->crtc_irq, type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); dce_v8_0_crtc_load_lut(crtc); break; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 7b683fb2173c..1c7c992dea37 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1813,10 +1813,7 @@ static u32 gfx_v8_0_get_rb_disabled(struct amdgpu_device *adev, u32 data, mask; data = RREG32(mmCC_RB_BACKEND_DISABLE); - if (data & 1) - data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; - else - data = 0; + data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index d62c4002e39c..d1064ca3670e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -35,6 +35,8 @@ #include "oss/oss_2_0_d.h" #include "oss/oss_2_0_sh_mask.h" #include "gca/gfx_8_0_d.h" +#include "smu/smu_7_1_2_d.h" +#include "smu/smu_7_1_2_sh_mask.h" #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 @@ -112,6 +114,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (idx = 0; idx < 2; ++idx) { + + if (adev->vce.harvest_config & (1 << idx)) + continue; + if(idx == 0) WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); @@ -190,10 +196,52 @@ static int vce_v3_0_start(struct amdgpu_device *adev) return 0; } +#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS 0xC0014074 +#define VCE_HARVEST_FUSE_MACRO__SHIFT 27 +#define VCE_HARVEST_FUSE_MACRO__MASK 0x18000000 + +static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev) +{ + u32 tmp; + unsigned ret; + + if (adev->flags & AMDGPU_IS_APU) + tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) & + VCE_HARVEST_FUSE_MACRO__MASK) >> + VCE_HARVEST_FUSE_MACRO__SHIFT; + else + tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) & + CC_HARVEST_FUSES__VCE_DISABLE_MASK) >> + CC_HARVEST_FUSES__VCE_DISABLE__SHIFT; + + switch (tmp) { + case 1: + ret = AMDGPU_VCE_HARVEST_VCE0; + break; + case 2: + ret = AMDGPU_VCE_HARVEST_VCE1; + break; + case 3: + ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1; + break; + default: + ret = 0; + } + + return ret; +} + static int vce_v3_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev); + + if ((adev->vce.harvest_config & + (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) == + (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) + return -ENOENT; + 
vce_v3_0_set_ring_funcs(adev); vce_v3_0_set_irq_funcs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index fa5a4448531d..68552da40287 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -122,6 +122,32 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v) spin_unlock_irqrestore(&adev->smc_idx_lock, flags); } +/* smu_8_0_d.h */ +#define mmMP0PUB_IND_INDEX 0x180 +#define mmMP0PUB_IND_DATA 0x181 + +static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + WREG32(mmMP0PUB_IND_INDEX, (reg)); + r = RREG32(mmMP0PUB_IND_DATA); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + return r; +} + +static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + WREG32(mmMP0PUB_IND_INDEX, (reg)); + WREG32(mmMP0PUB_IND_DATA, (v)); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); +} + static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg) { unsigned long flags; @@ -1222,8 +1248,13 @@ static int vi_common_early_init(void *handle) bool smc_enabled = false; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->smc_rreg = &vi_smc_rreg; - adev->smc_wreg = &vi_smc_wreg; + if (adev->flags & AMDGPU_IS_APU) { + adev->smc_rreg = &cz_smc_rreg; + adev->smc_wreg = &cz_smc_wreg; + } else { + adev->smc_rreg = &vi_smc_rreg; + adev->smc_wreg = &vi_smc_wreg; + } adev->pcie_rreg = &vi_pcie_rreg; adev->pcie_wreg = &vi_pcie_wreg; adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg; diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 42d2ffa08716..01ffe9bffe38 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -531,8 +531,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc, drm_crtc_vblank_off(crtc); - crtc->mode = *adj; - val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA; if (val != dcrtc->dumb_ctrl) { dcrtc->dumb_ctrl = val; diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 580e10acaa3a..60a688ef81c7 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -69,8 +69,9 @@ void armada_gem_free_object(struct drm_gem_object *obj) if (dobj->obj.import_attach) { /* We only ever display imported data */ - dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt, - DMA_TO_DEVICE); + if (dobj->sgt) + dma_buf_unmap_attachment(dobj->obj.import_attach, + dobj->sgt, DMA_TO_DEVICE); drm_prime_gem_destroy(&dobj->obj, NULL); } diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index c5b06fdb459c..e939faba7fcc 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -7,6 +7,7 @@ * published by the Free Software Foundation. */ #include <drm/drmP.h> +#include <drm/drm_plane_helper.h> #include "armada_crtc.h" #include "armada_drm.h" #include "armada_fb.h" @@ -85,16 +86,8 @@ static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data) if (fb) armada_drm_queue_unref_work(dcrtc->crtc.dev, fb); -} -static unsigned armada_limit(int start, unsigned size, unsigned max) -{ - int end = start + size; - if (end < 0) - return 0; - if (start < 0) - start = 0; - return (unsigned)end > max ? 
max - start : end - start; + wake_up(&dplane->vbl.wait); } static int @@ -105,26 +98,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, { struct armada_plane *dplane = drm_to_armada_plane(plane); struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + struct drm_rect src = { + .x1 = src_x, + .y1 = src_y, + .x2 = src_x + src_w, + .y2 = src_y + src_h, + }; + struct drm_rect dest = { + .x1 = crtc_x, + .y1 = crtc_y, + .x2 = crtc_x + crtc_w, + .y2 = crtc_y + crtc_h, + }; + const struct drm_rect clip = { + .x2 = crtc->mode.hdisplay, + .y2 = crtc->mode.vdisplay, + }; uint32_t val, ctrl0; unsigned idx = 0; + bool visible; int ret; - crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay); - crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay); + ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip, + 0, INT_MAX, true, false, &visible); + if (ret) + return ret; + ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) | CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) | CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA; /* Does the position/size result in nothing to display? */ - if (crtc_w == 0 || crtc_h == 0) { + if (!visible) ctrl0 &= ~CFG_DMA_ENA; - } - - /* - * FIXME: if the starting point is off screen, we need to - * adjust src_x, src_y, src_w, src_h appropriately, and - * according to the scale. - */ if (!dcrtc->plane) { dcrtc->plane = plane; @@ -134,15 +140,19 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, /* FIXME: overlay on an interlaced display */ /* Just updating the position/size? */ if (plane->fb == fb && dplane->ctrl0 == ctrl0) { - val = (src_h & 0xffff0000) | src_w >> 16; + val = (drm_rect_height(&src) & 0xffff0000) | + drm_rect_width(&src) >> 16; dplane->src_hw = val; writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN); - val = crtc_h << 16 | crtc_w; + + val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest); dplane->dst_hw = val; writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN); - val = crtc_y << 16 | crtc_x; + + val = dest.y1 << 16 | dest.x1; dplane->dst_yx = val; writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN); + return 0; } else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) { /* Power up the Y/U/V FIFOs on ENA 0->1 transitions */ @@ -150,15 +160,14 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, dcrtc->base + LCD_SPU_SRAM_PARA1); } - ret = wait_event_timeout(dplane->vbl.wait, - list_empty(&dplane->vbl.update.node), - HZ/25); - if (ret < 0) - return ret; + wait_event_timeout(dplane->vbl.wait, + list_empty(&dplane->vbl.update.node), + HZ/25); if (plane->fb != fb) { struct armada_gem_object *obj = drm_fb_obj(fb); - uint32_t sy, su, sv; + uint32_t addr[3], pixel_format; + int i, num_planes, hsub; /* * Take a reference on the new framebuffer - we want to @@ -178,26 +187,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, older_fb); } - src_y >>= 16; - src_x >>= 16; - sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] + - src_x * fb->bits_per_pixel / 8; - su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] + - src_x; - sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] + - src_x; + src_y = src.y1 >> 16; + src_x = src.x1 >> 16; - armada_reg_queue_set(dplane->vbl.regs, idx, sy, + pixel_format = fb->pixel_format; + hsub = drm_format_horz_chroma_subsampling(pixel_format); + num_planes = drm_format_num_planes(pixel_format); + + /* + * Annoyingly, shifting a YUYV-format image by one pixel + * causes the U/V planes to 
toggle. Toggle the UV swap. + * (Unfortunately, this causes momentary colour flickering.) + */ + if (src_x & (hsub - 1) && num_planes == 1) + ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV); + + for (i = 0; i < num_planes; i++) + addr[i] = obj->dev_addr + fb->offsets[i] + + src_y * fb->pitches[i] + + src_x * drm_format_plane_cpp(pixel_format, i); + for (; i < ARRAY_SIZE(addr); i++) + addr[i] = 0; + + armada_reg_queue_set(dplane->vbl.regs, idx, addr[0], LCD_SPU_DMA_START_ADDR_Y0); - armada_reg_queue_set(dplane->vbl.regs, idx, su, + armada_reg_queue_set(dplane->vbl.regs, idx, addr[1], LCD_SPU_DMA_START_ADDR_U0); - armada_reg_queue_set(dplane->vbl.regs, idx, sv, + armada_reg_queue_set(dplane->vbl.regs, idx, addr[2], LCD_SPU_DMA_START_ADDR_V0); - armada_reg_queue_set(dplane->vbl.regs, idx, sy, + armada_reg_queue_set(dplane->vbl.regs, idx, addr[0], LCD_SPU_DMA_START_ADDR_Y1); - armada_reg_queue_set(dplane->vbl.regs, idx, su, + armada_reg_queue_set(dplane->vbl.regs, idx, addr[1], LCD_SPU_DMA_START_ADDR_U1); - armada_reg_queue_set(dplane->vbl.regs, idx, sv, + armada_reg_queue_set(dplane->vbl.regs, idx, addr[2], LCD_SPU_DMA_START_ADDR_V1); val = fb->pitches[0] << 16 | fb->pitches[0]; @@ -208,24 +230,27 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, LCD_SPU_DMA_PITCH_UV); } - val = (src_h & 0xffff0000) | src_w >> 16; + val = (drm_rect_height(&src) & 0xffff0000) | drm_rect_width(&src) >> 16; if (dplane->src_hw != val) { dplane->src_hw = val; armada_reg_queue_set(dplane->vbl.regs, idx, val, LCD_SPU_DMA_HPXL_VLN); } - val = crtc_h << 16 | crtc_w; + + val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest); if (dplane->dst_hw != val) { dplane->dst_hw = val; armada_reg_queue_set(dplane->vbl.regs, idx, val, LCD_SPU_DZM_HPXL_VLN); } - val = crtc_y << 16 | crtc_x; + + val = dest.y1 << 16 | dest.x1; if (dplane->dst_yx != val) { dplane->dst_yx = val; armada_reg_queue_set(dplane->vbl.regs, idx, val, LCD_SPU_DMA_OVSA_HPXL_VLN); } + if (dplane->ctrl0 != ctrl0) { dplane->ctrl0 = ctrl0; armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0, @@ -279,7 +304,11 @@ static int armada_plane_disable(struct drm_plane *plane) static void armada_plane_destroy(struct drm_plane *plane) { - kfree(plane); + struct armada_plane *dplane = drm_to_armada_plane(plane); + + drm_plane_cleanup(plane); + + kfree(dplane); } static int armada_plane_set_property(struct drm_plane *plane, diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index f69b92535505..5ae5c6923128 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -355,6 +355,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev) planes->overlays[i]->base.possible_crtcs = 1 << crtc->id; drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs); + drm_crtc_vblank_reset(&crtc->base); dc->crtc = &crtc->base; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 60b0c13d7ff5..6fad1f9648f3 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -313,20 +313,20 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) pm_runtime_enable(dev->dev); - ret = atmel_hlcdc_dc_modeset_init(dev); + ret = drm_vblank_init(dev, 1); if (ret < 0) { - dev_err(dev->dev, "failed to initialize mode setting\n"); + dev_err(dev->dev, "failed to initialize vblank\n"); goto err_periph_clk_disable; } - drm_mode_config_reset(dev); - - ret = drm_vblank_init(dev, 1); + ret = 
atmel_hlcdc_dc_modeset_init(dev); if (ret < 0) { - dev_err(dev->dev, "failed to initialize vblank\n"); + dev_err(dev->dev, "failed to initialize mode setting\n"); goto err_periph_clk_disable; } + drm_mode_config_reset(dev); + pm_runtime_get_sync(dev->dev); ret = drm_irq_install(dev, dc->hlcdc->irq); pm_runtime_put_sync(dev->dev); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index b9ba06176eb1..fed748311b92 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -2706,8 +2706,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - /* For some reason crtc x/y offsets are signed internally. */ - if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX) + /* + * Universal plane src offsets are only 16.16, prevent havoc for + * drivers using universal plane code internally. + */ + if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000) return -ERANGE; drm_modeset_lock_all(dev); @@ -5395,12 +5398,9 @@ void drm_mode_config_reset(struct drm_device *dev) if (encoder->funcs->reset) encoder->funcs->reset(encoder); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - connector->status = connector_status_unknown; - + list_for_each_entry(connector, &dev->mode_config.connector_list, head) if (connector->funcs->reset) connector->funcs->reset(connector); - } } EXPORT_SYMBOL(drm_mode_config_reset); diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index aa8bbb460c57..9cfcd0aef0df 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -70,6 +70,8 @@ #define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t) +#define DRM_IOCTL_MODE_ADDFB232 DRM_IOWR(0xb8, drm_mode_fb_cmd232_t) + typedef struct drm_version_32 { int version_major; /**< Major version */ int version_minor; /**< Minor version */ @@ -1016,6 +1018,63 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, return 0; } +typedef struct drm_mode_fb_cmd232 { + u32 fb_id; + u32 width; + u32 height; + u32 pixel_format; + u32 flags; + u32 handles[4]; + u32 pitches[4]; + u32 offsets[4]; + u64 modifier[4]; +} __attribute__((packed)) drm_mode_fb_cmd232_t; + +static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct drm_mode_fb_cmd232 __user *argp = (void __user *)arg; + struct drm_mode_fb_cmd232 req32; + struct drm_mode_fb_cmd2 __user *req64; + int i; + int err; + + if (copy_from_user(&req32, argp, sizeof(req32))) + return -EFAULT; + + req64 = compat_alloc_user_space(sizeof(*req64)); + + if (!access_ok(VERIFY_WRITE, req64, sizeof(*req64)) + || __put_user(req32.width, &req64->width) + || __put_user(req32.height, &req64->height) + || __put_user(req32.pixel_format, &req64->pixel_format) + || __put_user(req32.flags, &req64->flags)) + return -EFAULT; + + for (i = 0; i < 4; i++) { + if (__put_user(req32.handles[i], &req64->handles[i])) + return -EFAULT; + if (__put_user(req32.pitches[i], &req64->pitches[i])) + return -EFAULT; + if (__put_user(req32.offsets[i], &req64->offsets[i])) + return -EFAULT; + if (__put_user(req32.modifier[i], &req64->modifier[i])) + return -EFAULT; + } + + err = drm_ioctl(file, DRM_IOCTL_MODE_ADDFB2, (unsigned long)req64); + if (err) + return err; + + if (__get_user(req32.fb_id, &req64->fb_id)) + return -EFAULT; + + if (copy_to_user(argp, &req32, sizeof(req32))) + return -EFAULT; + + return 0; +} + static drm_ioctl_compat_t *drm_compat_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = 
compat_drm_version, [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique, @@ -1048,6 +1107,7 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw, #endif [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank, + [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2, }; /** diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 542fac628b28..5f27290201e0 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -826,6 +826,7 @@ struct intel_context { struct kref ref; int user_handle; uint8_t remap_slice; + struct drm_i915_private *i915; struct drm_i915_file_private *file_priv; struct i915_ctx_hang_stats hang_stats; struct i915_hw_ppgtt *ppgtt; @@ -2036,8 +2037,6 @@ struct drm_i915_gem_object { unsigned int cache_level:3; unsigned int cache_dirty:1; - unsigned int has_dma_mapping:1; - unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; unsigned int pin_display; @@ -3116,7 +3115,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor); int i915_debugfs_connector_add(struct drm_connector *connector); void intel_display_crc_init(struct drm_device *dev); #else -static inline int i915_debugfs_connector_add(struct drm_connector *connector) {} +static inline int i915_debugfs_connector_add(struct drm_connector *connector) +{ return 0; } static inline void intel_display_crc_init(struct drm_device *dev) {} #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 248fd1ac7b3a..52b446b27b4d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -213,7 +213,6 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) sg_dma_len(sg) = obj->base.size; obj->pages = st; - obj->has_dma_mapping = true; return 0; } @@ -265,8 +264,6 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj) sg_free_table(obj->pages); kfree(obj->pages); - - obj->has_dma_mapping = false; } static void @@ -2139,6 +2136,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; } + i915_gem_gtt_finish_object(obj); + if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_save_bit_17_swizzle(obj); @@ -2199,6 +2198,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) struct sg_page_iter sg_iter; struct page *page; unsigned long last_pfn = 0; /* suppress gcc warning */ + int ret; gfp_t gfp; /* Assert that the object is not currently in any GPU domain. As it @@ -2246,8 +2246,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) */ i915_gem_shrink_all(dev_priv); page = shmem_read_mapping_page(mapping, i); - if (IS_ERR(page)) + if (IS_ERR(page)) { + ret = PTR_ERR(page); goto err_pages; + } } #ifdef CONFIG_SWIOTLB if (swiotlb_nr_tbl()) { @@ -2276,6 +2278,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) sg_mark_end(sg); obj->pages = st; + ret = i915_gem_gtt_prepare_object(obj); + if (ret) + goto err_pages; + if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_do_bit_17_swizzle(obj); @@ -2300,10 +2306,10 @@ err_pages: * space and so want to translate the error from shmemfs back to our * usual understanding of ENOMEM. 
*/ - if (PTR_ERR(page) == -ENOSPC) - return -ENOMEM; - else - return PTR_ERR(page); + if (ret == -ENOSPC) + ret = -ENOMEM; + + return ret; } /* Ensure that the associated pages are gathered from the backing storage @@ -2542,6 +2548,7 @@ int __i915_add_request(struct intel_engine_cs *ring, } request->emitted_jiffies = jiffies; + ring->last_submitted_seqno = request->seqno; list_add_tail(&request->list, &ring->request_list); request->file_priv = NULL; @@ -3247,10 +3254,8 @@ int i915_vma_unbind(struct i915_vma *vma) /* Since the unbound list is global, only move to that list if * no more VMAs exist. */ - if (list_empty(&obj->vma_list)) { - i915_gem_gtt_finish_object(obj); + if (list_empty(&obj->vma_list)) list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); - } /* And finally now the object is completely decoupled from this vma, * we can drop its hold on the backing storage and allow it to be @@ -3768,22 +3773,16 @@ search_free: goto err_remove_node; } - ret = i915_gem_gtt_prepare_object(obj); - if (ret) - goto err_remove_node; - trace_i915_vma_bind(vma, flags); ret = i915_vma_bind(vma, obj->cache_level, flags); if (ret) - goto err_finish_gtt; + goto err_remove_node; list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); list_add_tail(&vma->mm_list, &vm->inactive_list); return vma; -err_finish_gtt: - i915_gem_gtt_finish_object(obj); err_remove_node: drm_mm_remove_node(&vma->node); err_free_vma: diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index d65cbe6afb92..48afa777e94a 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -135,8 +135,7 @@ static int get_context_size(struct drm_device *dev) void i915_gem_context_free(struct kref *ctx_ref) { - struct intel_context *ctx = container_of(ctx_ref, - typeof(*ctx), ref); + struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); trace_i915_context_free(ctx); @@ -195,6 +194,7 @@ __create_hw_context(struct drm_device *dev, kref_init(&ctx->ref); list_add_tail(&ctx->link, &dev_priv->context_list); + ctx->i915 = dev_priv; if (dev_priv->hw_context_size) { struct drm_i915_gem_object *obj = diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 7998da27c500..e9c2bfd85b52 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -256,7 +256,6 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) return PTR_ERR(sg); obj->pages = sg; - obj->has_dma_mapping = true; return 0; } @@ -264,7 +263,6 @@ static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj) { dma_buf_unmap_attachment(obj->base.import_attach, obj->pages, DMA_BIDIRECTIONAL); - obj->has_dma_mapping = false; } static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = { diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index dcc6a88c560e..56b52a4767d4 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1723,9 +1723,6 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev) int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) { - if (obj->has_dma_mapping) - return 0; - if (!dma_map_sg(&obj->base.dev->pdev->dev, obj->pages->sgl, obj->pages->nents, PCI_DMA_BIDIRECTIONAL)) @@ -1972,10 +1969,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) interruptible = do_idling(dev_priv); - if (!obj->has_dma_mapping) - 
dma_unmap_sg(&dev->pdev->dev, - obj->pages->sgl, obj->pages->nents, - PCI_DMA_BIDIRECTIONAL); + dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents, + PCI_DMA_BIDIRECTIONAL); undo_idling(dev_priv, interruptible); } diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 348ed5abcdbf..8b5b784c62fe 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -416,7 +416,6 @@ _i915_gem_object_create_stolen(struct drm_device *dev, if (obj->pages == NULL) goto cleanup; - obj->has_dma_mapping = true; i915_gem_object_pin_pages(obj); obj->stolen = stolen; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index d61e74a08f82..633bd1fcab69 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -183,18 +183,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) if (IS_GEN4(dev)) { uint32_t ddc2 = I915_READ(DCC2); - if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) { - /* Since the swizzling may vary within an - * object, we have no idea what the swizzling - * is for any page in particular. Thus we - * cannot migrate tiled pages using the GPU, - * nor can we tell userspace what the exact - * swizzling is for any object. - */ + if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES; - swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; - swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; - } } if (dcc == 0xffffffff) { diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 1f4e5a32a16e..8fd431bcdfd3 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -545,6 +545,26 @@ err: return ret; } +static int +__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj, + struct page **pvec, int num_pages) +{ + int ret; + + ret = st_set_pages(&obj->pages, pvec, num_pages); + if (ret) + return ret; + + ret = i915_gem_gtt_prepare_object(obj); + if (ret) { + sg_free_table(obj->pages); + kfree(obj->pages); + obj->pages = NULL; + } + + return ret; +} + static void __i915_gem_userptr_get_pages_worker(struct work_struct *_work) { @@ -584,9 +604,12 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) if (obj->userptr.work != &work->work) { ret = 0; } else if (pinned == num_pages) { - ret = st_set_pages(&obj->pages, pvec, num_pages); + ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages); if (ret == 0) { list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list); + obj->get_page.sg = obj->pages->sgl; + obj->get_page.last = 0; + pinned = 0; } } @@ -693,7 +716,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) } } } else { - ret = st_set_pages(&obj->pages, pvec, num_pages); + ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages); if (ret == 0) { obj->userptr.work = NULL; pinned = 0; @@ -715,6 +738,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) if (obj->madv != I915_MADV_WILLNEED) obj->dirty = 0; + i915_gem_gtt_finish_object(obj); + for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { struct page *page = sg_page_iter_page(&sg_iter); diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c index 176de6322e4d..23aa04cded6b 100644 --- a/drivers/gpu/drm/i915/i915_ioc32.c +++ b/drivers/gpu/drm/i915/i915_ioc32.c @@ -204,7 +204,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) drm_ioctl_compat_t *fn = NULL; int ret; 
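/*
 * Aside, illustration only: the i915_compat_ioctl() hunk that follows
 * checks the command number against both ends of the driver range before
 * it can be used as a table index; out-of-range numbers fall back to the
 * core DRM compat handler.  Below is a hedged, self-contained sketch of
 * that guard pattern (it simply rejects instead of falling back); the
 * "ex_" names and bounds are hypothetical.
 */
#include <stddef.h>
#include <errno.h>

#define EX_CMD_BASE 0x40u
#define EX_CMD_END  0xa0u

typedef long (*ex_handler_t)(void);

static long ex_compat_dispatch(unsigned int nr,
			       ex_handler_t table[], size_t nr_entries)
{
	/* reject numbers outside the driver's private command range */
	if (nr < EX_CMD_BASE || nr >= EX_CMD_END)
		return -ENOTTY;

	/* also reject slots with no compat handler registered */
	if (nr - EX_CMD_BASE >= nr_entries || !table[nr - EX_CMD_BASE])
		return -ENOTTY;

	return table[nr - EX_CMD_BASE]();
}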
- if (nr < DRM_COMMAND_BASE) + if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END) return drm_compat_ioctl(filp, cmd, arg); if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e6bb72dca3ff..984e2fe6688c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2706,18 +2706,11 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -static struct drm_i915_gem_request * -ring_last_request(struct intel_engine_cs *ring) -{ - return list_entry(ring->request_list.prev, - struct drm_i915_gem_request, list); -} - static bool -ring_idle(struct intel_engine_cs *ring) +ring_idle(struct intel_engine_cs *ring, u32 seqno) { return (list_empty(&ring->request_list) || - i915_gem_request_completed(ring_last_request(ring), false)); + i915_seqno_passed(seqno, ring->last_submitted_seqno)); } static bool @@ -2939,7 +2932,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work) acthd = intel_ring_get_active_head(ring); if (ring->hangcheck.seqno == seqno) { - if (ring_idle(ring)) { + if (ring_idle(ring, seqno)) { ring->hangcheck.action = HANGCHECK_IDLE; if (waitqueue_active(&ring->irq_queue)) { diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 497cba5deb1e..849a2590e010 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -727,7 +727,7 @@ DECLARE_EVENT_CLASS(i915_context, TP_fast_assign( __entry->ctx = ctx; __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL; - __entry->dev = ctx->file_priv->dev_priv->dev->primary->index; + __entry->dev = ctx->i915->dev->primary->index; ), TP_printk("dev=%u, ctx=%p, ctx_vm=%p", diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 647b1404c441..30e0f54ba19d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6315,9 +6315,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc) struct drm_connector *connector; struct drm_i915_private *dev_priv = dev->dev_private; - /* crtc should still be enabled when we disable it. */ - WARN_ON(!crtc->state->enable); - intel_crtc_disable_planes(crtc); dev_priv->display.crtc_disable(crtc); dev_priv->display.off(crtc); @@ -12591,7 +12588,8 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc, continue; if (!crtc_state->enable) { - intel_crtc_disable(crtc); + if (crtc->state->enable) + intel_crtc_disable(crtc); } else if (crtc->state->enable) { intel_crtc_disable_planes(crtc); dev_priv->display.crtc_disable(crtc); @@ -13276,7 +13274,7 @@ intel_check_primary_plane(struct drm_plane *plane, if (ret) return ret; - if (intel_crtc->active) { + if (crtc_state ? crtc_state->base.active : intel_crtc->active) { struct intel_plane_state *old_state = to_intel_plane_state(plane->state); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index e539314ae87e..4be66f60504d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -275,6 +275,13 @@ struct intel_engine_cs { * Do we have some not yet emitted requests outstanding? */ struct drm_i915_gem_request *outstanding_lazy_request; + /** + * Seqno of request most recently submitted to request_list. + * Used exclusively by hang checker to avoid grabbing lock while + * inspecting request list. 
+ */ + u32 last_submitted_seqno; + bool gpu_caches_dirty; wait_queue_head_t irq_queue; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index a6d8a3ee7750..260389acfb77 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1274,10 +1274,12 @@ int i915_reg_read_ioctl(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_reg_read *reg = data; struct register_whitelist const *entry = whitelist; + unsigned size; + u64 offset; int i, ret = 0; for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { - if (entry->offset == reg->offset && + if (entry->offset == (reg->offset & -entry->size) && (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) break; } @@ -1285,23 +1287,33 @@ int i915_reg_read_ioctl(struct drm_device *dev, if (i == ARRAY_SIZE(whitelist)) return -EINVAL; + /* We use the low bits to encode extra flags as the register should + * be naturally aligned (and those that are not so aligned merely + * limit the available flags for that register). + */ + offset = entry->offset; + size = entry->size; + size |= reg->offset ^ offset; + intel_runtime_pm_get(dev_priv); - switch (entry->size) { + switch (size) { + case 8 | 1: + reg->val = I915_READ64_2x32(offset, offset+4); + break; case 8: - reg->val = I915_READ64(reg->offset); + reg->val = I915_READ64(offset); break; case 4: - reg->val = I915_READ(reg->offset); + reg->val = I915_READ(offset); break; case 2: - reg->val = I915_READ16(reg->offset); + reg->val = I915_READ16(offset); break; case 1: - reg->val = I915_READ8(reg->offset); + reg->val = I915_READ8(offset); break; default: - MISSING_CASE(entry->size); ret = -EINVAL; goto out; } diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index 214eceefc981..e671ad369416 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -301,7 +301,7 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder) switch (tve->mode) { case TVE_MODE_VGA: - imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_YUV8_1X24, + imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24, tve->hsync_pin, tve->vsync_pin); break; case TVE_MODE_TVOUT: diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 74a9ce40ddc4..b4deb9cf9d71 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -21,6 +21,7 @@ #include <drm/drm_panel.h> #include <linux/videodev2.h> #include <video/of_display_timing.h> +#include <linux/of_graph.h> #include "imx-drm.h" @@ -208,7 +209,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = data; struct device_node *np = dev->of_node; - struct device_node *panel_node; + struct device_node *port; const u8 *edidp; struct imx_parallel_display *imxpd; int ret; @@ -234,11 +235,19 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI; } - panel_node = of_parse_phandle(np, "fsl,panel", 0); - if (panel_node) { - imxpd->panel = of_drm_find_panel(panel_node); - if (!imxpd->panel) - return -EPROBE_DEFER; + /* port@1 is the output port */ + port = of_graph_get_port_by_id(np, 1); + if (port) { + struct device_node *endpoint, *remote; + + endpoint = of_get_child_by_name(port, "endpoint"); + if (endpoint) { + remote = of_graph_get_remote_port_parent(endpoint); + if (remote) + imxpd->panel = of_drm_find_panel(remote); + if 
(!imxpd->panel) + return -EPROBE_DEFER; + } } imxpd->dev = dev; diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 8730562323a8..4a09947be244 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -5818,7 +5818,7 @@ int ci_dpm_init(struct radeon_device *rdev) tmp |= DPM_ENABLED; break; default: - DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift); + DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift); break; } WREG32_SMC(CNB_PWRMGT_CNTL, tmp); diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 5450fa95a47e..c4777c8d0312 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -260,8 +260,10 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, } } } - mb(); - radeon_gart_tlb_flush(rdev); + if (rdev->gart.ptr) { + mb(); + radeon_gart_tlb_flush(rdev); + } } /** @@ -306,8 +308,10 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, page_base += RADEON_GPU_PAGE_SIZE; } } - mb(); - radeon_gart_tlb_flush(rdev); + if (rdev->gart.ptr) { + mb(); + radeon_gart_tlb_flush(rdev); + } return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 013ec7106e55..3dcc5733ff69 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -36,6 +36,7 @@ void radeon_gem_object_free(struct drm_gem_object *gobj) if (robj) { if (robj->gem_base.import_attach) drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg); + radeon_mn_unregister(robj); radeon_bo_unref(&robj); } } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 318165d4855c..676362769b8d 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -75,7 +75,6 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) bo = container_of(tbo, struct radeon_bo, tbo); radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1); - radeon_mn_unregister(bo); mutex_lock(&bo->rdev->gem.mutex); list_del_init(&bo->list); diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 1dbdf3230dae..787cd8fd897f 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2926,6 +2926,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = { /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, + { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, { 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 01b558fe3695..9a0c2911272a 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -555,7 +555,6 @@ static struct platform_driver rockchip_drm_platform_driver = { .probe = rockchip_drm_platform_probe, .remove = rockchip_drm_platform_remove, .driver = { - .owner = THIS_MODULE, .name = "rockchip-drm", .of_match_table = rockchip_drm_dt_ids, .pm = &rockchip_drm_pm_ops, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c index 77d52893d40f..002645bb5bbf 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c @@ -162,7 +162,8 @@ static void rockchip_drm_output_poll_changed(struct drm_device *dev) struct rockchip_drm_private *private = 
dev->dev_private; struct drm_fb_helper *fb_helper = &private->fbdev_helper; - drm_fb_helper_hotplug_event(fb_helper); + if (fb_helper) + drm_fb_helper_hotplug_event(fb_helper); } static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = { diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index eb2282cc4a56..eba5f8a52fbd 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -54,55 +54,56 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj) &rk_obj->dma_attrs); } -int rockchip_gem_mmap_buf(struct drm_gem_object *obj, - struct vm_area_struct *vma) +static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma) + { + int ret; struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); struct drm_device *drm = obj->dev; - unsigned long vm_size; - vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; - vm_size = vma->vm_end - vma->vm_start; - - if (vm_size > obj->size) - return -EINVAL; + /* + * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear + * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). + */ + vma->vm_flags &= ~VM_PFNMAP; - return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, + ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, obj->size, &rk_obj->dma_attrs); + if (ret) + drm_gem_vm_close(vma); + + return ret; } -/* drm driver mmap file operations */ -int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) +int rockchip_gem_mmap_buf(struct drm_gem_object *obj, + struct vm_area_struct *vma) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->minor->dev; - struct drm_gem_object *obj; - struct drm_vma_offset_node *node; + struct drm_device *drm = obj->dev; int ret; - if (drm_device_is_unplugged(dev)) - return -ENODEV; + mutex_lock(&drm->struct_mutex); + ret = drm_gem_mmap_obj(obj, obj->size, vma); + mutex_unlock(&drm->struct_mutex); + if (ret) + return ret; - mutex_lock(&dev->struct_mutex); + return rockchip_drm_gem_object_mmap(obj, vma); +} - node = drm_vma_offset_exact_lookup(dev->vma_offset_manager, - vma->vm_pgoff, - vma_pages(vma)); - if (!node) { - mutex_unlock(&dev->struct_mutex); - DRM_ERROR("failed to find vma node.\n"); - return -EINVAL; - } else if (!drm_vma_node_is_allowed(node, filp)) { - mutex_unlock(&dev->struct_mutex); - return -EACCES; - } +/* drm driver mmap file operations */ +int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_gem_object *obj; + int ret; - obj = container_of(node, struct drm_gem_object, vma_node); - ret = rockchip_gem_mmap_buf(obj, vma); + ret = drm_gem_mmap(filp, vma); + if (ret) + return ret; - mutex_unlock(&dev->struct_mutex); + obj = vma->vm_private_data; - return ret; + return rockchip_drm_gem_object_mmap(obj, vma); } struct rockchip_gem_object * diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index dc65161d7cad..34b78e736532 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -170,6 +170,7 @@ struct vop_win_phy { struct vop_reg enable; struct vop_reg format; + struct vop_reg rb_swap; struct vop_reg act_info; struct vop_reg dsp_info; struct vop_reg dsp_st; @@ -199,8 +200,12 @@ struct vop_data { static const uint32_t formats_01[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, + DRM_FORMAT_XBGR8888, + 
DRM_FORMAT_ABGR8888, DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, DRM_FORMAT_NV12, DRM_FORMAT_NV16, DRM_FORMAT_NV24, @@ -209,8 +214,12 @@ static const uint32_t formats_01[] = { static const uint32_t formats_234[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ABGR8888, DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, }; static const struct vop_win_phy win01_data = { @@ -218,6 +227,7 @@ static const struct vop_win_phy win01_data = { .nformats = ARRAY_SIZE(formats_01), .enable = VOP_REG(WIN0_CTRL0, 0x1, 0), .format = VOP_REG(WIN0_CTRL0, 0x7, 1), + .rb_swap = VOP_REG(WIN0_CTRL0, 0x1, 12), .act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0), .dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0), .dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0), @@ -234,6 +244,7 @@ static const struct vop_win_phy win23_data = { .nformats = ARRAY_SIZE(formats_234), .enable = VOP_REG(WIN2_CTRL0, 0x1, 0), .format = VOP_REG(WIN2_CTRL0, 0x7, 1), + .rb_swap = VOP_REG(WIN2_CTRL0, 0x1, 12), .dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0), .dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0), .yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0), @@ -242,15 +253,6 @@ static const struct vop_win_phy win23_data = { .dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0), }; -static const struct vop_win_phy cursor_data = { - .data_formats = formats_234, - .nformats = ARRAY_SIZE(formats_234), - .enable = VOP_REG(HWC_CTRL0, 0x1, 0), - .format = VOP_REG(HWC_CTRL0, 0x7, 1), - .dsp_st = VOP_REG(HWC_DSP_ST, 0x1fff1fff, 0), - .yrgb_mst = VOP_REG(HWC_MST, 0xffffffff, 0), -}; - static const struct vop_ctrl ctrl_data = { .standby = VOP_REG(SYS_CTRL, 0x1, 22), .gate_en = VOP_REG(SYS_CTRL, 0x1, 23), @@ -282,14 +284,14 @@ static const struct vop_reg_data vop_init_reg_table[] = { /* * Note: rk3288 has a dedicated 'cursor' window, however, that window requires * special support to get alpha blending working. For now, just use overlay - * window 1 for the drm cursor. + * window 3 for the drm cursor. 
+ * */ static const struct vop_win_data rk3288_vop_win_data[] = { { .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY }, - { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_CURSOR }, + { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_OVERLAY }, { .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY }, - { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY }, - { .base = 0x00, .phy = &cursor_data, .type = DRM_PLANE_TYPE_OVERLAY }, + { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_CURSOR }, }; static const struct vop_data rk3288_vop = { @@ -352,15 +354,32 @@ static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset, } } +static bool has_rb_swapped(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_BGR888: + case DRM_FORMAT_BGR565: + return true; + default: + return false; + } +} + static enum vop_data_format vop_convert_format(uint32_t format) { switch (format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: return VOP_FMT_ARGB8888; case DRM_FORMAT_RGB888: + case DRM_FORMAT_BGR888: return VOP_FMT_RGB888; case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: return VOP_FMT_RGB565; case DRM_FORMAT_NV12: return VOP_FMT_YUV420SP; @@ -378,6 +397,7 @@ static bool is_alpha_support(uint32_t format) { switch (format) { case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: return true; default: return false; @@ -588,6 +608,7 @@ static int vop_update_plane_event(struct drm_plane *plane, enum vop_data_format format; uint32_t val; bool is_alpha; + bool rb_swap; bool visible; int ret; struct drm_rect dest = { @@ -621,6 +642,7 @@ static int vop_update_plane_event(struct drm_plane *plane, return 0; is_alpha = is_alpha_support(fb->pixel_format); + rb_swap = has_rb_swapped(fb->pixel_format); format = vop_convert_format(fb->pixel_format); if (format < 0) return format; @@ -689,6 +711,7 @@ static int vop_update_plane_event(struct drm_plane *plane, val = (dsp_sty - 1) << 16; val |= (dsp_stx - 1) & 0xffff; VOP_WIN_SET(vop, win, dsp_st, val); + VOP_WIN_SET(vop, win, rb_swap, rb_swap); if (is_alpha) { VOP_WIN_SET(vop, win, dst_alpha_ctl, diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 882cccdad272..ac6fe40b99f7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -490,7 +490,8 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp) else if (boot_cpu_data.x86 > 3) tmp = pgprot_noncached(tmp); #endif -#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__) +#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \ + defined(__powerpc__) if (caching_flags & TTM_PL_FLAG_WC) tmp = pgprot_writecombine(tmp); else diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index 3077f1554099..624d941aaad1 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -963,14 +963,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) } else { pool->npages_free += count; list_splice(&ttm_dma->pages_list, &pool->free_list); - npages = count; - if (pool->npages_free > _manager->options.max_size) { + /* + * Wait to have at at least NUM_PAGES_TO_ALLOC number of pages + * to free in order to minimize calls to set_memory_wb(). 
+ */ + if (pool->npages_free >= (_manager->options.max_size + + NUM_PAGES_TO_ALLOC)) npages = pool->npages_free - _manager->options.max_size; - /* free at least NUM_PAGES_TO_ALLOC number of pages - * to reduce calls to set_memory_wb */ - if (npages < NUM_PAGES_TO_ALLOC) - npages = NUM_PAGES_TO_ALLOC; - } } spin_unlock_irqrestore(&pool->lock, irq_flags); diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 6d2f39d36e44..00f2058944e5 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -1107,6 +1107,9 @@ static int ipu_irq_init(struct ipu_soc *ipu) return ret; } + for (i = 0; i < IPU_NUM_IRQS; i += 32) + ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32)); + for (i = 0; i < IPU_NUM_IRQS; i += 32) { gc = irq_get_domain_generic_chip(ipu->domain, i); gc->reg_base = ipu->cm_reg; diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 3318de690e00..a2dbbbe0d8d7 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -356,6 +356,8 @@ static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size) struct cp2112_force_read_report report; int ret; + if (size > sizeof(dev->read_data)) + size = sizeof(dev->read_data); report.report = CP2112_DATA_READ_FORCE_SEND; report.length = cpu_to_be16(size); diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 6a9b05b328a9..7c811252c1ce 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -778,9 +778,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, /* * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN" * for the stylus. + * The check for mt_report_id ensures we don't process + * HID_DG_CONTACTCOUNT from the pen report as it is outside the physical + * collection, but within the report ID. 
*/ if (field->physical == HID_DG_STYLUS) return 0; + else if ((field->physical == 0) && + (field->report->id != td->mt_report_id) && + (td->mt_report_id != -1)) + return 0; if (field->application == HID_DG_TOUCHSCREEN || field->application == HID_DG_TOUCHPAD) diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 53e7de7cb9e2..20f9a653444c 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -87,6 +87,9 @@ static const struct hid_blacklist { { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS }, diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 4c0ffca97bef..44958d79d598 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -1271,11 +1271,13 @@ fail_leds: pad_input_dev = NULL; wacom_wac->pad_registered = false; fail_register_pad_input: - input_unregister_device(touch_input_dev); + if (touch_input_dev) + input_unregister_device(touch_input_dev); wacom_wac->touch_input = NULL; wacom_wac->touch_registered = false; fail_register_touch_input: - input_unregister_device(pen_input_dev); + if (pen_input_dev) + input_unregister_device(pen_input_dev); wacom_wac->pen_input = NULL; wacom_wac->pen_registered = false; fail_register_pen_input: diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 232da89f4e88..0d244239e55d 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2213,6 +2213,9 @@ void wacom_setup_device_quirks(struct wacom *wacom) features->x_max = 4096; features->y_max = 4096; } + else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) { + features->device_type |= WACOM_DEVICETYPE_PAD; + } } /* diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c index 4e70f51c2370..cc5a35750b50 100644 --- a/drivers/iio/accel/bmc150-accel.c +++ b/drivers/iio/accel/bmc150-accel.c @@ -1464,7 +1464,7 @@ static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data, { int i; - for (i = from; i >= 0; i++) { + for (i = from; i >= 0; i--) { if (data->triggers[i].indio_trig) { iio_trigger_unregister(data->triggers[i].indio_trig); data->triggers[i].indio_trig = NULL; diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index e8e2077c7244..13ea1ea23328 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -557,21 +557,21 @@ static void mma8452_transient_interrupt(struct iio_dev *indio_dev) if (src & MMA8452_TRANSIENT_SRC_XTRANSE) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X, - IIO_EV_TYPE_THRESH, + IIO_EV_TYPE_MAG, IIO_EV_DIR_RISING), ts); if (src & MMA8452_TRANSIENT_SRC_YTRANSE) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y, - IIO_EV_TYPE_THRESH, + IIO_EV_TYPE_MAG, IIO_EV_DIR_RISING), ts); if (src & MMA8452_TRANSIENT_SRC_ZTRANSE) iio_push_event(indio_dev, 
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z, - IIO_EV_TYPE_THRESH, + IIO_EV_TYPE_MAG, IIO_EV_DIR_RISING), ts); } @@ -644,7 +644,7 @@ static int mma8452_reg_access_dbg(struct iio_dev *indio_dev, static const struct iio_event_spec mma8452_transient_event[] = { { - .type = IIO_EV_TYPE_THRESH, + .type = IIO_EV_TYPE_MAG, .dir = IIO_EV_DIR_RISING, .mask_separate = BIT(IIO_EV_INFO_ENABLE), .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 7c5565891cb8..eb0cd897714a 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -153,8 +153,7 @@ config DA9150_GPADC config CC10001_ADC tristate "Cosmic Circuits 10001 ADC driver" - depends on HAVE_CLK || REGULATOR - depends on HAS_IOMEM + depends on HAS_IOMEM && HAVE_CLK && REGULATOR select IIO_BUFFER select IIO_TRIGGERED_BUFFER help diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index 8a0eb4a04fb5..7b40925dd4ff 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -182,7 +182,7 @@ struct at91_adc_caps { u8 ts_pen_detect_sensitivity; /* startup time calculate function */ - u32 (*calc_startup_ticks)(u8 startup_time, u32 adc_clk_khz); + u32 (*calc_startup_ticks)(u32 startup_time, u32 adc_clk_khz); u8 num_channels; struct at91_adc_reg_desc registers; @@ -201,7 +201,7 @@ struct at91_adc_state { u8 num_channels; void __iomem *reg_base; struct at91_adc_reg_desc *registers; - u8 startup_time; + u32 startup_time; u8 sample_hold_time; bool sleep_mode; struct iio_trigger **trig; @@ -779,7 +779,7 @@ ret: return ret; } -static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz) +static u32 calc_startup_ticks_9260(u32 startup_time, u32 adc_clk_khz) { /* * Number of ticks needed to cover the startup time of the ADC @@ -790,7 +790,7 @@ static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz) return round_up((startup_time * adc_clk_khz / 1000) - 1, 8) / 8; } -static u32 calc_startup_ticks_9x5(u8 startup_time, u32 adc_clk_khz) +static u32 calc_startup_ticks_9x5(u32 startup_time, u32 adc_clk_khz) { /* * For sama5d3x and at91sam9x5, the formula changes to: diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c index 8d9c9b9215dd..d819823f7257 100644 --- a/drivers/iio/adc/mcp320x.c +++ b/drivers/iio/adc/mcp320x.c @@ -299,6 +299,8 @@ static int mcp320x_probe(struct spi_device *spi) indio_dev->channels = chip_info->channels; indio_dev->num_channels = chip_info->num_channels; + adc->chip_info = chip_info; + adc->transfer[0].tx_buf = &adc->tx_buf; adc->transfer[0].len = sizeof(adc->tx_buf); adc->transfer[1].rx_buf = adc->rx_buf; diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c index 8d4e019ea4ca..9c311c1e1ac7 100644 --- a/drivers/iio/adc/rockchip_saradc.c +++ b/drivers/iio/adc/rockchip_saradc.c @@ -349,3 +349,7 @@ static struct platform_driver rockchip_saradc_driver = { }; module_platform_driver(rockchip_saradc_driver); + +MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>"); +MODULE_DESCRIPTION("Rockchip SARADC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c index 06f4792240f0..ebe415f10640 100644 --- a/drivers/iio/adc/twl4030-madc.c +++ b/drivers/iio/adc/twl4030-madc.c @@ -833,7 +833,8 @@ static int twl4030_madc_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, twl4030_madc_threaded_irq_handler, - IRQF_TRIGGER_RISING, "twl4030_madc", madc); + 
IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "twl4030_madc", madc); if (ret) { dev_err(&pdev->dev, "could not request irq\n"); goto err_i2c; diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c index 480f335a0f9f..819632bf1fda 100644 --- a/drivers/iio/adc/vf610_adc.c +++ b/drivers/iio/adc/vf610_adc.c @@ -635,7 +635,7 @@ static int vf610_adc_reg_access(struct iio_dev *indio_dev, struct vf610_adc *info = iio_priv(indio_dev); if ((readval == NULL) || - (!(reg % 4) || (reg > VF610_REG_ADC_PCTL))) + ((reg % 4) || (reg > VF610_REG_ADC_PCTL))) return -EINVAL; *readval = readl(info->regs + reg); diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c index 610fc98f88ef..595511022795 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c @@ -36,6 +36,8 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) s32 poll_value = 0; if (state) { + if (!atomic_read(&st->user_requested_state)) + return 0; if (sensor_hub_device_open(st->hsdev)) return -EIO; @@ -52,8 +54,12 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) poll_value = hid_sensor_read_poll_value(st); } else { - if (!atomic_dec_and_test(&st->data_ready)) + int val; + + val = atomic_dec_if_positive(&st->data_ready); + if (val < 0) return 0; + sensor_hub_device_close(st->hsdev); state_val = hid_sensor_get_usage_index(st->hsdev, st->power_state.report_id, @@ -92,9 +98,11 @@ EXPORT_SYMBOL(hid_sensor_power_state); int hid_sensor_power_state(struct hid_sensor_common *st, bool state) { + #ifdef CONFIG_PM int ret; + atomic_set(&st->user_requested_state, state); if (state) ret = pm_runtime_get_sync(&st->pdev->dev); else { @@ -109,6 +117,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state) return 0; #else + atomic_set(&st->user_requested_state, state); return _hid_sensor_power_state(st, state); #endif } diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c index 61bb9d4239ea..e98428df0d44 100644 --- a/drivers/iio/dac/ad5624r_spi.c +++ b/drivers/iio/dac/ad5624r_spi.c @@ -22,7 +22,7 @@ #include "ad5624r.h" static int ad5624r_spi_write(struct spi_device *spi, - u8 cmd, u8 addr, u16 val, u8 len) + u8 cmd, u8 addr, u16 val, u8 shift) { u32 data; u8 msg[3]; @@ -35,7 +35,7 @@ static int ad5624r_spi_write(struct spi_device *spi, * 14-, 12-bit input code followed by 0, 2, or 4 don't care bits, * for the AD5664R, AD5644R, and AD5624R, respectively. 
*/ - data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len)); + data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift); msg[0] = data >> 16; msg[1] = data >> 8; msg[2] = data; diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index 17d4bb15be4d..65ce86837177 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -431,6 +431,23 @@ static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val) return -EINVAL; } +static int inv_write_raw_get_fmt(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, long mask) +{ + switch (mask) { + case IIO_CHAN_INFO_SCALE: + switch (chan->type) { + case IIO_ANGL_VEL: + return IIO_VAL_INT_PLUS_NANO; + default: + return IIO_VAL_INT_PLUS_MICRO; + } + default: + return IIO_VAL_INT_PLUS_MICRO; + } + + return -EINVAL; +} static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val) { int result, i; @@ -696,6 +713,7 @@ static const struct iio_info mpu_info = { .driver_module = THIS_MODULE, .read_raw = &inv_mpu6050_read_raw, .write_raw = &inv_mpu6050_write_raw, + .write_raw_get_fmt = &inv_write_raw_get_fmt, .attrs = &inv_attribute_group, .validate_trigger = inv_mpu6050_validate_trigger, }; diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index e6198b7c9cbf..a5c59251ec0e 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig @@ -188,6 +188,7 @@ config SENSORS_LM3533 config LTR501 tristate "LTR-501ALS-01 light sensor" depends on I2C + select REGMAP_I2C select IIO_BUFFER select IIO_TRIGGERED_BUFFER help @@ -201,6 +202,7 @@ config LTR501 config STK3310 tristate "STK3310 ALS and proximity sensor" depends on I2C + select REGMAP_I2C help Say yes here to get support for the Sensortek STK3310 ambient light and proximity sensor. 
The STK3311 model is also supported by this diff --git a/drivers/iio/light/cm3323.c b/drivers/iio/light/cm3323.c index 869033e48a1f..a1d4905cc9d2 100644 --- a/drivers/iio/light/cm3323.c +++ b/drivers/iio/light/cm3323.c @@ -123,7 +123,7 @@ static int cm3323_set_it_bits(struct cm3323_data *data, int val, int val2) for (i = 0; i < ARRAY_SIZE(cm3323_int_time); i++) { if (val == cm3323_int_time[i].val && val2 == cm3323_int_time[i].val2) { - reg_conf = data->reg_conf; + reg_conf = data->reg_conf & ~CM3323_CONF_IT_MASK; reg_conf |= i << CM3323_CONF_IT_SHIFT; ret = i2c_smbus_write_word_data(data->client, diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c index 1ef7d3773ab9..b5a0e66b5f28 100644 --- a/drivers/iio/light/ltr501.c +++ b/drivers/iio/light/ltr501.c @@ -1302,7 +1302,7 @@ static int ltr501_init(struct ltr501_data *data) if (ret < 0) return ret; - data->als_contr = ret | data->chip_info->als_mode_active; + data->als_contr = status | data->chip_info->als_mode_active; ret = regmap_read(data->regmap, LTR501_PS_CONTR, &status); if (ret < 0) diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c index 84c77d42a2c6..48ff7942fa00 100644 --- a/drivers/iio/light/stk3310.c +++ b/drivers/iio/light/stk3310.c @@ -43,7 +43,6 @@ #define STK3311_CHIP_ID_VAL 0x1D #define STK3310_PSINT_EN 0x01 #define STK3310_PS_MAX_VAL 0xFFFF -#define STK3310_THRESH_MAX 0xFFFF #define STK3310_DRIVER_NAME "stk3310" #define STK3310_REGMAP_NAME "stk3310_regmap" @@ -84,15 +83,13 @@ static const struct reg_field stk3310_reg_field_flag_psint = REG_FIELD(STK3310_REG_FLAG, 4, 4); static const struct reg_field stk3310_reg_field_flag_nf = REG_FIELD(STK3310_REG_FLAG, 0, 0); -/* - * Maximum PS values with regard to scale. Used to export the 'inverse' - * PS value (high values for far objects, low values for near objects). - */ + +/* Estimate maximum proximity values with regard to measurement scale. */ static const int stk3310_ps_max[4] = { - STK3310_PS_MAX_VAL / 64, - STK3310_PS_MAX_VAL / 16, - STK3310_PS_MAX_VAL / 4, - STK3310_PS_MAX_VAL, + STK3310_PS_MAX_VAL / 640, + STK3310_PS_MAX_VAL / 160, + STK3310_PS_MAX_VAL / 40, + STK3310_PS_MAX_VAL / 10 }; static const int stk3310_scale_table[][2] = { @@ -128,14 +125,14 @@ static const struct iio_event_spec stk3310_events[] = { /* Proximity event */ { .type = IIO_EV_TYPE_THRESH, - .dir = IIO_EV_DIR_FALLING, + .dir = IIO_EV_DIR_RISING, .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE), }, /* Out-of-proximity event */ { .type = IIO_EV_TYPE_THRESH, - .dir = IIO_EV_DIR_RISING, + .dir = IIO_EV_DIR_FALLING, .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE), }, @@ -203,25 +200,18 @@ static int stk3310_read_event(struct iio_dev *indio_dev, int *val, int *val2) { u8 reg; - u16 buf; + __be16 buf; int ret; - unsigned int index; struct stk3310_data *data = iio_priv(indio_dev); if (info != IIO_EV_INFO_VALUE) return -EINVAL; - /* - * Only proximity interrupts are implemented at the moment. - * Since we're inverting proximity values, the sensor's 'high' - * threshold will become our 'low' threshold, associated with - * 'near' events. Similarly, the sensor's 'low' threshold will - * be our 'high' threshold, associated with 'far' events. - */ + /* Only proximity interrupts are implemented at the moment. 
*/ if (dir == IIO_EV_DIR_RISING) - reg = STK3310_REG_THDL_PS; - else if (dir == IIO_EV_DIR_FALLING) reg = STK3310_REG_THDH_PS; + else if (dir == IIO_EV_DIR_FALLING) + reg = STK3310_REG_THDL_PS; else return -EINVAL; @@ -232,8 +222,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev, dev_err(&data->client->dev, "register read failed\n"); return ret; } - regmap_field_read(data->reg_ps_gain, &index); - *val = swab16(stk3310_ps_max[index] - buf); + *val = be16_to_cpu(buf); return IIO_VAL_INT; } @@ -246,7 +235,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev, int val, int val2) { u8 reg; - u16 buf; + __be16 buf; int ret; unsigned int index; struct stk3310_data *data = iio_priv(indio_dev); @@ -257,13 +246,13 @@ static int stk3310_write_event(struct iio_dev *indio_dev, return -EINVAL; if (dir == IIO_EV_DIR_RISING) - reg = STK3310_REG_THDL_PS; - else if (dir == IIO_EV_DIR_FALLING) reg = STK3310_REG_THDH_PS; + else if (dir == IIO_EV_DIR_FALLING) + reg = STK3310_REG_THDL_PS; else return -EINVAL; - buf = swab16(stk3310_ps_max[index] - val); + buf = cpu_to_be16(val); ret = regmap_bulk_write(data->regmap, reg, &buf, 2); if (ret < 0) dev_err(&client->dev, "failed to set PS threshold!\n"); @@ -312,7 +301,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { u8 reg; - u16 buf; + __be16 buf; int ret; unsigned int index; struct stk3310_data *data = iio_priv(indio_dev); @@ -333,15 +322,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev, mutex_unlock(&data->lock); return ret; } - *val = swab16(buf); - if (chan->type == IIO_PROXIMITY) { - /* - * Invert the proximity data so we return low values - * for close objects and high values for far ones. - */ - regmap_field_read(data->reg_ps_gain, &index); - *val = stk3310_ps_max[index] - *val; - } + *val = be16_to_cpu(buf); mutex_unlock(&data->lock); return IIO_VAL_INT; case IIO_CHAN_INFO_INT_TIME: @@ -577,8 +558,8 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private) } event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1, IIO_EV_TYPE_THRESH, - (dir ? IIO_EV_DIR_RISING : - IIO_EV_DIR_FALLING)); + (dir ? 
IIO_EV_DIR_FALLING : + IIO_EV_DIR_RISING)); iio_push_event(indio_dev, event, data->timestamp); /* Reset the interrupt flag */ @@ -623,13 +604,7 @@ static int stk3310_probe(struct i2c_client *client, if (ret < 0) return ret; - ret = iio_device_register(indio_dev); - if (ret < 0) { - dev_err(&client->dev, "device_register failed\n"); - stk3310_set_state(data, STK3310_STATE_STANDBY); - } - - if (client->irq <= 0) + if (client->irq < 0) client->irq = stk3310_gpio_probe(client); if (client->irq >= 0) { @@ -644,6 +619,12 @@ static int stk3310_probe(struct i2c_client *client, client->irq); } + ret = iio_device_register(indio_dev); + if (ret < 0) { + dev_err(&client->dev, "device_register failed\n"); + stk3310_set_state(data, STK3310_STATE_STANDBY); + } + return ret; } diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c index 71c2bde275aa..f8b1df018abe 100644 --- a/drivers/iio/light/tcs3414.c +++ b/drivers/iio/light/tcs3414.c @@ -185,7 +185,7 @@ static int tcs3414_write_raw(struct iio_dev *indio_dev, if (val != 0) return -EINVAL; for (i = 0; i < ARRAY_SIZE(tcs3414_times); i++) { - if (val == tcs3414_times[i] * 1000) { + if (val2 == tcs3414_times[i] * 1000) { data->timing &= ~TCS3414_INTEG_MASK; data->timing |= i; return i2c_smbus_write_byte_data( diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig index dcadfc4f0661..efb9350b0d76 100644 --- a/drivers/iio/magnetometer/Kconfig +++ b/drivers/iio/magnetometer/Kconfig @@ -90,6 +90,7 @@ config IIO_ST_MAGN_SPI_3AXIS config BMC150_MAGN tristate "Bosch BMC150 Magnetometer Driver" depends on I2C + select REGMAP_I2C select IIO_BUFFER select IIO_TRIGGERED_BUFFER help diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c index 187a31fdc35a..8f6b58dd7223 100644 --- a/drivers/iio/magnetometer/bmc150_magn.c +++ b/drivers/iio/magnetometer/bmc150_magn.c @@ -706,11 +706,11 @@ static int bmc150_magn_init(struct bmc150_magn_data *data) goto err_poweroff; } if (chip_id != BMC150_MAGN_CHIP_ID_VAL) { - dev_err(&data->client->dev, "Invalid chip id 0x%x\n", ret); + dev_err(&data->client->dev, "Invalid chip id 0x%x\n", chip_id); ret = -ENODEV; goto err_poweroff; } - dev_dbg(&data->client->dev, "Chip id %x\n", ret); + dev_dbg(&data->client->dev, "Chip id %x\n", chip_id); preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET]; ret = bmc150_magn_set_odr(data, preset.odr); diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c index 7a2ea71c659a..706ebfd6297f 100644 --- a/drivers/iio/magnetometer/mmc35240.c +++ b/drivers/iio/magnetometer/mmc35240.c @@ -84,10 +84,10 @@ #define MMC35240_OTP_START_ADDR 0x1B enum mmc35240_resolution { - MMC35240_16_BITS_SLOW = 0, /* 100 Hz */ - MMC35240_16_BITS_FAST, /* 200 Hz */ - MMC35240_14_BITS, /* 333 Hz */ - MMC35240_12_BITS, /* 666 Hz */ + MMC35240_16_BITS_SLOW = 0, /* 7.92 ms */ + MMC35240_16_BITS_FAST, /* 4.08 ms */ + MMC35240_14_BITS, /* 2.16 ms */ + MMC35240_12_BITS, /* 1.20 ms */ }; enum mmc35240_axis { @@ -100,22 +100,22 @@ static const struct { int sens[3]; /* sensitivity per X, Y, Z axis */ int nfo; /* null field output */ } mmc35240_props_table[] = { - /* 16 bits, 100Hz ODR */ + /* 16 bits, 125Hz ODR */ { {1024, 1024, 1024}, 32768, }, - /* 16 bits, 200Hz ODR */ + /* 16 bits, 250Hz ODR */ { {1024, 1024, 770}, 32768, }, - /* 14 bits, 333Hz ODR */ + /* 14 bits, 450Hz ODR */ { {256, 256, 193}, 8192, }, - /* 12 bits, 666Hz ODR */ + /* 12 bits, 800Hz ODR */ { {64, 64, 48}, 2048, @@ -133,9 +133,15 @@ struct 
mmc35240_data { int axis_scale[3]; }; -static const int mmc35240_samp_freq[] = {100, 200, 333, 666}; +static const struct { + int val; + int val2; +} mmc35240_samp_freq[] = { {1, 500000}, + {13, 0}, + {25, 0}, + {50, 0} }; -static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("100 200 333 666"); +static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1.5 13 25 50"); #define MMC35240_CHANNEL(_axis) { \ .type = IIO_MAGN, \ @@ -168,7 +174,8 @@ static int mmc35240_get_samp_freq_index(struct mmc35240_data *data, int i; for (i = 0; i < ARRAY_SIZE(mmc35240_samp_freq); i++) - if (mmc35240_samp_freq[i] == val) + if (mmc35240_samp_freq[i].val == val && + mmc35240_samp_freq[i].val2 == val2) return i; return -EINVAL; } @@ -195,8 +202,8 @@ static int mmc35240_hw_set(struct mmc35240_data *data, bool set) coil_bit = MMC35240_CTRL0_RESET_BIT; return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0, - MMC35240_CTRL0_REFILL_BIT, - coil_bit); + coil_bit, coil_bit); + } static int mmc35240_init(struct mmc35240_data *data) @@ -215,14 +222,15 @@ static int mmc35240_init(struct mmc35240_data *data) /* * make sure we restore sensor characteristics, by doing - * a RESET/SET sequence + * a SET/RESET sequence, the axis polarity being naturally + * aligned after RESET */ - ret = mmc35240_hw_set(data, false); + ret = mmc35240_hw_set(data, true); if (ret < 0) return ret; usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1); - ret = mmc35240_hw_set(data, true); + ret = mmc35240_hw_set(data, false); if (ret < 0) return ret; @@ -378,9 +386,9 @@ static int mmc35240_read_raw(struct iio_dev *indio_dev, if (i < 0 || i >= ARRAY_SIZE(mmc35240_samp_freq)) return -EINVAL; - *val = mmc35240_samp_freq[i]; - *val2 = 0; - return IIO_VAL_INT; + *val = mmc35240_samp_freq[i].val; + *val2 = mmc35240_samp_freq[i].val2; + return IIO_VAL_INT_PLUS_MICRO; default: return -EINVAL; } @@ -496,6 +504,7 @@ static int mmc35240_probe(struct i2c_client *client, } data = iio_priv(indio_dev); + i2c_set_clientdata(client, indio_dev); data->client = client; data->regmap = regmap; data->res = MMC35240_16_BITS_SLOW; diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c index 2042e375f835..3d756bd8c703 100644 --- a/drivers/iio/proximity/sx9500.c +++ b/drivers/iio/proximity/sx9500.c @@ -80,6 +80,7 @@ #define SX9500_COMPSTAT_MASK GENMASK(3, 0) #define SX9500_NUM_CHANNELS 4 +#define SX9500_CHAN_MASK GENMASK(SX9500_NUM_CHANNELS - 1, 0) struct sx9500_data { struct mutex mutex; @@ -281,7 +282,7 @@ static int sx9500_read_prox_data(struct sx9500_data *data, if (ret < 0) return ret; - *val = 32767 - (s16)be16_to_cpu(regval); + *val = be16_to_cpu(regval); return IIO_VAL_INT; } @@ -329,27 +330,29 @@ static int sx9500_read_proximity(struct sx9500_data *data, else ret = sx9500_wait_for_sample(data); - if (ret < 0) - return ret; - mutex_lock(&data->mutex); - ret = sx9500_read_prox_data(data, chan, val); if (ret < 0) - goto out; + goto out_dec_data_rdy; - ret = sx9500_dec_chan_users(data, chan->channel); + ret = sx9500_read_prox_data(data, chan, val); if (ret < 0) - goto out; + goto out_dec_data_rdy; ret = sx9500_dec_data_rdy_users(data); if (ret < 0) + goto out_dec_chan; + + ret = sx9500_dec_chan_users(data, chan->channel); + if (ret < 0) goto out; ret = IIO_VAL_INT; goto out; +out_dec_data_rdy: + sx9500_dec_data_rdy_users(data); out_dec_chan: sx9500_dec_chan_users(data, chan->channel); out: @@ -679,7 +682,7 @@ out: static int sx9500_buffer_preenable(struct iio_dev *indio_dev) { struct sx9500_data *data = iio_priv(indio_dev); - int ret, i; + int ret = 0, i; 
mutex_lock(&data->mutex); @@ -703,7 +706,7 @@ static int sx9500_buffer_preenable(struct iio_dev *indio_dev) static int sx9500_buffer_predisable(struct iio_dev *indio_dev) { struct sx9500_data *data = iio_priv(indio_dev); - int ret, i; + int ret = 0, i; iio_triggered_buffer_predisable(indio_dev); @@ -800,8 +803,7 @@ static int sx9500_init_compensation(struct iio_dev *indio_dev) unsigned int val; ret = regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0, - GENMASK(SX9500_NUM_CHANNELS, 0), - GENMASK(SX9500_NUM_CHANNELS, 0)); + SX9500_CHAN_MASK, SX9500_CHAN_MASK); if (ret < 0) return ret; @@ -821,7 +823,7 @@ static int sx9500_init_compensation(struct iio_dev *indio_dev) out: regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0, - GENMASK(SX9500_NUM_CHANNELS, 0), 0); + SX9500_CHAN_MASK, 0); return ret; } diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c index cb2e8ad8bfdc..7a2b639eaa96 100644 --- a/drivers/iio/temperature/mlx90614.c +++ b/drivers/iio/temperature/mlx90614.c @@ -204,7 +204,7 @@ static int mlx90614_read_raw(struct iio_dev *indio_dev, *val = ret; return IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: - *val = 13657; + *val = -13657; *val2 = 500000; return IIO_VAL_INT_PLUS_MICRO; case IIO_CHAN_INFO_SCALE: diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c index fcc49f89b946..8f21f32f9739 100644 --- a/drivers/iio/temperature/tmp006.c +++ b/drivers/iio/temperature/tmp006.c @@ -132,6 +132,9 @@ static int tmp006_write_raw(struct iio_dev *indio_dev, struct tmp006_data *data = iio_priv(indio_dev); int i; + if (mask != IIO_CHAN_INFO_SAMP_FREQ) + return -EINVAL; + for (i = 0; i < ARRAY_SIZE(tmp006_freqs); i++) if ((val == tmp006_freqs[i][0]) && (val2 == tmp006_freqs[i][1])) { diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c index c7dcfe4ca5f1..0429040304fd 100644 --- a/drivers/infiniband/core/agent.c +++ b/drivers/infiniband/core/agent.c @@ -88,7 +88,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh * struct ib_ah *ah; struct ib_mad_send_wr_private *mad_send_wr; - if (device->node_type == RDMA_NODE_IB_SWITCH) + if (rdma_cap_ib_switch(device)) port_priv = ib_get_agent_port(device, 0); else port_priv = ib_get_agent_port(device, port_num); @@ -122,7 +122,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh * memcpy(send_buf->mad, mad_hdr, resp_mad_len); send_buf->ah = ah; - if (device->node_type == RDMA_NODE_IB_SWITCH) { + if (rdma_cap_ib_switch(device)) { mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, send_buf); diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index dbddddd6fb5d..3a972ebf3c0d 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -169,6 +169,7 @@ struct cm_device { struct ib_device *ib_device; struct device *device; u8 ack_delay; + int going_down; struct cm_port *port[0]; }; @@ -805,6 +806,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv) { int wait_time; unsigned long flags; + struct cm_device *cm_dev; + + cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client); + if (!cm_dev) + return; spin_lock_irqsave(&cm.lock, flags); cm_cleanup_timewait(cm_id_priv->timewait_info); @@ -818,8 +824,14 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv) */ cm_id_priv->id.state = IB_CM_TIMEWAIT; wait_time = cm_convert_to_ms(cm_id_priv->av.timeout); - queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work, - 
msecs_to_jiffies(wait_time)); + + /* Check if the device started its remove_one */ + spin_lock_irq(&cm.lock); + if (!cm_dev->going_down) + queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work, + msecs_to_jiffies(wait_time)); + spin_unlock_irq(&cm.lock); + cm_id_priv->timewait_info = NULL; } @@ -3305,6 +3317,11 @@ static int cm_establish(struct ib_cm_id *cm_id) struct cm_work *work; unsigned long flags; int ret = 0; + struct cm_device *cm_dev; + + cm_dev = ib_get_client_data(cm_id->device, &cm_client); + if (!cm_dev) + return -ENODEV; work = kmalloc(sizeof *work, GFP_ATOMIC); if (!work) @@ -3343,7 +3360,17 @@ static int cm_establish(struct ib_cm_id *cm_id) work->remote_id = cm_id->remote_id; work->mad_recv_wc = NULL; work->cm_event.event = IB_CM_USER_ESTABLISHED; - queue_delayed_work(cm.wq, &work->work, 0); + + /* Check if the device started its remove_one */ + spin_lock_irq(&cm.lock); + if (!cm_dev->going_down) { + queue_delayed_work(cm.wq, &work->work, 0); + } else { + kfree(work); + ret = -ENODEV; + } + spin_unlock_irq(&cm.lock); + out: return ret; } @@ -3394,6 +3421,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent, enum ib_cm_event_type event; u16 attr_id; int paths = 0; + int going_down = 0; switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { case CM_REQ_ATTR_ID: @@ -3452,7 +3480,19 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent, work->cm_event.event = event; work->mad_recv_wc = mad_recv_wc; work->port = port; - queue_delayed_work(cm.wq, &work->work, 0); + + /* Check if the device started its remove_one */ + spin_lock_irq(&cm.lock); + if (!port->cm_dev->going_down) + queue_delayed_work(cm.wq, &work->work, 0); + else + going_down = 1; + spin_unlock_irq(&cm.lock); + + if (going_down) { + kfree(work); + ib_free_recv_mad(mad_recv_wc); + } } static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, @@ -3771,7 +3811,7 @@ static void cm_add_one(struct ib_device *ib_device) cm_dev->ib_device = ib_device; cm_get_ack_delay(cm_dev); - + cm_dev->going_down = 0; cm_dev->device = device_create(&cm_class, &ib_device->dev, MKDEV(0, 0), NULL, "%s", ib_device->name); @@ -3864,14 +3904,23 @@ static void cm_remove_one(struct ib_device *ib_device) list_del(&cm_dev->list); write_unlock_irqrestore(&cm.device_lock, flags); + spin_lock_irq(&cm.lock); + cm_dev->going_down = 1; + spin_unlock_irq(&cm.lock); + for (i = 1; i <= ib_device->phys_port_cnt; i++) { if (!rdma_cap_ib_cm(ib_device, i)) continue; port = cm_dev->port[i-1]; ib_modify_port(ib_device, port->port_num, 0, &port_modify); - ib_unregister_mad_agent(port->mad_agent); + /* + * We flush the queue here after the going_down set, this + * verify that no new works will be queued in the recv handler, + * after that we can call the unregister_mad_agent + */ flush_workqueue(cm.wq); + ib_unregister_mad_agent(port->mad_agent); cm_remove_port_fs(port); } device_unregister(cm_dev->device); diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c index e6ffa2e66c1a..22a3abee2a54 100644 --- a/drivers/infiniband/core/iwpm_msg.c +++ b/drivers/infiniband/core/iwpm_msg.c @@ -67,7 +67,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client) err_str = "Invalid port mapper client"; goto pid_query_error; } - if (iwpm_registered_client(nl_client)) + if (iwpm_check_registration(nl_client, IWPM_REG_VALID) || + iwpm_user_pid == IWPM_PID_UNAVAILABLE) return 0; skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client); if (!skb) { @@ -106,7 +107,6 @@ int iwpm_register_pid(struct 
iwpm_dev_data *pm_msg, u8 nl_client) ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL); if (ret) { skb = NULL; /* skb is freed in the netlink send-op handling */ - iwpm_set_registered(nl_client, 1); iwpm_user_pid = IWPM_PID_UNAVAILABLE; err_str = "Unable to send a nlmsg"; goto pid_query_error; @@ -144,12 +144,12 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client) err_str = "Invalid port mapper client"; goto add_mapping_error; } - if (!iwpm_registered_client(nl_client)) { + if (!iwpm_valid_pid()) + return 0; + if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) { err_str = "Unregistered port mapper client"; goto add_mapping_error; } - if (!iwpm_valid_pid()) - return 0; skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client); if (!skb) { err_str = "Unable to create a nlmsg"; @@ -214,12 +214,12 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client) err_str = "Invalid port mapper client"; goto query_mapping_error; } - if (!iwpm_registered_client(nl_client)) { + if (!iwpm_valid_pid()) + return 0; + if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) { err_str = "Unregistered port mapper client"; goto query_mapping_error; } - if (!iwpm_valid_pid()) - return 0; ret = -ENOMEM; skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client); if (!skb) { @@ -288,12 +288,12 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client) err_str = "Invalid port mapper client"; goto remove_mapping_error; } - if (!iwpm_registered_client(nl_client)) { + if (!iwpm_valid_pid()) + return 0; + if (iwpm_check_registration(nl_client, IWPM_REG_UNDEF)) { err_str = "Unregistered port mapper client"; goto remove_mapping_error; } - if (!iwpm_valid_pid()) - return 0; skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client); if (!skb) { ret = -ENOMEM; @@ -388,7 +388,7 @@ int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb) pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n", __func__, iwpm_user_pid); if (iwpm_valid_client(nl_client)) - iwpm_set_registered(nl_client, 1); + iwpm_set_registration(nl_client, IWPM_REG_VALID); register_pid_response_exit: nlmsg_request->request_done = 1; /* always for found nlmsg_request */ @@ -644,7 +644,6 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *nltb[IWPM_NLA_MAPINFO_REQ_MAX]; const char *msg_type = "Mapping Info response"; - int iwpm_pid; u8 nl_client; char *iwpm_name; u16 iwpm_version; @@ -669,14 +668,14 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb) __func__, nl_client); return ret; } - iwpm_set_registered(nl_client, 0); + iwpm_set_registration(nl_client, IWPM_REG_INCOMPL); atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq); + iwpm_user_pid = cb->nlh->nlmsg_pid; if (!iwpm_mapinfo_available()) return 0; - iwpm_pid = cb->nlh->nlmsg_pid; pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n", - __func__, iwpm_pid); - ret = iwpm_send_mapinfo(nl_client, iwpm_pid); + __func__, iwpm_user_pid); + ret = iwpm_send_mapinfo(nl_client, iwpm_user_pid); return ret; } EXPORT_SYMBOL(iwpm_mapping_info_cb); diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index a626795bf9c7..5fb089e91353 100644 --- a/drivers/infiniband/core/iwpm_util.c +++ b/drivers/infiniband/core/iwpm_util.c @@ -78,6 +78,7 @@ init_exit: mutex_unlock(&iwpm_admin_lock); if (!ret) { iwpm_set_valid(nl_client, 1); + iwpm_set_registration(nl_client, IWPM_REG_UNDEF); pr_debug("%s: Mapinfo 
and reminfo tables are created\n", __func__); } @@ -106,6 +107,7 @@ int iwpm_exit(u8 nl_client) } mutex_unlock(&iwpm_admin_lock); iwpm_set_valid(nl_client, 0); + iwpm_set_registration(nl_client, IWPM_REG_UNDEF); return 0; } EXPORT_SYMBOL(iwpm_exit); @@ -397,17 +399,23 @@ void iwpm_set_valid(u8 nl_client, int valid) } /* valid client */ -int iwpm_registered_client(u8 nl_client) +u32 iwpm_get_registration(u8 nl_client) { return iwpm_admin.reg_list[nl_client]; } /* valid client */ -void iwpm_set_registered(u8 nl_client, int reg) +void iwpm_set_registration(u8 nl_client, u32 reg) { iwpm_admin.reg_list[nl_client] = reg; } +/* valid client */ +u32 iwpm_check_registration(u8 nl_client, u32 reg) +{ + return (iwpm_get_registration(nl_client) & reg); +} + int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr, struct sockaddr_storage *b_sockaddr) { diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h index ee2d9ff095be..b7b9e194ce81 100644 --- a/drivers/infiniband/core/iwpm_util.h +++ b/drivers/infiniband/core/iwpm_util.h @@ -58,6 +58,10 @@ #define IWPM_PID_UNDEFINED -1 #define IWPM_PID_UNAVAILABLE -2 +#define IWPM_REG_UNDEF 0x01 +#define IWPM_REG_VALID 0x02 +#define IWPM_REG_INCOMPL 0x04 + struct iwpm_nlmsg_request { struct list_head inprocess_list; __u32 nlmsg_seq; @@ -88,7 +92,7 @@ struct iwpm_admin_data { atomic_t refcount; atomic_t nlmsg_seq; int client_list[RDMA_NL_NUM_CLIENTS]; - int reg_list[RDMA_NL_NUM_CLIENTS]; + u32 reg_list[RDMA_NL_NUM_CLIENTS]; }; /** @@ -159,19 +163,31 @@ int iwpm_valid_client(u8 nl_client); void iwpm_set_valid(u8 nl_client, int valid); /** - * iwpm_registered_client - Check if the port mapper client is registered + * iwpm_check_registration - Check if the client registration + * matches the given one * @nl_client: The index of the netlink client + * @reg: The given registration type to compare with * * Call iwpm_register_pid() to register a client + * Returns true if the client registration matches reg, + * otherwise returns false + */ +u32 iwpm_check_registration(u8 nl_client, u32 reg); + +/** + * iwpm_set_registration - Set the client registration + * @nl_client: The index of the netlink client + * @reg: Registration type to set */ -int iwpm_registered_client(u8 nl_client); +void iwpm_set_registration(u8 nl_client, u32 reg); /** - * iwpm_set_registered - Set the port mapper client to registered or not + * iwpm_get_registration * @nl_client: The index of the netlink client - * @reg: 1 if registered or 0 if not + * + * Returns the client registration type */ -void iwpm_set_registered(u8 nl_client, int reg); +u32 iwpm_get_registration(u8 nl_client); /** * iwpm_send_mapinfo - Send local and mapped IPv4/IPv6 address info of diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index a4b1466c1bf6..786fc51bf04b 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -769,7 +769,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, mad_agent_priv->qp_info->port_priv->port_num); - if (device->node_type == RDMA_NODE_IB_SWITCH && + if (rdma_cap_ib_switch(device) && smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) port_num = send_wr->wr.ud.port_num; else @@ -787,14 +787,15 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, if ((opa_get_smp_direction(opa_smp) ? 
opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) == OPA_LID_PERMISSIVE && - opa_smi_handle_dr_smp_send(opa_smp, device->node_type, + opa_smi_handle_dr_smp_send(opa_smp, + rdma_cap_ib_switch(device), port_num) == IB_SMI_DISCARD) { ret = -EINVAL; dev_err(&device->dev, "OPA Invalid directed route\n"); goto out; } opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid); - if (opa_drslid != OPA_LID_PERMISSIVE && + if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) && opa_drslid & 0xffff0000) { ret = -EINVAL; dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n", @@ -810,7 +811,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, } else { if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) == IB_LID_PERMISSIVE && - smi_handle_dr_smp_send(smp, device->node_type, port_num) == + smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) == IB_SMI_DISCARD) { ret = -EINVAL; dev_err(&device->dev, "Invalid directed route\n"); @@ -2030,7 +2031,7 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv struct ib_smp *smp = (struct ib_smp *)recv->mad; if (smi_handle_dr_smp_recv(smp, - port_priv->device->node_type, + rdma_cap_ib_switch(port_priv->device), port_num, port_priv->device->phys_port_cnt) == IB_SMI_DISCARD) @@ -2042,13 +2043,13 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv if (retsmi == IB_SMI_SEND) { /* don't forward */ if (smi_handle_dr_smp_send(smp, - port_priv->device->node_type, + rdma_cap_ib_switch(port_priv->device), port_num) == IB_SMI_DISCARD) return IB_SMI_DISCARD; if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) return IB_SMI_DISCARD; - } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) { + } else if (rdma_cap_ib_switch(port_priv->device)) { /* forward case for switches */ memcpy(response, recv, mad_priv_size(response)); response->header.recv_wc.wc = &response->header.wc; @@ -2115,7 +2116,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv, struct opa_smp *smp = (struct opa_smp *)recv->mad; if (opa_smi_handle_dr_smp_recv(smp, - port_priv->device->node_type, + rdma_cap_ib_switch(port_priv->device), port_num, port_priv->device->phys_port_cnt) == IB_SMI_DISCARD) @@ -2127,7 +2128,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv, if (retsmi == IB_SMI_SEND) { /* don't forward */ if (opa_smi_handle_dr_smp_send(smp, - port_priv->device->node_type, + rdma_cap_ib_switch(port_priv->device), port_num) == IB_SMI_DISCARD) return IB_SMI_DISCARD; @@ -2135,7 +2136,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv, IB_SMI_DISCARD) return IB_SMI_DISCARD; - } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) { + } else if (rdma_cap_ib_switch(port_priv->device)) { /* forward case for switches */ memcpy(response, recv, mad_priv_size(response)); response->header.recv_wc.wc = &response->header.wc; @@ -2235,7 +2236,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, goto out; } - if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) + if (rdma_cap_ib_switch(port_priv->device)) port_num = wc->port_num; else port_num = port_priv->port_num; @@ -3297,17 +3298,11 @@ static int ib_mad_port_close(struct ib_device *device, int port_num) static void ib_mad_init_device(struct ib_device *device) { - int start, end, i; + int start, i; - if (device->node_type == RDMA_NODE_IB_SWITCH) { - start = 0; - end = 0; - } else { - start = 1; - end = device->phys_port_cnt; - } + start = rdma_start_port(device); - for (i = start; 
i <= end; i++) { + for (i = start; i <= rdma_end_port(device); i++) { if (!rdma_cap_ib_mad(device, i)) continue; @@ -3342,17 +3337,9 @@ error: static void ib_mad_remove_device(struct ib_device *device) { - int start, end, i; - - if (device->node_type == RDMA_NODE_IB_SWITCH) { - start = 0; - end = 0; - } else { - start = 1; - end = device->phys_port_cnt; - } + int i; - for (i = start; i <= end; i++) { + for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { if (!rdma_cap_ib_mad(device, i)) continue; diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index 1244f02a5c6d..2cb865c7ce7a 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c @@ -812,12 +812,8 @@ static void mcast_add_one(struct ib_device *device) if (!dev) return; - if (device->node_type == RDMA_NODE_IB_SWITCH) - dev->start_port = dev->end_port = 0; - else { - dev->start_port = 1; - dev->end_port = device->phys_port_cnt; - } + dev->start_port = rdma_start_port(device); + dev->end_port = rdma_end_port(device); for (i = 0; i <= dev->end_port - dev->start_port; i++) { if (!rdma_cap_ib_mcast(device, dev->start_port + i)) diff --git a/drivers/infiniband/core/opa_smi.h b/drivers/infiniband/core/opa_smi.h index 62d91bfa4cb7..3bfab3505a29 100644 --- a/drivers/infiniband/core/opa_smi.h +++ b/drivers/infiniband/core/opa_smi.h @@ -39,12 +39,12 @@ #include "smi.h" -enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type, +enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch, int port_num, int phys_port_cnt); int opa_smi_get_fwd_port(struct opa_smp *smp); extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp); extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, - u8 node_type, int port_num); + bool is_switch, int port_num); /* * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 0fae85062a65..ca919f429666 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -1156,12 +1156,8 @@ static void ib_sa_add_one(struct ib_device *device) int s, e, i; int count = 0; - if (device->node_type == RDMA_NODE_IB_SWITCH) - s = e = 0; - else { - s = 1; - e = device->phys_port_cnt; - } + s = rdma_start_port(device); + e = rdma_end_port(device); sa_dev = kzalloc(sizeof *sa_dev + (e - s + 1) * sizeof (struct ib_sa_port), diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c index 368a561d1a5d..f19b23817c2b 100644 --- a/drivers/infiniband/core/smi.c +++ b/drivers/infiniband/core/smi.c @@ -41,7 +41,7 @@ #include "smi.h" #include "opa_smi.h" -static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num, +static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num, u8 *hop_ptr, u8 hop_cnt, const u8 *initial_path, const u8 *return_path, @@ -64,7 +64,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num, /* C14-9:2 */ if (*hop_ptr && *hop_ptr < hop_cnt) { - if (node_type != RDMA_NODE_IB_SWITCH) + if (!is_switch) return IB_SMI_DISCARD; /* return_path set when received */ @@ -77,7 +77,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num, if (*hop_ptr == hop_cnt) { /* return_path set when received */ (*hop_ptr)++; - return (node_type == RDMA_NODE_IB_SWITCH || + return (is_switch || dr_dlid_is_permissive ? 
IB_SMI_HANDLE : IB_SMI_DISCARD); } @@ -96,7 +96,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num, /* C14-13:2 */ if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) { - if (node_type != RDMA_NODE_IB_SWITCH) + if (!is_switch) return IB_SMI_DISCARD; (*hop_ptr)--; @@ -108,7 +108,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num, if (*hop_ptr == 1) { (*hop_ptr)--; /* C14-13:3 -- SMPs destined for SM shouldn't be here */ - return (node_type == RDMA_NODE_IB_SWITCH || + return (is_switch || dr_slid_is_permissive ? IB_SMI_HANDLE : IB_SMI_DISCARD); } @@ -127,9 +127,9 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num, * Return IB_SMI_DISCARD if the SMP should be discarded */ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, - u8 node_type, int port_num) + bool is_switch, int port_num) { - return __smi_handle_dr_smp_send(node_type, port_num, + return __smi_handle_dr_smp_send(is_switch, port_num, &smp->hop_ptr, smp->hop_cnt, smp->initial_path, smp->return_path, @@ -139,9 +139,9 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, } enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, - u8 node_type, int port_num) + bool is_switch, int port_num) { - return __smi_handle_dr_smp_send(node_type, port_num, + return __smi_handle_dr_smp_send(is_switch, port_num, &smp->hop_ptr, smp->hop_cnt, smp->route.dr.initial_path, smp->route.dr.return_path, @@ -152,7 +152,7 @@ enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, OPA_LID_PERMISSIVE); } -static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num, +static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num, int phys_port_cnt, u8 *hop_ptr, u8 hop_cnt, const u8 *initial_path, @@ -173,7 +173,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num, /* C14-9:2 -- intermediate hop */ if (*hop_ptr && *hop_ptr < hop_cnt) { - if (node_type != RDMA_NODE_IB_SWITCH) + if (!is_switch) return IB_SMI_DISCARD; return_path[*hop_ptr] = port_num; @@ -188,7 +188,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num, return_path[*hop_ptr] = port_num; /* hop_ptr updated when sending */ - return (node_type == RDMA_NODE_IB_SWITCH || + return (is_switch || dr_dlid_is_permissive ? IB_SMI_HANDLE : IB_SMI_DISCARD); } @@ -208,7 +208,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num, /* C14-13:2 */ if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) { - if (node_type != RDMA_NODE_IB_SWITCH) + if (!is_switch) return IB_SMI_DISCARD; /* hop_ptr updated when sending */ @@ -224,8 +224,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num, return IB_SMI_HANDLE; } /* hop_ptr updated when sending */ - return (node_type == RDMA_NODE_IB_SWITCH ? - IB_SMI_HANDLE : IB_SMI_DISCARD); + return (is_switch ? 
IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-13:4 -- hop_ptr = 0 -> give to SM */ @@ -238,10 +237,10 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num, * Adjust information for a received SMP * Return IB_SMI_DISCARD if the SMP should be dropped */ -enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, +enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch, int port_num, int phys_port_cnt) { - return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt, + return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt, &smp->hop_ptr, smp->hop_cnt, smp->initial_path, smp->return_path, @@ -254,10 +253,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, * Adjust information for a received SMP * Return IB_SMI_DISCARD if the SMP should be dropped */ -enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type, +enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch, int port_num, int phys_port_cnt) { - return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt, + return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt, &smp->hop_ptr, smp->hop_cnt, smp->route.dr.initial_path, smp->route.dr.return_path, diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h index aff96bac49b4..33c91c8a16e9 100644 --- a/drivers/infiniband/core/smi.h +++ b/drivers/infiniband/core/smi.h @@ -51,12 +51,12 @@ enum smi_forward_action { IB_SMI_FORWARD /* SMP should be forwarded (for switches only) */ }; -enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, +enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch, int port_num, int phys_port_cnt); int smi_get_fwd_port(struct ib_smp *smp); extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp); extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, - u8 node_type, int port_num); + bool is_switch, int port_num); /* * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index ed6b6c85c334..0b84a9cdfe5b 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -870,7 +870,7 @@ int ib_device_register_sysfs(struct ib_device *device, goto err_put; } - if (device->node_type == RDMA_NODE_IB_SWITCH) { + if (rdma_cap_ib_switch(device)) { ret = add_port(device, 0, port_callback); if (ret) goto err_put; diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index 62c24b1452b8..009481073644 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -1193,6 +1193,7 @@ static int ib_ucm_close(struct inode *inode, struct file *filp) return 0; } +static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES); static void ib_ucm_release_dev(struct device *dev) { struct ib_ucm_device *ucm_dev; @@ -1202,7 +1203,7 @@ static void ib_ucm_release_dev(struct device *dev) if (ucm_dev->devnum < IB_UCM_MAX_DEVICES) clear_bit(ucm_dev->devnum, dev_map); else - clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map); + clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map); kfree(ucm_dev); } @@ -1226,7 +1227,6 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); static dev_t overflow_maj; -static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES); static int find_overflow_devnum(void) { int ret; diff --git 
a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index ad45469f7582..29b21213ea75 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1354,10 +1354,10 @@ static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2) /* Acquire mutex's based on pointer comparison to prevent deadlock. */ if (file1 < file2) { mutex_lock(&file1->mut); - mutex_lock(&file2->mut); + mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING); } else { mutex_lock(&file2->mut); - mutex_lock(&file1->mut); + mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING); } } @@ -1616,6 +1616,7 @@ static void __exit ucma_cleanup(void) device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); misc_deregister(&ucma_misc); idr_destroy(&ctx_idr); + idr_destroy(&multicast_idr); } module_init(ucma_init); diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c index 12b5bc23832b..376b031c2c7f 100644 --- a/drivers/infiniband/hw/ehca/ehca_sqp.c +++ b/drivers/infiniband/hw/ehca/ehca_sqp.c @@ -226,8 +226,9 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, const struct ib_mad *in_mad = (const struct ib_mad *)in; struct ib_mad *out_mad = (struct ib_mad *)out; - BUG_ON(in_mad_size != sizeof(*in_mad) || - *out_mad_size != sizeof(*out_mad)); + if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad))) + return IB_MAD_RESULT_FAILURE; if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc) return IB_MAD_RESULT_FAILURE; diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index 2d7e503d13cb..871dbe56216a 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c @@ -31,6 +31,8 @@ * SOFTWARE. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/idr.h> @@ -399,8 +401,8 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) u32 bar0 = 0, bar1 = 0; #ifdef CONFIG_X86_64 - if (WARN(pat_enabled(), - "ipath needs PAT disabled, boot with nopat kernel parameter\n")) { + if (pat_enabled()) { + pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n"); ret = -ENODEV; goto bail; } diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c index 948188e37f95..ad3a926ab3c5 100644 --- a/drivers/infiniband/hw/ipath/ipath_mad.c +++ b/drivers/infiniband/hw/ipath/ipath_mad.c @@ -1499,8 +1499,9 @@ int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, const struct ib_mad *in_mad = (const struct ib_mad *)in; struct ib_mad *out_mad = (struct ib_mad *)out; - BUG_ON(in_mad_size != sizeof(*in_mad) || - *out_mad_size != sizeof(*out_mad)); + if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad))) + return IB_MAD_RESULT_FAILURE; switch (in_mad->mad_hdr.mgmt_class) { case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 48253b839a6f..30ba49c4a98c 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c @@ -2044,9 +2044,9 @@ int ipath_register_ib_device(struct ipath_devdata *dd) spin_lock_init(&idev->qp_table.lock); spin_lock_init(&idev->lk_table.lock); - idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE); + idev->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE); /* Set the prefix to the default value (see ch. 4.1.1) */ - idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL); + idev->gid_prefix = cpu_to_be64(0xfe80000000000000ULL); ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size); if (ret) diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 85a50df2f203..68b3dfa922bf 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -860,21 +860,31 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, struct mlx4_ib_dev *dev = to_mdev(ibdev); const struct ib_mad *in_mad = (const struct ib_mad *)in; struct ib_mad *out_mad = (struct ib_mad *)out; + enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num); - BUG_ON(in_mad_size != sizeof(*in_mad) || - *out_mad_size != sizeof(*out_mad)); + if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad))) + return IB_MAD_RESULT_FAILURE; - switch (rdma_port_get_link_layer(ibdev, port_num)) { - case IB_LINK_LAYER_INFINIBAND: - if (!mlx4_is_slave(dev->dev)) - return ib_process_mad(ibdev, mad_flags, port_num, in_wc, - in_grh, in_mad, out_mad); - case IB_LINK_LAYER_ETHERNET: - return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, - in_grh, in_mad, out_mad); - default: - return -EINVAL; + /* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA + * queries, should be called only by VFs and for that specific purpose + */ + if (link == IB_LINK_LAYER_INFINIBAND) { + if (mlx4_is_slave(dev->dev) && + in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT && + in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS) + return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, + in_grh, in_mad, out_mad); + + return ib_process_mad(ibdev, mad_flags, port_num, in_wc, + in_grh, in_mad, out_mad); } + + if 
(link == IB_LINK_LAYER_ETHERNET) + return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, + in_grh, in_mad, out_mad); + + return -EINVAL; } static void send_handler(struct ib_mad_agent *agent, diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 067a691ecbed..8be6db816460 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -253,14 +253,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL; props->timestamp_mask = 0xFFFFFFFFFFFFULL; - err = mlx4_get_internal_clock_params(dev->dev, &clock_params); - if (err) - goto out; + if (!mlx4_is_slave(dev->dev)) + err = mlx4_get_internal_clock_params(dev->dev, &clock_params); if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) { - resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE; resp.response_length += sizeof(resp.hca_core_clock_offset); - resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP; + if (!err && !mlx4_is_slave(dev->dev)) { + resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP; + resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE; + } } if (uhw->outlen) { @@ -2669,31 +2670,33 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init) dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC); if (!dm) { pr_err("failed to allocate memory for tunneling qp update\n"); - goto out; + return; } for (i = 0; i < ports; i++) { dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC); if (!dm[i]) { pr_err("failed to allocate memory for tunneling qp update work struct\n"); - for (i = 0; i < dev->caps.num_ports; i++) { - if (dm[i]) - kfree(dm[i]); - } + while (--i >= 0) + kfree(dm[i]); goto out; } - } - /* initialize or tear down tunnel QPs for the slave */ - for (i = 0; i < ports; i++) { INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work); dm[i]->port = first_port + i + 1; dm[i]->slave = slave; dm[i]->do_init = do_init; dm[i]->dev = ibdev; - spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags); - if (!ibdev->sriov.is_going_down) + } + /* initialize or tear down tunnel QPs for the slave */ + spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags); + if (!ibdev->sriov.is_going_down) { + for (i = 0; i < ports; i++) queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work); spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); + } else { + spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); + for (i = 0; i < ports; i++) + kfree(dm[i]); } out: kfree(dm); diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index 01fc97db45d6..b84d13a487cc 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -68,8 +68,9 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, const struct ib_mad *in_mad = (const struct ib_mad *)in; struct ib_mad *out_mad = (struct ib_mad *)out; - BUG_ON(in_mad_size != sizeof(*in_mad) || - *out_mad_size != sizeof(*out_mad)); + if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad))) + return IB_MAD_RESULT_FAILURE; slid = in_wc ? 
in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c index 6b2418b74c99..7c3f2fb44ba5 100644 --- a/drivers/infiniband/hw/mthca/mthca_mad.c +++ b/drivers/infiniband/hw/mthca/mthca_mad.c @@ -209,8 +209,9 @@ int mthca_process_mad(struct ib_device *ibdev, const struct ib_mad *in_mad = (const struct ib_mad *)in; struct ib_mad *out_mad = (struct ib_mad *)out; - BUG_ON(in_mad_size != sizeof(*in_mad) || - *out_mad_size != sizeof(*out_mad)); + if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad))) + return IB_MAD_RESULT_FAILURE; /* Forward locally generated traps to the SM */ if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 9047af429906..8a3ad170d790 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -1520,8 +1520,9 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi int rc = arpindex; struct net_device *netdev; struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter; + __be32 dst_ipaddr = htonl(dst_ip); - rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0); + rt = ip_route_output(&init_net, dst_ipaddr, nesvnic->local_ipaddr, 0, 0); if (IS_ERR(rt)) { printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n", __func__, dst_ip); @@ -1533,7 +1534,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi else netdev = nesvnic->netdev; - neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev); + neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr); rcu_read_lock(); if (neigh) { diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 02120d340d50..4713dd7ed764 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -3861,7 +3861,7 @@ void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr, (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) | (((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]); cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32( - (((u32)mac_addr[0]) << 16) | (u32)mac_addr[1]); + (((u32)mac_addr[0]) << 8) | (u32)mac_addr[1]); } else { cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0; cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index 4bafa15708d0..29b27675dd70 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c @@ -215,8 +215,9 @@ int ocrdma_process_mad(struct ib_device *ibdev, const struct ib_mad *in_mad = (const struct ib_mad *)in; struct ib_mad *out_mad = (struct ib_mad *)out; - BUG_ON(in_mad_size != sizeof(*in_mad) || - *out_mad_size != sizeof(*out_mad)); + if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad))) + return IB_MAD_RESULT_FAILURE; switch (in_mad->mad_hdr.mgmt_class) { case IB_MGMT_CLASS_PERF_MGMT: diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 8a1398b253a2..d98a707a5eb9 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -696,6 +696,7 @@ static void __exit ocrdma_exit_module(void) ocrdma_unregister_inet6addr_notifier(); ocrdma_unregister_inetaddr_notifier(); ocrdma_rem_debugfs(); + idr_destroy(&ocrdma_dev_id); } module_init(ocrdma_init_module); 
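[Editor's note] A recurring hardening idiom in the MAD hunks around this point (mthca, ocrdma above; qib, mlx4, mlx5, ehca, ipath elsewhere in this series) is replacing a BUG_ON() size assertion with a recoverable WARN_ON_ONCE() check, so a malformed MAD buffer size is logged once and the request rejected instead of panicking the kernel. A minimal standalone sketch of that idiom follows; my_process_mad() is a hypothetical stand-in for the per-driver handlers, not code from the patch itself, and only the simplified size arguments are shown.

#include <linux/bug.h>     /* WARN_ON_ONCE() */
#include <rdma/ib_mad.h>   /* struct ib_mad, IB_MAD_RESULT_* */

/* Illustrative only: reject bad MAD sizes instead of crashing. */
static int my_process_mad(size_t in_mad_size, size_t *out_mad_size)
{
	/* A wrong size indicates a core/driver bug, but is not fatal:
	 * warn once (visible in dmesg with a backtrace) and fail the MAD. */
	if (WARN_ON_ONCE(in_mad_size != sizeof(struct ib_mad) ||
			 *out_mad_size != sizeof(struct ib_mad)))
		return IB_MAD_RESULT_FAILURE;	/* caller drops the MAD */

	/* ... normal MAD processing would continue here ... */
	return IB_MAD_RESULT_SUCCESS;
}

Unlike BUG_ON(), which halts the machine, WARN_ON_ONCE() emits a single warning the first time the condition is hit, so the error stays observable while the host keeps running.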
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 05e3242d8442..9625e7c438e5 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c @@ -2412,8 +2412,9 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, const struct ib_mad *in_mad = (const struct ib_mad *)in; struct ib_mad *out_mad = (struct ib_mad *)out; - BUG_ON(in_mad_size != sizeof(*in_mad) || - *out_mad_size != sizeof(*out_mad)); + if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad))) + return IB_MAD_RESULT_FAILURE; switch (in_mad->mad_hdr.mgmt_class) { case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index bd94b0a6e9e5..79859c4d43c9 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -239,7 +239,7 @@ struct ipoib_cm_tx { struct net_device *dev; struct ipoib_neigh *neigh; struct ipoib_path *path; - struct ipoib_cm_tx_buf *tx_ring; + struct ipoib_tx_buf *tx_ring; unsigned tx_head; unsigned tx_tail; unsigned long flags; @@ -504,6 +504,33 @@ int ipoib_mcast_stop_thread(struct net_device *dev); void ipoib_mcast_dev_down(struct net_device *dev); void ipoib_mcast_dev_flush(struct net_device *dev); +int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req); +void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv, + struct ipoib_tx_buf *tx_req); + +static inline void ipoib_build_sge(struct ipoib_dev_priv *priv, + struct ipoib_tx_buf *tx_req) +{ + int i, off; + struct sk_buff *skb = tx_req->skb; + skb_frag_t *frags = skb_shinfo(skb)->frags; + int nr_frags = skb_shinfo(skb)->nr_frags; + u64 *mapping = tx_req->mapping; + + if (skb_headlen(skb)) { + priv->tx_sge[0].addr = mapping[0]; + priv->tx_sge[0].length = skb_headlen(skb); + off = 1; + } else + off = 0; + + for (i = 0; i < nr_frags; ++i) { + priv->tx_sge[i + off].addr = mapping[i + off]; + priv->tx_sge[i + off].length = skb_frag_size(&frags[i]); + } + priv->tx_wr.num_sge = nr_frags + off; +} + #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev); int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index cf32a778e7d0..ee39be6ccfb0 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -694,14 +694,12 @@ repost: static inline int post_send(struct ipoib_dev_priv *priv, struct ipoib_cm_tx *tx, unsigned int wr_id, - u64 addr, int len) + struct ipoib_tx_buf *tx_req) { struct ib_send_wr *bad_wr; - priv->tx_sge[0].addr = addr; - priv->tx_sge[0].length = len; + ipoib_build_sge(priv, tx_req); - priv->tx_wr.num_sge = 1; priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM; return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr); @@ -710,8 +708,7 @@ static inline int post_send(struct ipoib_dev_priv *priv, void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx) { struct ipoib_dev_priv *priv = netdev_priv(dev); - struct ipoib_cm_tx_buf *tx_req; - u64 addr; + struct ipoib_tx_buf *tx_req; int rc; if (unlikely(skb->len > tx->mtu)) { @@ -735,24 +732,21 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ */ tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)]; tx_req->skb = skb; - addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE); - if 
(unlikely(ib_dma_mapping_error(priv->ca, addr))) { + + if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) { ++dev->stats.tx_errors; dev_kfree_skb_any(skb); return; } - tx_req->mapping = addr; - skb_orphan(skb); skb_dst_drop(skb); - rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), - addr, skb->len); + rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req); if (unlikely(rc)) { ipoib_warn(priv, "post_send failed, error %d\n", rc); ++dev->stats.tx_errors; - ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE); + ipoib_dma_unmap_tx(priv, tx_req); dev_kfree_skb_any(skb); } else { dev->trans_start = jiffies; @@ -777,7 +771,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_cm_tx *tx = wc->qp->qp_context; unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM; - struct ipoib_cm_tx_buf *tx_req; + struct ipoib_tx_buf *tx_req; unsigned long flags; ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n", @@ -791,7 +785,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) tx_req = &tx->tx_ring[wr_id]; - ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE); + ipoib_dma_unmap_tx(priv, tx_req); /* FIXME: is this right? Shouldn't we only increment on success? */ ++dev->stats.tx_packets; @@ -1036,6 +1030,9 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_ struct ib_qp *tx_qp; + if (dev->features & NETIF_F_SG) + attr.cap.max_send_sge = MAX_SKB_FRAGS + 1; + tx_qp = ib_create_qp(priv->pd, &attr); if (PTR_ERR(tx_qp) == -EINVAL) { ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n", @@ -1170,7 +1167,7 @@ err_tx: static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p) { struct ipoib_dev_priv *priv = netdev_priv(p->dev); - struct ipoib_cm_tx_buf *tx_req; + struct ipoib_tx_buf *tx_req; unsigned long begin; ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n", @@ -1197,8 +1194,7 @@ timeout: while ((int) p->tx_tail - (int) p->tx_head < 0) { tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)]; - ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, - DMA_TO_DEVICE); + ipoib_dma_unmap_tx(priv, tx_req); dev_kfree_skb_any(tx_req->skb); ++p->tx_tail; netif_tx_lock_bh(p->dev); @@ -1455,7 +1451,6 @@ static void ipoib_cm_stale_task(struct work_struct *work) spin_unlock_irq(&priv->lock); } - static ssize_t show_mode(struct device *d, struct device_attribute *attr, char *buf) { diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 63b92cbb29ad..d266667ca9b8 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -263,8 +263,7 @@ repost: "for buf %d\n", wr_id); } -static int ipoib_dma_map_tx(struct ib_device *ca, - struct ipoib_tx_buf *tx_req) +int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req) { struct sk_buff *skb = tx_req->skb; u64 *mapping = tx_req->mapping; @@ -305,8 +304,8 @@ partial_error: return -EIO; } -static void ipoib_dma_unmap_tx(struct ib_device *ca, - struct ipoib_tx_buf *tx_req) +void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv, + struct ipoib_tx_buf *tx_req) { struct sk_buff *skb = tx_req->skb; u64 *mapping = tx_req->mapping; @@ -314,7 +313,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca, int off; if (skb_headlen(skb)) { - ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE); + ib_dma_unmap_single(priv->ca, 
mapping[0], skb_headlen(skb), + DMA_TO_DEVICE); off = 1; } else off = 0; @@ -322,8 +322,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca, for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag), - DMA_TO_DEVICE); + ib_dma_unmap_page(priv->ca, mapping[i + off], + skb_frag_size(frag), DMA_TO_DEVICE); } } @@ -389,7 +389,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) tx_req = &priv->tx_ring[wr_id]; - ipoib_dma_unmap_tx(priv->ca, tx_req); + ipoib_dma_unmap_tx(priv, tx_req); ++dev->stats.tx_packets; dev->stats.tx_bytes += tx_req->skb->len; @@ -514,24 +514,10 @@ static inline int post_send(struct ipoib_dev_priv *priv, void *head, int hlen) { struct ib_send_wr *bad_wr; - int i, off; struct sk_buff *skb = tx_req->skb; - skb_frag_t *frags = skb_shinfo(skb)->frags; - int nr_frags = skb_shinfo(skb)->nr_frags; - u64 *mapping = tx_req->mapping; - if (skb_headlen(skb)) { - priv->tx_sge[0].addr = mapping[0]; - priv->tx_sge[0].length = skb_headlen(skb); - off = 1; - } else - off = 0; + ipoib_build_sge(priv, tx_req); - for (i = 0; i < nr_frags; ++i) { - priv->tx_sge[i + off].addr = mapping[i + off]; - priv->tx_sge[i + off].length = skb_frag_size(&frags[i]); - } - priv->tx_wr.num_sge = nr_frags + off; priv->tx_wr.wr_id = wr_id; priv->tx_wr.wr.ud.remote_qpn = qpn; priv->tx_wr.wr.ud.ah = address; @@ -617,7 +603,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, ipoib_warn(priv, "post_send failed, error %d\n", rc); ++dev->stats.tx_errors; --priv->tx_outstanding; - ipoib_dma_unmap_tx(priv->ca, tx_req); + ipoib_dma_unmap_tx(priv, tx_req); dev_kfree_skb_any(skb); if (netif_queue_stopped(dev)) netif_wake_queue(dev); @@ -868,7 +854,7 @@ int ipoib_ib_dev_stop(struct net_device *dev) while ((int) priv->tx_tail - (int) priv->tx_head < 0) { tx_req = &priv->tx_ring[priv->tx_tail & (ipoib_sendq_size - 1)]; - ipoib_dma_unmap_tx(priv->ca, tx_req); + ipoib_dma_unmap_tx(priv, tx_req); dev_kfree_skb_any(tx_req->skb); ++priv->tx_tail; --priv->tx_outstanding; @@ -985,20 +971,21 @@ static inline int update_child_pkey(struct ipoib_dev_priv *priv) } static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, - enum ipoib_flush_level level) + enum ipoib_flush_level level, + int nesting) { struct ipoib_dev_priv *cpriv; struct net_device *dev = priv->dev; int result; - down_read(&priv->vlan_rwsem); + down_read_nested(&priv->vlan_rwsem, nesting); /* * Flush any child interfaces too -- they might be up even if * the parent is down. 
*/ list_for_each_entry(cpriv, &priv->child_intfs, list) - __ipoib_ib_dev_flush(cpriv, level); + __ipoib_ib_dev_flush(cpriv, level, nesting + 1); up_read(&priv->vlan_rwsem); @@ -1076,7 +1063,7 @@ void ipoib_ib_dev_flush_light(struct work_struct *work) struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_light); - __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT); + __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0); } void ipoib_ib_dev_flush_normal(struct work_struct *work) @@ -1084,7 +1071,7 @@ void ipoib_ib_dev_flush_normal(struct work_struct *work) struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_normal); - __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL); + __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0); } void ipoib_ib_dev_flush_heavy(struct work_struct *work) @@ -1092,7 +1079,7 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work) struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_heavy); - __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY); + __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0); } void ipoib_ib_dev_cleanup(struct net_device *dev) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index da149c278cb8..b2943c84a5dd 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -190,7 +190,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu struct ipoib_dev_priv *priv = netdev_priv(dev); if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags)) - features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); + features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); return features; } @@ -232,6 +232,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf) ipoib_warn(priv, "enabling connected mode " "will cause multicast packet drops\n"); netdev_update_features(dev); + dev_set_mtu(dev, ipoib_cm_max_mtu(dev)); rtnl_unlock(); priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; @@ -1577,7 +1578,8 @@ static struct net_device *ipoib_add_port(const char *format, SET_NETDEV_DEV(priv->dev, hca->dma_device); priv->dev->dev_id = port - 1; - if (!ib_query_port(hca, port, &attr)) + result = ib_query_port(hca, port, &attr); + if (!result) priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); else { printk(KERN_WARNING "%s: ib_query_port %d failed\n", @@ -1598,7 +1600,8 @@ static struct net_device *ipoib_add_port(const char *format, goto device_init_failed; } - if (ipoib_set_dev_features(priv, hca)) + result = ipoib_set_dev_features(priv, hca); + if (result) goto device_init_failed; /* @@ -1684,7 +1687,7 @@ static void ipoib_add_one(struct ib_device *device) struct list_head *dev_list; struct net_device *dev; struct ipoib_dev_priv *priv; - int s, e, p; + int p; int count = 0; dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL); @@ -1693,15 +1696,7 @@ static void ipoib_add_one(struct ib_device *device) INIT_LIST_HEAD(dev_list); - if (device->node_type == RDMA_NODE_IB_SWITCH) { - s = 0; - e = 0; - } else { - s = 1; - e = device->phys_port_cnt; - } - - for (p = s; p <= e; ++p) { + for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { if (!rdma_protocol_ib(device, p)) continue; dev = ipoib_add_port("ib%d", device, p); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 267dc4f75502..31a20b462266 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -161,13 +161,10 @@ static int srp_tmo_set(const char *val, const struct 
kernel_param *kp) { int tmo, res; - if (strncmp(val, "off", 3) != 0) { - res = kstrtoint(val, 0, &tmo); - if (res) - goto out; - } else { - tmo = -1; - } + res = srp_parse_tmo(&tmo, val); + if (res) + goto out; + if (kp->arg == &srp_reconnect_delay) res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo, srp_dev_loss_tmo); @@ -3379,7 +3376,7 @@ static void srp_add_one(struct ib_device *device) struct srp_device *srp_dev; struct ib_device_attr *dev_attr; struct srp_host *host; - int mr_page_shift, s, e, p; + int mr_page_shift, p; u64 max_pages_per_mr; dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL); @@ -3443,15 +3440,7 @@ static void srp_add_one(struct ib_device *device) if (IS_ERR(srp_dev->mr)) goto err_pd; - if (device->node_type == RDMA_NODE_IB_SWITCH) { - s = 0; - e = 0; - } else { - s = 1; - e = device->phys_port_cnt; - } - - for (p = s; p <= e; ++p) { + for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { host = srp_add_port(srp_dev, p); if (host) list_add_tail(&host->list, &srp_dev->dev_list); diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 82897ca17f32..60ff0a2390e5 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -302,7 +302,7 @@ static void srpt_get_iou(struct ib_dm_mad *mad) int i; ioui = (struct ib_dm_iou_info *)mad->data; - ioui->change_id = __constant_cpu_to_be16(1); + ioui->change_id = cpu_to_be16(1); ioui->max_controllers = 16; /* set present for slot 1 and empty for the rest */ @@ -330,13 +330,13 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot, if (!slot || slot > 16) { mad->mad_hdr.status - = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); + = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); return; } if (slot > 2) { mad->mad_hdr.status - = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC); + = cpu_to_be16(DM_MAD_STATUS_NO_IOC); return; } @@ -348,10 +348,10 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot, iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver); iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id); iocp->subsys_device_id = 0x0; - iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS); - iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS); - iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL); - iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION); + iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS); + iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS); + iocp->protocol = cpu_to_be16(SRP_PROTOCOL); + iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION); iocp->send_queue_depth = cpu_to_be16(sdev->srq_size); iocp->rdma_read_depth = 4; iocp->send_size = cpu_to_be32(srp_max_req_size); @@ -379,13 +379,13 @@ static void srpt_get_svc_entries(u64 ioc_guid, if (!slot || slot > 16) { mad->mad_hdr.status - = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); + = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); return; } if (slot > 2 || lo > hi || hi > 1) { mad->mad_hdr.status - = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC); + = cpu_to_be16(DM_MAD_STATUS_NO_IOC); return; } @@ -436,7 +436,7 @@ static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad, break; default: rsp_mad->mad_hdr.status = - __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); + cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); break; } } @@ -493,11 +493,11 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent, break; case IB_MGMT_METHOD_SET: dm_mad->mad_hdr.status = - 
__constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); + cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); break; default: dm_mad->mad_hdr.status = - __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD); + cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD); break; } @@ -1535,7 +1535,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch, memset(srp_rsp, 0, sizeof *srp_rsp); srp_rsp->opcode = SRP_RSP; srp_rsp->req_lim_delta = - __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0)); + cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0)); srp_rsp->tag = tag; srp_rsp->status = status; @@ -1585,8 +1585,8 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch, memset(srp_rsp, 0, sizeof *srp_rsp); srp_rsp->opcode = SRP_RSP; - srp_rsp->req_lim_delta = __constant_cpu_to_be32(1 - + atomic_xchg(&ch->req_lim_delta, 0)); + srp_rsp->req_lim_delta = + cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0)); srp_rsp->tag = tag; srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID; @@ -1630,7 +1630,7 @@ static uint64_t srpt_unpack_lun(const uint8_t *lun, int len) switch (len) { case 8: if ((*((__be64 *)lun) & - __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0) + cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0) goto out_err; break; case 4: @@ -2449,8 +2449,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, } if (it_iu_len > srp_max_req_size || it_iu_len < 64) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE); ret = -EINVAL; pr_err("rejected SRP_LOGIN_REQ because its" " length (%d bytes) is out of range (%d .. %d)\n", @@ -2459,8 +2459,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, } if (!sport->enabled) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); ret = -EINVAL; pr_err("rejected SRP_LOGIN_REQ because the target port" " has not yet been enabled\n"); @@ -2505,8 +2505,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid) || *(__be64 *)(req->target_port_id + 8) != cpu_to_be64(srpt_service_guid)) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL); ret = -ENOMEM; pr_err("rejected SRP_LOGIN_REQ because it" " has an invalid target port identifier.\n"); @@ -2515,8 +2515,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, ch = kzalloc(sizeof *ch, GFP_KERNEL); if (!ch) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); pr_err("rejected SRP_LOGIN_REQ because no memory.\n"); ret = -ENOMEM; goto reject; @@ -2552,8 +2552,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, ret = srpt_create_ch_ib(ch); if (ret) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); pr_err("rejected SRP_LOGIN_REQ because creating" " a new RDMA channel failed.\n"); goto free_ring; @@ -2561,8 +2561,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, ret = srpt_ch_qp_rtr(ch, ch->qp); if (ret) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); pr_err("rejected SRP_LOGIN_REQ because enabling" " RTR failed (error code = %d)\n", ret); goto destroy_ib; @@ 
-2580,15 +2579,15 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, if (!nacl) { pr_info("Rejected login because no ACL has been" " configured yet for initiator %s.\n", ch->sess_name); - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED); goto destroy_ib; } ch->sess = transport_init_session(TARGET_PROT_NORMAL); if (IS_ERR(ch->sess)) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); pr_debug("Failed to create session\n"); goto deregister_session; } @@ -2604,8 +2603,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, rsp->max_it_iu_len = req->req_it_iu_len; rsp->max_ti_iu_len = req->req_it_iu_len; ch->max_ti_iu_len = it_iu_len; - rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT - | SRP_BUF_FORMAT_INDIRECT); + rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT + | SRP_BUF_FORMAT_INDIRECT); rsp->req_lim_delta = cpu_to_be32(ch->rq_size); atomic_set(&ch->req_lim, ch->rq_size); atomic_set(&ch->req_lim_delta, 0); @@ -2655,8 +2654,8 @@ free_ch: reject: rej->opcode = SRP_LOGIN_REJ; rej->tag = req->tag; - rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT - | SRP_BUF_FORMAT_INDIRECT); + rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT + | SRP_BUF_FORMAT_INDIRECT); ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, (void *)rej, sizeof *rej); diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c index 074a65ed17bb..766bf2660116 100644 --- a/drivers/input/input-leds.c +++ b/drivers/input/input-leds.c @@ -71,6 +71,18 @@ static void input_leds_event(struct input_handle *handle, unsigned int type, { } +static int input_leds_get_count(struct input_dev *dev) +{ + unsigned int led_code; + int count = 0; + + for_each_set_bit(led_code, dev->ledbit, LED_CNT) + if (input_led_info[led_code].name) + count++; + + return count; +} + static int input_leds_connect(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id) @@ -81,7 +93,7 @@ static int input_leds_connect(struct input_handler *handler, int led_no; int error; - num_leds = bitmap_weight(dev->ledbit, LED_CNT); + num_leds = input_leds_get_count(dev); if (!num_leds) return -ENXIO; @@ -112,7 +124,7 @@ static int input_leds_connect(struct input_handler *handler, led->handle = &leds->handle; led->code = led_code; - if (WARN_ON(!input_led_info[led_code].name)) + if (!input_led_info[led_code].name) continue; led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s", diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index ce3d40004458..22b9ca901f4e 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1167,7 +1167,7 @@ static int elantech_set_input_params(struct psmouse *psmouse) struct input_dev *dev = psmouse->dev; struct elantech_data *etd = psmouse->private; unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0; - unsigned int x_res = 0, y_res = 0; + unsigned int x_res = 31, y_res = 31; if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width)) return -1; @@ -1232,8 +1232,6 @@ static int elantech_set_input_params(struct psmouse *psmouse) /* For X to recognize me as touchpad. 
*/ input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0); input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0); - input_abs_set_res(dev, ABS_X, x_res); - input_abs_set_res(dev, ABS_Y, y_res); /* * range of pressure and width is the same as v2, * report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility. @@ -1246,8 +1244,6 @@ static int elantech_set_input_params(struct psmouse *psmouse) input_mt_init_slots(dev, ETP_MAX_FINGERS, 0); input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0); input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0); - input_abs_set_res(dev, ABS_MT_POSITION_X, x_res); - input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res); input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2, ETP_PMAX_V2, 0, 0); /* @@ -1259,6 +1255,13 @@ static int elantech_set_input_params(struct psmouse *psmouse) break; } + input_abs_set_res(dev, ABS_X, x_res); + input_abs_set_res(dev, ABS_Y, y_res); + if (etd->hw_version > 1) { + input_abs_set_res(dev, ABS_MT_POSITION_X, x_res); + input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res); + } + etd->y_max = y_max; etd->width = width; diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index b4d12e29abff..e36162b28c2a 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -15,6 +15,7 @@ */ #include <linux/kernel.h> +#include <linux/dmi.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/input/mt.h> @@ -34,6 +35,7 @@ struct goodix_ts_data { int abs_y_max; unsigned int max_touch_num; unsigned int int_trigger_type; + bool rotated_screen; }; #define GOODIX_MAX_HEIGHT 4096 @@ -60,6 +62,30 @@ static const unsigned long goodix_irq_flags[] = { IRQ_TYPE_LEVEL_HIGH, }; +/* + * Those tablets have their coordinates origin at the bottom right + * of the tablet, as if rotated 180 degrees + */ +static const struct dmi_system_id rotated_screen[] = { +#if defined(CONFIG_DMI) && defined(CONFIG_X86) + { + .ident = "WinBook TW100", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "WinBook"), + DMI_MATCH(DMI_PRODUCT_NAME, "TW100") + } + }, + { + .ident = "WinBook TW700", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "WinBook"), + DMI_MATCH(DMI_PRODUCT_NAME, "TW700") + }, + }, +#endif + {} +}; + /** * goodix_i2c_read - read data from a register of the i2c slave device. 
* @@ -129,6 +155,11 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data) int input_y = get_unaligned_le16(&coor_data[3]); int input_w = get_unaligned_le16(&coor_data[5]); + if (ts->rotated_screen) { + input_x = ts->abs_x_max - input_x; + input_y = ts->abs_y_max - input_y; + } + input_mt_slot(ts->input_dev, id); input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true); input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x); @@ -223,6 +254,11 @@ static void goodix_read_config(struct goodix_ts_data *ts) ts->abs_y_max = GOODIX_MAX_HEIGHT; ts->max_touch_num = GOODIX_MAX_CONTACTS; } + + ts->rotated_screen = dmi_check_system(rotated_screen); + if (ts->rotated_screen) + dev_dbg(&ts->client->dev, + "Applying '180 degrees rotated screen' quirk\n"); } /** diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index f2c6c352c55a..2c41107240de 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c @@ -627,6 +627,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch) goto err_out; } + /* TSC-25 data sheet specifies a delay after the RESET command */ + msleep(150); + /* set coordinate output rate */ buf[0] = buf[1] = 0xFF; ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0), diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c index f58a196521a9..80285c71786e 100644 --- a/drivers/input/touchscreen/zforce_ts.c +++ b/drivers/input/touchscreen/zforce_ts.c @@ -429,7 +429,7 @@ static int zforce_read_packet(struct zforce_ts *ts, u8 *buf) goto unlock; } - if (buf[PAYLOAD_LENGTH] == 0) { + if (buf[PAYLOAD_LENGTH] == 0 || buf[PAYLOAD_LENGTH] > FRAME_MAXSIZE) { dev_err(&client->dev, "invalid payload length: %d\n", buf[PAYLOAD_LENGTH]); ret = -EIO; diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 8e9ec81ce4bb..da902baaa794 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -199,9 +199,10 @@ * Stream table. 
* * Linear: Enough to cover 1 << IDR1.SIDSIZE entries - * 2lvl: 8k L1 entries, 256 lazy entries per table (each table covers a PCI bus) + * 2lvl: 128k L1 entries, + * 256 lazy entries per table (each table covers a PCI bus) */ -#define STRTAB_L1_SZ_SHIFT 16 +#define STRTAB_L1_SZ_SHIFT 20 #define STRTAB_SPLIT 8 #define STRTAB_L1_DESC_DWORDS 1 @@ -269,10 +270,10 @@ #define ARM64_TCR_TG0_SHIFT 14 #define ARM64_TCR_TG0_MASK 0x3UL #define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8 -#define ARM64_TCR_IRGN0_SHIFT 24 +#define ARM64_TCR_IRGN0_SHIFT 8 #define ARM64_TCR_IRGN0_MASK 0x3UL #define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10 -#define ARM64_TCR_ORGN0_SHIFT 26 +#define ARM64_TCR_ORGN0_SHIFT 10 #define ARM64_TCR_ORGN0_MASK 0x3UL #define CTXDESC_CD_0_TCR_SH0_SHIFT 12 #define ARM64_TCR_SH0_SHIFT 12 @@ -542,6 +543,9 @@ struct arm_smmu_device { #define ARM_SMMU_FEAT_HYP (1 << 12) u32 features; +#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) + u32 options; + struct arm_smmu_cmdq cmdq; struct arm_smmu_evtq evtq; struct arm_smmu_priq priq; @@ -602,11 +606,35 @@ struct arm_smmu_domain { static DEFINE_SPINLOCK(arm_smmu_devices_lock); static LIST_HEAD(arm_smmu_devices); +struct arm_smmu_option_prop { + u32 opt; + const char *prop; +}; + +static struct arm_smmu_option_prop arm_smmu_options[] = { + { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" }, + { 0, NULL}, +}; + static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) { return container_of(dom, struct arm_smmu_domain, domain); } +static void parse_driver_options(struct arm_smmu_device *smmu) +{ + int i = 0; + + do { + if (of_property_read_bool(smmu->dev->of_node, + arm_smmu_options[i].prop)) { + smmu->options |= arm_smmu_options[i].opt; + dev_notice(smmu->dev, "option %s\n", + arm_smmu_options[i].prop); + } + } while (arm_smmu_options[++i].opt); +} + /* Low-level queue manipulation functions */ static bool queue_full(struct arm_smmu_queue *q) { @@ -1036,7 +1064,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, arm_smmu_sync_ste_for_sid(smmu, sid); /* It's likely that we'll want to use the new STE soon */ - arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); + if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) + arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); } static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent) @@ -1064,7 +1093,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid) return 0; size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3); - strtab = &cfg->strtab[sid >> STRTAB_SPLIT << STRTAB_L1_DESC_DWORDS]; + strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS]; desc->span = STRTAB_SPLIT + 1; desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma, @@ -2020,21 +2049,23 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) { void *strtab; u64 reg; - u32 size; + u32 size, l1size; int ret; struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; /* Calculate the L1 size, capped to the SIDSIZE */ size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); size = min(size, smmu->sid_bits - STRTAB_SPLIT); - if (size + STRTAB_SPLIT < smmu->sid_bits) + cfg->num_l1_ents = 1 << size; + + size += STRTAB_SPLIT; + if (size < smmu->sid_bits) dev_warn(smmu->dev, "2-level strtab only covers %u/%u bits of SID\n", - size + STRTAB_SPLIT, smmu->sid_bits); + size, smmu->sid_bits); - cfg->num_l1_ents = 1 << size; - size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); - strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma, + l1size = 
cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); + strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma, GFP_KERNEL); if (!strtab) { dev_err(smmu->dev, @@ -2055,8 +2086,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) ret = arm_smmu_init_l1_strtab(smmu); if (ret) dma_free_coherent(smmu->dev, - cfg->num_l1_ents * - (STRTAB_L1_DESC_DWORDS << 3), + l1size, strtab, cfg->strtab_dma); return ret; @@ -2573,6 +2603,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) if (irq > 0) smmu->gerr_irq = irq; + parse_driver_options(smmu); + /* Probe the h/w */ ret = arm_smmu_device_probe(smmu); if (ret) diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index a98a7b27aca1..0649b94f5958 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1830,8 +1830,9 @@ static int domain_init(struct dmar_domain *domain, int guest_width) static void domain_exit(struct dmar_domain *domain) { + struct dmar_drhd_unit *drhd; + struct intel_iommu *iommu; struct page *freelist = NULL; - int i; /* Domain 0 is reserved, so dont process it */ if (!domain) @@ -1851,8 +1852,10 @@ static void domain_exit(struct dmar_domain *domain) /* clear attached or cached domains */ rcu_read_lock(); - for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) - iommu_detach_domain(domain, g_iommus[i]); + for_each_active_iommu(iommu, drhd) + if (domain_type_is_vm(domain) || + test_bit(iommu->seq_id, domain->iommu_bmp)) + iommu_detach_domain(domain, iommu); rcu_read_unlock(); dma_free_pagelist(freelist); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 1b7e155869f6..c00e2db351ba 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -75,6 +75,13 @@ struct its_node { #define ITS_ITT_ALIGN SZ_256 +struct event_lpi_map { + unsigned long *lpi_map; + u16 *col_map; + irq_hw_number_t lpi_base; + int nr_lpis; +}; + /* * The ITS view of a device - belongs to an ITS, a collection, owns an * interrupt translation table, and a list of interrupts. @@ -82,11 +89,8 @@ struct its_node { struct its_device { struct list_head entry; struct its_node *its; - struct its_collection *collection; + struct event_lpi_map event_map; void *itt; - unsigned long *lpi_map; - irq_hw_number_t lpi_base; - int nr_lpis; u32 nr_ites; u32 device_id; }; @@ -99,6 +103,14 @@ static struct rdists *gic_rdists; #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) +static struct its_collection *dev_event_to_col(struct its_device *its_dev, + u32 event) +{ + struct its_node *its = its_dev->its; + + return its->collections + its_dev->event_map.col_map[event]; +} + /* * ITS command descriptors - parameters to be encoded in a command * block. 
@@ -134,7 +146,7 @@ struct its_cmd_desc { struct { struct its_device *dev; struct its_collection *col; - u32 id; + u32 event_id; } its_movi_cmd; struct { @@ -241,7 +253,7 @@ static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd, its_fixup_cmd(cmd); - return desc->its_mapd_cmd.dev->collection; + return NULL; } static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd, @@ -260,52 +272,72 @@ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd, static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd, struct its_cmd_desc *desc) { + struct its_collection *col; + + col = dev_event_to_col(desc->its_mapvi_cmd.dev, + desc->its_mapvi_cmd.event_id); + its_encode_cmd(cmd, GITS_CMD_MAPVI); its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id); its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id); its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id); - its_encode_collection(cmd, desc->its_mapvi_cmd.dev->collection->col_id); + its_encode_collection(cmd, col->col_id); its_fixup_cmd(cmd); - return desc->its_mapvi_cmd.dev->collection; + return col; } static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd, struct its_cmd_desc *desc) { + struct its_collection *col; + + col = dev_event_to_col(desc->its_movi_cmd.dev, + desc->its_movi_cmd.event_id); + its_encode_cmd(cmd, GITS_CMD_MOVI); its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); - its_encode_event_id(cmd, desc->its_movi_cmd.id); + its_encode_event_id(cmd, desc->its_movi_cmd.event_id); its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); its_fixup_cmd(cmd); - return desc->its_movi_cmd.dev->collection; + return col; } static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd, struct its_cmd_desc *desc) { + struct its_collection *col; + + col = dev_event_to_col(desc->its_discard_cmd.dev, + desc->its_discard_cmd.event_id); + its_encode_cmd(cmd, GITS_CMD_DISCARD); its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); its_encode_event_id(cmd, desc->its_discard_cmd.event_id); its_fixup_cmd(cmd); - return desc->its_discard_cmd.dev->collection; + return col; } static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd, struct its_cmd_desc *desc) { + struct its_collection *col; + + col = dev_event_to_col(desc->its_inv_cmd.dev, + desc->its_inv_cmd.event_id); + its_encode_cmd(cmd, GITS_CMD_INV); its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); its_encode_event_id(cmd, desc->its_inv_cmd.event_id); its_fixup_cmd(cmd); - return desc->its_inv_cmd.dev->collection; + return col; } static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd, @@ -497,7 +529,7 @@ static void its_send_movi(struct its_device *dev, desc.its_movi_cmd.dev = dev; desc.its_movi_cmd.col = col; - desc.its_movi_cmd.id = id; + desc.its_movi_cmd.event_id = id; its_send_single_command(dev->its, its_build_movi_cmd, &desc); } @@ -528,7 +560,7 @@ static void its_send_invall(struct its_node *its, struct its_collection *col) static inline u32 its_get_event_id(struct irq_data *d) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); - return d->hwirq - its_dev->lpi_base; + return d->hwirq - its_dev->event_map.lpi_base; } static void lpi_set_config(struct irq_data *d, bool enable) @@ -583,7 +615,7 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, target_col = &its_dev->its->collections[cpu]; its_send_movi(its_dev, target_col, id); - its_dev->collection = target_col; + 
its_dev->event_map.col_map[id] = cpu; return IRQ_SET_MASK_OK_DONE; } @@ -713,8 +745,10 @@ out: return bitmap; } -static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids) +static void its_lpi_free(struct event_lpi_map *map) { + int base = map->lpi_base; + int nr_ids = map->nr_lpis; int lpi; spin_lock(&lpi_lock); @@ -731,7 +765,8 @@ static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids) spin_unlock(&lpi_lock); - kfree(bitmap); + kfree(map->lpi_map); + kfree(map->col_map); } /* @@ -1099,11 +1134,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, struct its_device *dev; unsigned long *lpi_map; unsigned long flags; + u16 *col_map = NULL; void *itt; int lpi_base; int nr_lpis; int nr_ites; - int cpu; int sz; dev = kzalloc(sizeof(*dev), GFP_KERNEL); @@ -1117,20 +1152,24 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; itt = kzalloc(sz, GFP_KERNEL); lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); + if (lpi_map) + col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL); - if (!dev || !itt || !lpi_map) { + if (!dev || !itt || !lpi_map || !col_map) { kfree(dev); kfree(itt); kfree(lpi_map); + kfree(col_map); return NULL; } dev->its = its; dev->itt = itt; dev->nr_ites = nr_ites; - dev->lpi_map = lpi_map; - dev->lpi_base = lpi_base; - dev->nr_lpis = nr_lpis; + dev->event_map.lpi_map = lpi_map; + dev->event_map.col_map = col_map; + dev->event_map.lpi_base = lpi_base; + dev->event_map.nr_lpis = nr_lpis; dev->device_id = dev_id; INIT_LIST_HEAD(&dev->entry); @@ -1138,10 +1177,6 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, list_add(&dev->entry, &its->its_device_list); raw_spin_unlock_irqrestore(&its->lock, flags); - /* Bind the device to the first possible CPU */ - cpu = cpumask_first(cpu_online_mask); - dev->collection = &its->collections[cpu]; - /* Map device to its ITT */ its_send_mapd(dev, 1); @@ -1163,12 +1198,13 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) { int idx; - idx = find_first_zero_bit(dev->lpi_map, dev->nr_lpis); - if (idx == dev->nr_lpis) + idx = find_first_zero_bit(dev->event_map.lpi_map, + dev->event_map.nr_lpis); + if (idx == dev->event_map.nr_lpis) return -ENOSPC; - *hwirq = dev->lpi_base + idx; - set_bit(idx, dev->lpi_map); + *hwirq = dev->event_map.lpi_base + idx; + set_bit(idx, dev->event_map.lpi_map); return 0; } @@ -1288,7 +1324,8 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq, &its_irq_chip, its_dev); dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n", - (int)(hwirq - its_dev->lpi_base), (int)hwirq, virq + i); + (int)(hwirq - its_dev->event_map.lpi_base), + (int)hwirq, virq + i); } return 0; @@ -1300,6 +1337,9 @@ static void its_irq_domain_activate(struct irq_domain *domain, struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); + /* Bind the LPI to the first possible CPU */ + its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask); + /* Map the GIC IRQ and event to the device */ its_send_mapvi(its_dev, d->hwirq, event); } @@ -1327,17 +1367,16 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, u32 event = its_get_event_id(data); /* Mark interrupt index as unused */ - clear_bit(event, its_dev->lpi_map); + clear_bit(event, its_dev->event_map.lpi_map); /* Nuke the entry in the domain */ 
irq_domain_reset_irq_data(data); } /* If all interrupts have been freed, start mopping the floor */ - if (bitmap_empty(its_dev->lpi_map, its_dev->nr_lpis)) { - its_lpi_free(its_dev->lpi_map, - its_dev->lpi_base, - its_dev->nr_lpis); + if (bitmap_empty(its_dev->event_map.lpi_map, + its_dev->event_map.nr_lpis)) { + its_lpi_free(&its_dev->event_map); /* Unmap device/itt */ its_send_mapd(its_dev, 0); diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c index a45121546caf..acb721b31bcf 100644 --- a/drivers/irqchip/spear-shirq.c +++ b/drivers/irqchip/spear-shirq.c @@ -2,7 +2,7 @@ * SPEAr platform shared irq layer source file * * Copyright (C) 2009-2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * Copyright (C) 2012 ST Microelectronics * Shiraz Hashim <shiraz.linux.kernel@gmail.com> diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c index 8c91fd5eb6fd..375be509e95f 100644 --- a/drivers/isdn/gigaset/ser-gigaset.c +++ b/drivers/isdn/gigaset/ser-gigaset.c @@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty) cs->hw.ser->tty = tty; atomic_set(&cs->hw.ser->refcnt, 1); init_completion(&cs->hw.ser->dead_cmp); - tty->disc_data = cs; + /* Set the amount of data we're willing to receive per call + * from the hardware driver to half of the input buffer size + * to leave some reserve. + * Note: We don't do flow control towards the hardware driver. + * If more data is received than will fit into the input buffer, + * it will be dropped and an error will be logged. This should + * never happen as the device is slow and the buffer size ample. + */ + tty->receive_room = RBUFSIZE/2; + /* OK.. Initialization of the datastructures and the HW is done.. Now * startup system and notify the LL that we are ready to run */ @@ -598,28 +607,6 @@ static int gigaset_tty_hangup(struct tty_struct *tty) } /* - * Read on the tty. - * Unused, received data goes only to the Gigaset driver. - */ -static ssize_t -gigaset_tty_read(struct tty_struct *tty, struct file *file, - unsigned char __user *buf, size_t count) -{ - return -EAGAIN; -} - -/* - * Write on the tty. - * Unused, transmit data comes only from the Gigaset driver. - */ -static ssize_t -gigaset_tty_write(struct tty_struct *tty, struct file *file, - const unsigned char *buf, size_t count) -{ - return -EAGAIN; -} - -/* * Ioctl on the tty. * Called in process context only. * May be re-entered by multiple ioctl calling threads. 
@@ -752,8 +739,6 @@ static struct tty_ldisc_ops gigaset_ldisc = { .open = gigaset_tty_open, .close = gigaset_tty_close, .hangup = gigaset_tty_hangup, - .read = gigaset_tty_read, - .write = gigaset_tty_write, .ioctl = gigaset_tty_ioctl, .receive_buf = gigaset_tty_receive, .write_wakeup = gigaset_tty_wakeup, diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h index a08e3eeac3c5..79a6d63e8ed3 100644 --- a/drivers/md/bcache/closure.h +++ b/drivers/md/bcache/closure.h @@ -320,7 +320,6 @@ static inline void closure_wake_up(struct closure_waitlist *list) do { \ set_closure_fn(_cl, _fn, _wq); \ closure_sub(_cl, CLOSURE_RUNNING + 1); \ - return; \ } while (0) /** @@ -349,7 +348,6 @@ do { \ do { \ set_closure_fn(_cl, _fn, _wq); \ closure_queue(_cl); \ - return; \ } while (0) /** @@ -365,7 +363,6 @@ do { \ do { \ set_closure_fn(_cl, _destructor, NULL); \ closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \ - return; \ } while (0) /** diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index cb64e64a4789..bf6a9ca18403 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -105,6 +105,7 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p) } while (n != bio); continue_at(&s->cl, bch_bio_submit_split_done, NULL); + return; submit: generic_make_request(bio); } diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index ce64fc851251..418607a6ba33 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -592,12 +592,14 @@ static void journal_write_unlocked(struct closure *cl) if (!w->need_write) { closure_return_with_destructor(cl, journal_write_unlock); + return; } else if (journal_full(&c->journal)) { journal_reclaim(c); spin_unlock(&c->journal.lock); btree_flush_write(c); continue_at(cl, journal_write, system_wq); + return; } c->journal.blocks_free -= set_blocks(w->data, block_bytes(c)); diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 4afb2d26b148..f292790997d7 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -88,8 +88,10 @@ static void bch_data_insert_keys(struct closure *cl) if (journal_ref) atomic_dec_bug(journal_ref); - if (!op->insert_data_done) + if (!op->insert_data_done) { continue_at(cl, bch_data_insert_start, op->wq); + return; + } bch_keylist_free(&op->insert_keys); closure_return(cl); @@ -216,8 +218,10 @@ static void bch_data_insert_start(struct closure *cl) /* 1 for the device pointer and 1 for the chksum */ if (bch_keylist_realloc(&op->insert_keys, 3 + (op->csum ? 
1 : 0), - op->c)) + op->c)) { continue_at(cl, bch_data_insert_keys, op->wq); + return; + } k = op->insert_keys.top; bkey_init(k); @@ -255,6 +259,7 @@ static void bch_data_insert_start(struct closure *cl) op->insert_data_done = true; continue_at(cl, bch_data_insert_keys, op->wq); + return; err: /* bch_alloc_sectors() blocks if s->writeback = true */ BUG_ON(op->writeback); @@ -576,8 +581,10 @@ static void cache_lookup(struct closure *cl) ret = bch_btree_map_keys(&s->op, s->iop.c, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0), cache_lookup_fn, MAP_END_KEY); - if (ret == -EAGAIN) + if (ret == -EAGAIN) { continue_at(cl, cache_lookup, bcache_wq); + return; + } closure_return(cl); } @@ -1085,6 +1092,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio) continue_at_nobarrier(&s->cl, flash_dev_nodata, bcache_wq); + return; } else if (rw) { bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &KEY(d->id, bio->bi_iter.bi_sector, 0), diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index ed2346ddf4c9..e51de52eeb94 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -494,7 +494,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap) bitmap_super_t *sb; unsigned long chunksize, daemon_sleep, write_behind; - bitmap->storage.sb_page = alloc_page(GFP_KERNEL); + bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (bitmap->storage.sb_page == NULL) return -ENOMEM; bitmap->storage.sb_page->index = 0; @@ -541,6 +541,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap) sb->state = cpu_to_le32(bitmap->flags); bitmap->events_cleared = bitmap->mddev->events; sb->events_cleared = cpu_to_le64(bitmap->mddev->events); + bitmap->mddev->bitmap_info.nodes = 0; kunmap_atomic(sb); @@ -558,6 +559,7 @@ static int bitmap_read_sb(struct bitmap *bitmap) unsigned long sectors_reserved = 0; int err = -EINVAL; struct page *sb_page; + loff_t offset = bitmap->mddev->bitmap_info.offset; if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) { chunksize = 128 * 1024 * 1024; @@ -584,9 +586,9 @@ re_read: bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t); /* to 4k blocks */ bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096); - bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3); + offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3)); pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__, - bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset); + bitmap->cluster_slot, offset); } if (bitmap->storage.file) { @@ -597,7 +599,7 @@ re_read: bitmap, bytes, sb_page); } else { err = read_sb_page(bitmap->mddev, - bitmap->mddev->bitmap_info.offset, + offset, sb_page, 0, sizeof(bitmap_super_t)); } @@ -611,8 +613,16 @@ re_read: daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; write_behind = le32_to_cpu(sb->write_behind); sectors_reserved = le32_to_cpu(sb->sectors_reserved); - nodes = le32_to_cpu(sb->nodes); - strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64); + /* XXX: This is a hack to ensure that we don't use clustering + * in case: + * - dm-raid is in use and + * - the nodes written in bitmap_sb is erroneous. 
+ */ + if (!bitmap->mddev->sync_super) { + nodes = le32_to_cpu(sb->nodes); + strlcpy(bitmap->mddev->bitmap_info.cluster_name, + sb->cluster_name, 64); + } /* verify that the bitmap-specific fields are valid */ if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) @@ -671,7 +681,7 @@ out: kunmap_atomic(sb); /* Assiging chunksize is required for "re_read" */ bitmap->mddev->bitmap_info.chunksize = chunksize; - if (nodes && (bitmap->cluster_slot < 0)) { + if (err == 0 && nodes && (bitmap->cluster_slot < 0)) { err = md_setup_cluster(bitmap->mddev, nodes); if (err) { pr_err("%s: Could not setup cluster service (%d)\n", @@ -1866,10 +1876,6 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot, if (IS_ERR(bitmap)) return PTR_ERR(bitmap); - rv = bitmap_read_sb(bitmap); - if (rv) - goto err; - rv = bitmap_init_from_disk(bitmap, 0); if (rv) goto err; diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 1b4e1756b169..b680da5d7b93 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -424,7 +424,6 @@ static void free_migration(struct dm_cache_migration *mg) wake_up(&cache->migration_wait); mempool_free(mg, cache->migration_pool); - wake_worker(cache); } static int prealloc_data_structs(struct cache *cache, struct prealloc *p) @@ -1947,6 +1946,7 @@ static int commit_if_needed(struct cache *cache) static void process_deferred_bios(struct cache *cache) { + bool prealloc_used = false; unsigned long flags; struct bio_list bios; struct bio *bio; @@ -1981,13 +1981,16 @@ static void process_deferred_bios(struct cache *cache) process_discard_bio(cache, &structs, bio); else process_bio(cache, &structs, bio); + prealloc_used = true; } - prealloc_free_structs(cache, &structs); + if (prealloc_used) + prealloc_free_structs(cache, &structs); } static void process_deferred_cells(struct cache *cache) { + bool prealloc_used = false; unsigned long flags; struct dm_bio_prison_cell *cell, *tmp; struct list_head cells; @@ -2015,9 +2018,11 @@ static void process_deferred_cells(struct cache *cache) } process_cell(cache, &structs, cell); + prealloc_used = true; } - prealloc_free_structs(cache, &structs); + if (prealloc_used) + prealloc_free_structs(cache, &structs); } static void process_deferred_flush_bios(struct cache *cache, bool submit_bios) @@ -2062,7 +2067,7 @@ static void process_deferred_writethrough_bios(struct cache *cache) static void writeback_some_dirty_blocks(struct cache *cache) { - int r = 0; + bool prealloc_used = false; dm_oblock_t oblock; dm_cblock_t cblock; struct prealloc structs; @@ -2072,23 +2077,21 @@ static void writeback_some_dirty_blocks(struct cache *cache) memset(&structs, 0, sizeof(structs)); while (spare_migration_bandwidth(cache)) { - if (prealloc_data_structs(cache, &structs)) - break; + if (policy_writeback_work(cache->policy, &oblock, &cblock, busy)) + break; /* no work to do */ - r = policy_writeback_work(cache->policy, &oblock, &cblock, busy); - if (r) - break; - - r = get_cell(cache, oblock, &structs, &old_ocell); - if (r) { + if (prealloc_data_structs(cache, &structs) || + get_cell(cache, oblock, &structs, &old_ocell)) { policy_set_dirty(cache->policy, oblock); break; } writeback(cache, &structs, oblock, cblock, old_ocell); + prealloc_used = true; } - prealloc_free_structs(cache, &structs); + if (prealloc_used) + prealloc_free_structs(cache, &structs); } /*---------------------------------------------------------------- @@ -3496,7 +3499,7 @@ static void cache_resume(struct dm_target *ti) * <#demotions> <#promotions> <#dirty> * <#features> 
<features>* * <#core args> <core args> - * <policy name> <#policy args> <policy args>* <cache metadata mode> + * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check> */ static void cache_status(struct dm_target *ti, status_type_t type, unsigned status_flags, char *result, unsigned maxlen) @@ -3582,6 +3585,11 @@ static void cache_status(struct dm_target *ti, status_type_t type, else DMEMIT("rw "); + if (dm_cache_metadata_needs_check(cache->cmd)) + DMEMIT("needs_check "); + else + DMEMIT("- "); + break; case STATUSTYPE_TABLE: @@ -3820,7 +3828,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type cache_target = { .name = "cache", - .version = {1, 7, 0}, + .version = {1, 8, 0}, .module = THIS_MODULE, .ctr = cache_ctr, .dtr = cache_dtr, diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index c33f61a4cc28..1c50c580215c 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -18,6 +18,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/vmalloc.h> #include <linux/sort.h> #include <linux/rbtree.h> @@ -268,7 +269,7 @@ struct pool { process_mapping_fn process_prepared_mapping; process_mapping_fn process_prepared_discard; - struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE]; + struct dm_bio_prison_cell **cell_sort_array; }; static enum pool_mode get_pool_mode(struct pool *pool); @@ -2281,18 +2282,23 @@ static void do_waker(struct work_struct *ws) queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); } +static void notify_of_pool_mode_change_to_oods(struct pool *pool); + /* * We're holding onto IO to allow userland time to react. After the * timeout either the pool will have been resized (and thus back in - * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO. + * PM_WRITE mode), or we degrade to PM_OUT_OF_DATA_SPACE w/ error_if_no_space. */ static void do_no_space_timeout(struct work_struct *ws) { struct pool *pool = container_of(to_delayed_work(ws), struct pool, no_space_timeout); - if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) - set_pool_mode(pool, PM_READ_ONLY); + if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { + pool->pf.error_if_no_space = true; + notify_of_pool_mode_change_to_oods(pool); + error_retry_list(pool); + } } /*----------------------------------------------------------------*/ @@ -2370,6 +2376,14 @@ static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode) dm_device_name(pool->pool_md), new_mode); } +static void notify_of_pool_mode_change_to_oods(struct pool *pool) +{ + if (!pool->pf.error_if_no_space) + notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)"); + else + notify_of_pool_mode_change(pool, "out-of-data-space (error IO)"); +} + static bool passdown_enabled(struct pool_c *pt) { return pt->adjusted_pf.discard_passdown; @@ -2454,7 +2468,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) * frequently seeing this mode. 
*/ if (old_mode != new_mode) - notify_of_pool_mode_change(pool, "out-of-data-space"); + notify_of_pool_mode_change_to_oods(pool); pool->process_bio = process_bio_read_only; pool->process_discard = process_discard_bio; pool->process_cell = process_cell_read_only; @@ -2777,6 +2791,7 @@ static void __pool_destroy(struct pool *pool) { __pool_table_remove(pool); + vfree(pool->cell_sort_array); if (dm_pool_metadata_close(pool->pmd) < 0) DMWARN("%s: dm_pool_metadata_close() failed.", __func__); @@ -2889,6 +2904,13 @@ static struct pool *pool_create(struct mapped_device *pool_md, goto bad_mapping_pool; } + pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE); + if (!pool->cell_sort_array) { + *error = "Error allocating cell sort array"; + err_p = ERR_PTR(-ENOMEM); + goto bad_sort_array; + } + pool->ref_count = 1; pool->last_commit_jiffies = jiffies; pool->pool_md = pool_md; @@ -2897,6 +2919,8 @@ static struct pool *pool_create(struct mapped_device *pool_md, return pool; +bad_sort_array: + mempool_destroy(pool->mapping_pool); bad_mapping_pool: dm_deferred_set_destroy(pool->all_io_ds); bad_all_io_ds: @@ -3714,6 +3738,7 @@ static void emit_flags(struct pool_features *pf, char *result, * Status line is: * <transaction id> <used metadata sectors>/<total metadata sectors> * <used data sectors>/<total data sectors> <held metadata root> + * <pool mode> <discard config> <no space config> <needs_check> */ static void pool_status(struct dm_target *ti, status_type_t type, unsigned status_flags, char *result, unsigned maxlen) @@ -3815,6 +3840,11 @@ static void pool_status(struct dm_target *ti, status_type_t type, else DMEMIT("queue_if_no_space "); + if (dm_pool_metadata_needs_check(pool->pmd)) + DMEMIT("needs_check "); + else + DMEMIT("- "); + break; case STATUSTYPE_TABLE: @@ -3918,7 +3948,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 15, 0}, + .version = {1, 16, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, @@ -4305,7 +4335,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type thin_target = { .name = "thin", - .version = {1, 15, 0}, + .version = {1, 16, 0}, .module = THIS_MODULE, .ctr = thin_ctr, .dtr = thin_dtr, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index f331d888e7f5..ab37ae114e94 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1067,13 +1067,10 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig) */ static void rq_completed(struct mapped_device *md, int rw, bool run_queue) { - int nr_requests_pending; - atomic_dec(&md->pending[rw]); /* nudge anyone waiting on suspend queue */ - nr_requests_pending = md_in_flight(md); - if (!nr_requests_pending) + if (!md_in_flight(md)) wake_up(&md->wait); /* @@ -1085,8 +1082,7 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue) if (run_queue) { if (md->queue->mq_ops) blk_mq_run_hw_queues(md->queue, true); - else if (!nr_requests_pending || - (nr_requests_pending >= md->queue->nr_congestion_on)) + else blk_run_queue_async(md->queue); } @@ -2281,8 +2277,6 @@ static void dm_init_old_md_queue(struct mapped_device *md) static void cleanup_mapped_device(struct mapped_device *md) { - cleanup_srcu_struct(&md->io_barrier); - if (md->wq) destroy_workqueue(md->wq); if (md->kworker_task) @@ -2294,6 +2288,8 @@ static void cleanup_mapped_device(struct mapped_device *md) if (md->bs) bioset_free(md->bs); 
+ cleanup_srcu_struct(&md->io_barrier); + if (md->disk) { spin_lock(&_minor_lock); md->disk->private_data = NULL; diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index fcfc4b9b2672..0072190515e0 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -44,6 +44,7 @@ struct resync_info { /* md_cluster_info flags */ #define MD_CLUSTER_WAITING_FOR_NEWDISK 1 +#define MD_CLUSTER_SUSPEND_READ_BALANCING 2 struct md_cluster_info { @@ -275,6 +276,9 @@ clear_bit: static void recover_prep(void *arg) { + struct mddev *mddev = arg; + struct md_cluster_info *cinfo = mddev->cluster_info; + set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state); } static void recover_slot(void *arg, struct dlm_slot *slot) @@ -307,6 +311,7 @@ static void recover_done(void *arg, struct dlm_slot *slots, cinfo->slot_number = our_slot; complete(&cinfo->completion); + clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state); } static const struct dlm_lockspace_ops md_ls_ops = { @@ -816,12 +821,17 @@ static void resync_finish(struct mddev *mddev) resync_send(mddev, RESYNCING, 0, 0); } -static int area_resyncing(struct mddev *mddev, sector_t lo, sector_t hi) +static int area_resyncing(struct mddev *mddev, int direction, + sector_t lo, sector_t hi) { struct md_cluster_info *cinfo = mddev->cluster_info; int ret = 0; struct suspend_info *s; + if ((direction == READ) && + test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state)) + return 1; + spin_lock_irq(&cinfo->suspend_lock); if (list_empty(&cinfo->suspend_list)) goto out; diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h index 6817ee00e053..00defe2badbc 100644 --- a/drivers/md/md-cluster.h +++ b/drivers/md/md-cluster.h @@ -18,7 +18,7 @@ struct md_cluster_operations { int (*metadata_update_start)(struct mddev *mddev); int (*metadata_update_finish)(struct mddev *mddev); int (*metadata_update_cancel)(struct mddev *mddev); - int (*area_resyncing)(struct mddev *mddev, sector_t lo, sector_t hi); + int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi); int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev); int (*add_new_disk_finish)(struct mddev *mddev); int (*new_disk_ack)(struct mddev *mddev, bool ack); diff --git a/drivers/md/md.c b/drivers/md/md.c index d429c30cd514..0c2a4e8b873c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5382,6 +5382,8 @@ static void __md_stop(struct mddev *mddev) { struct md_personality *pers = mddev->pers; mddev_detach(mddev); + /* Ensure ->event_work is done */ + flush_workqueue(md_misc_wq); spin_lock(&mddev->lock); mddev->ready = 0; mddev->pers = NULL; @@ -7437,7 +7439,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes) err = request_module("md-cluster"); if (err) { pr_err("md-cluster module not found.\n"); - return err; + return -ENOENT; } spin_lock(&pers_lock); diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index e04cfd2d60ef..9836c0ae897c 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c @@ -309,8 +309,8 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent, if (s < 0 && nr_center < -s) { /* not enough in central node */ - shift(left, center, nr_center); - s = nr_center - target; + shift(left, center, -nr_center); + s += nr_center; shift(left, right, s); nr_right += s; } else @@ -323,7 +323,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent, if (s > 0 && nr_center < s) { 
/* not enough in central node */ shift(center, right, nr_center); - s = target - nr_center; + s -= nr_center; shift(left, right, s); nr_left -= s; } else diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index 200ac12a1d40..fdd3793e22f9 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -255,7 +255,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root) int r; struct del_stack *s; - s = kmalloc(sizeof(*s), GFP_KERNEL); + s = kmalloc(sizeof(*s), GFP_NOIO); if (!s) return -ENOMEM; s->info = info; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f80f1af61ce7..94f5b55069e0 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -336,7 +336,7 @@ static void raid1_end_read_request(struct bio *bio, int error) spin_lock_irqsave(&conf->device_lock, flags); if (r1_bio->mddev->degraded == conf->raid_disks || (r1_bio->mddev->degraded == conf->raid_disks-1 && - !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))) + test_bit(In_sync, &conf->mirrors[mirror].rdev->flags))) uptodate = 1; spin_unlock_irqrestore(&conf->device_lock, flags); } @@ -541,7 +541,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect if ((conf->mddev->recovery_cp < this_sector + sectors) || (mddev_is_clustered(conf->mddev) && - md_cluster_ops->area_resyncing(conf->mddev, this_sector, + md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, this_sector + sectors))) choose_first = 1; else @@ -1111,7 +1111,8 @@ static void make_request(struct mddev *mddev, struct bio * bio) ((bio_end_sector(bio) > mddev->suspend_lo && bio->bi_iter.bi_sector < mddev->suspend_hi) || (mddev_is_clustered(mddev) && - md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) { + md_cluster_ops->area_resyncing(mddev, WRITE, + bio->bi_iter.bi_sector, bio_end_sector(bio))))) { /* As the suspend_* range is controlled by * userspace, we want an interruptible * wait. 
@@ -1124,7 +1125,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) if (bio_end_sector(bio) <= mddev->suspend_lo || bio->bi_iter.bi_sector >= mddev->suspend_hi || (mddev_is_clustered(mddev) && - !md_cluster_ops->area_resyncing(mddev, + !md_cluster_ops->area_resyncing(mddev, WRITE, bio->bi_iter.bi_sector, bio_end_sector(bio)))) break; schedule(); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 940f2f365461..38c58e19cfce 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -3556,6 +3556,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) /* far_copies must be 1 */ conf->prev.stride = conf->dev_sectors; } + conf->reshape_safe = conf->reshape_progress; spin_lock_init(&conf->device_lock); INIT_LIST_HEAD(&conf->retry_list); @@ -3760,7 +3761,6 @@ static int run(struct mddev *mddev) } conf->offset_diff = min_offset_diff; - conf->reshape_safe = conf->reshape_progress; clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); @@ -4103,6 +4103,7 @@ static int raid10_start_reshape(struct mddev *mddev) conf->reshape_progress = size; } else conf->reshape_progress = 0; + conf->reshape_safe = conf->reshape_progress; spin_unlock_irq(&conf->device_lock); if (mddev->delta_disks && mddev->bitmap) { @@ -4170,6 +4171,7 @@ abort: rdev->new_data_offset = rdev->data_offset; smp_wmb(); conf->reshape_progress = MaxSector; + conf->reshape_safe = MaxSector; mddev->reshape_position = MaxSector; spin_unlock_irq(&conf->device_lock); return ret; @@ -4524,6 +4526,7 @@ static void end_reshape(struct r10conf *conf) md_finish_reshape(conf->mddev); smp_wmb(); conf->reshape_progress = MaxSector; + conf->reshape_safe = MaxSector; spin_unlock_irq(&conf->device_lock); /* read-ahead size must cover two whole stripes, which is diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 59e44e99eef3..643d217bfa13 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2162,6 +2162,9 @@ static int resize_stripes(struct r5conf *conf, int newsize) if (!sc) return -ENOMEM; + /* Need to ensure auto-resizing doesn't interfere */ + mutex_lock(&conf->cache_size_mutex); + for (i = conf->max_nr_stripes; i; i--) { nsh = alloc_stripe(sc, GFP_KERNEL); if (!nsh) @@ -2178,6 +2181,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) kmem_cache_free(sc, nsh); } kmem_cache_destroy(sc); + mutex_unlock(&conf->cache_size_mutex); return -ENOMEM; } /* Step 2 - Must use GFP_NOIO now. 
@@ -2224,6 +2228,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) } else err = -ENOMEM; + mutex_unlock(&conf->cache_size_mutex); /* Step 4, return new stripes to service */ while(!list_empty(&newstripes)) { nsh = list_entry(newstripes.next, struct stripe_head, lru); @@ -4061,8 +4066,10 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) &first_bad, &bad_sectors)) set_bit(R5_ReadRepl, &dev->flags); else { - if (rdev) + if (rdev && !test_bit(Faulty, &rdev->flags)) set_bit(R5_NeedReplace, &dev->flags); + else + clear_bit(R5_NeedReplace, &dev->flags); rdev = rcu_dereference(conf->disks[i].rdev); clear_bit(R5_ReadRepl, &dev->flags); } @@ -5857,12 +5864,14 @@ static void raid5d(struct md_thread *thread) pr_debug("%d stripes handled\n", handled); spin_unlock_irq(&conf->device_lock); - if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) { + if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) && + mutex_trylock(&conf->cache_size_mutex)) { grow_one_stripe(conf, __GFP_NOWARN); /* Set flag even if allocation failed. This helps * slow down allocation requests when mem is short */ set_bit(R5_DID_ALLOC, &conf->cache_state); + mutex_unlock(&conf->cache_size_mutex); } async_tx_issue_pending_all(); @@ -5894,18 +5903,22 @@ raid5_set_cache_size(struct mddev *mddev, int size) return -EINVAL; conf->min_nr_stripes = size; + mutex_lock(&conf->cache_size_mutex); while (size < conf->max_nr_stripes && drop_one_stripe(conf)) ; + mutex_unlock(&conf->cache_size_mutex); err = md_allow_write(mddev); if (err) return err; + mutex_lock(&conf->cache_size_mutex); while (size > conf->max_nr_stripes) if (!grow_one_stripe(conf, GFP_KERNEL)) break; + mutex_unlock(&conf->cache_size_mutex); return 0; } @@ -6371,11 +6384,18 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink, struct shrink_control *sc) { struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); - int ret = 0; - while (ret < sc->nr_to_scan) { - if (drop_one_stripe(conf) == 0) - return SHRINK_STOP; - ret++; + unsigned long ret = SHRINK_STOP; + + if (mutex_trylock(&conf->cache_size_mutex)) { + ret= 0; + while (ret < sc->nr_to_scan) { + if (drop_one_stripe(conf) == 0) { + ret = SHRINK_STOP; + break; + } + ret++; + } + mutex_unlock(&conf->cache_size_mutex); } return ret; } @@ -6444,6 +6464,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) goto abort; spin_lock_init(&conf->device_lock); seqcount_init(&conf->gen_lock); + mutex_init(&conf->cache_size_mutex); init_waitqueue_head(&conf->wait_for_quiescent); for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) { init_waitqueue_head(&conf->wait_for_stripe[i]); diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 02c3bf8fbfe7..d05144278690 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -482,7 +482,8 @@ struct r5conf { */ int active_name; char cache_name[2][32]; - struct kmem_cache *slab_cache; /* for allocating stripes */ + struct kmem_cache *slab_cache; /* for allocating stripes */ + struct mutex cache_size_mutex; /* Protect changes to cache size */ int seq_flush, seq_write; int quiesce; diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c index 4cb365d4ffdc..8b95eefb610b 100644 --- a/drivers/media/pci/ivtv/ivtvfb.c +++ b/drivers/media/pci/ivtv/ivtvfb.c @@ -38,6 +38,8 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/kernel.h> #include <linux/fb.h> @@ -1171,6 +1173,13 @@ static int 
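The raid5 changes above add a cache_size_mutex so that explicit stripe-cache resizing and the memory shrinker cannot race, and the shrinker side only takes the mutex with mutex_trylock so reclaim never sleeps behind a resize in progress. The sketch below shows that try-lock idiom in plain userspace C with pthreads; the names cache_resize() and cache_scan() and the stripe counts are invented for the example and are not the driver's code.

/* Hedged sketch of the "try-lock in the shrinker" idiom; only the locking
 * pattern mirrors the patch, everything else is invented. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cache_size_mutex = PTHREAD_MUTEX_INITIALIZER;
static int nr_stripes = 256;

/* Slow path: an explicit resize may sleep, so it takes the mutex. */
static void cache_resize(int target)
{
	pthread_mutex_lock(&cache_size_mutex);
	while (nr_stripes > target)
		nr_stripes--;		/* stands in for drop_one_stripe() */
	while (nr_stripes < target)
		nr_stripes++;		/* stands in for grow_one_stripe() */
	pthread_mutex_unlock(&cache_size_mutex);
}

/* Reclaim path: must not block, so it only try-locks and reports
 * "nothing freed" (0 here, SHRINK_STOP in the kernel) on contention. */
static int cache_scan(int nr_to_scan)
{
	int freed = 0;

	if (pthread_mutex_trylock(&cache_size_mutex) != 0)
		return 0;
	while (freed < nr_to_scan && nr_stripes > 0) {
		nr_stripes--;
		freed++;
	}
	pthread_mutex_unlock(&cache_size_mutex);
	return freed;
}

int main(void)
{
	cache_resize(128);
	printf("freed %d, %d stripes left\n", cache_scan(32), nr_stripes);
	return 0;
}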
ivtvfb_init_card(struct ivtv *itv) { int rc; +#ifdef CONFIG_X86_64 + if (pat_enabled()) { + pr_warn("ivtvfb needs PAT disabled, boot with nopat kernel parameter\n"); + return -ENODEV; + } +#endif + if (itv->osd_info) { IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id); return -EBUSY; @@ -1265,12 +1274,6 @@ static int __init ivtvfb_init(void) int registered = 0; int err; -#ifdef CONFIG_X86_64 - if (WARN(pat_enabled(), - "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n")) { - return -ENODEV; - } -#endif if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) { printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n", diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c index 5c054031c3f8..e14c8c9d189b 100644 --- a/drivers/mfd/stmpe-i2c.c +++ b/drivers/mfd/stmpe-i2c.c @@ -6,7 +6,7 @@ * * License Terms: GNU General Public License, version 2 * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson - * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics + * Author: Viresh Kumar <vireshk@kernel.org> for ST Microelectronics */ #include <linux/i2c.h> diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c index a81badbaa917..6fdb30e84a2b 100644 --- a/drivers/mfd/stmpe-spi.c +++ b/drivers/mfd/stmpe-spi.c @@ -4,7 +4,7 @@ * Copyright (C) ST Microelectronics SA 2011 * * License Terms: GNU General Public License, version 2 - * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics + * Author: Viresh Kumar <vireshk@kernel.org> for ST Microelectronics */ #include <linux/spi/spi.h> @@ -146,4 +146,4 @@ module_exit(stmpe_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver"); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 8eb0a9500a90..e9513d651cd3 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -682,7 +682,7 @@ int mei_register(struct mei_device *dev, struct device *parent) /* Fill in the data structures */ devno = MKDEV(MAJOR(mei_devt), dev->minor); cdev_init(&dev->cdev, &mei_fops); - dev->cdev.owner = mei_fops.owner; + dev->cdev.owner = parent->driver->owner; /* Add the device */ ret = cdev_add(&dev->cdev, devno, 1); diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c index 41e3bdb10061..6dfdae3452d6 100644 --- a/drivers/misc/mic/scif/scif_nodeqp.c +++ b/drivers/misc/mic/scif/scif_nodeqp.c @@ -357,7 +357,7 @@ static void scif_p2p_freesg(struct scatterlist *sg) } static struct scatterlist * -scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt) +scif_p2p_setsg(phys_addr_t pa, int page_size, int page_cnt) { struct scatterlist *sg; struct page *page; @@ -368,16 +368,11 @@ scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt) return NULL; sg_init_table(sg, page_cnt); for (i = 0; i < page_cnt; i++) { - page = vmalloc_to_page((void __force *)va); - if (!page) - goto p2p_sg_err; + page = pfn_to_page(pa >> PAGE_SHIFT); sg_set_page(&sg[i], page, page_size, 0); - va += page_size; + pa += page_size; } return sg; -p2p_sg_err: - kfree(sg); - return NULL; } /* Init p2p mappings required to access peerdev from scifdev */ @@ -395,14 +390,14 @@ scif_init_p2p_info(struct scif_dev *scifdev, struct scif_dev *peerdev) p2p = kzalloc(sizeof(*p2p), GFP_KERNEL); if (!p2p) return NULL; - p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->va, + 
p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->pa, PAGE_SIZE, num_mmio_pages); if (!p2p->ppi_sg[SCIF_PPI_MMIO]) goto free_p2p; p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages; sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30))); num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT); - p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->va, + p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->pa, 1 << sg_page_shift, num_aper_chunks); p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks; diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index c9c3d20b784b..a1b820fcb2a6 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -208,6 +208,8 @@ static ssize_t power_ro_lock_show(struct device *dev, ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); + mmc_blk_put(md); + return ret; } diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index fd9a58e216a5..6a0f9c79be26 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -779,6 +779,7 @@ config MMC_TOSHIBA_PCI config MMC_MTK tristate "MediaTek SD/MMC Card Interface support" + depends on HAS_DMA help This selects the MediaTek(R) Secure digital and Multimedia card Interface. If you have a machine with a integrated SD/MMC card reader, say Y or M here. diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index b2b411da297b..4d1203236890 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1062,9 +1062,14 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status) if (status & (CTO_EN | CCRC_EN)) end_cmd = 1; + if (host->data || host->response_busy) { + end_trans = !end_cmd; + host->response_busy = 0; + } if (status & (CTO_EN | DTO_EN)) hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd); - else if (status & (CCRC_EN | DCRC_EN)) + else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN | + BADA_EN)) hsmmc_command_incomplete(host, -EILSEQ, end_cmd); if (status & ACE_EN) { @@ -1081,10 +1086,6 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status) } dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12); } - if (host->data || host->response_busy) { - end_trans = !end_cmd; - host->response_busy = 0; - } } OMAP_HSMMC_WRITE(host->base, STAT, status); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index faf0cb910c96..c6b9f6492e1a 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -581,13 +581,8 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct pltfm_imx_data *imx_data = pltfm_host->priv; - struct esdhc_platform_data *boarddata = &imx_data->boarddata; - if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock)) - return boarddata->f_max; - else - return pltfm_host->clock; + return pltfm_host->clock; } static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) @@ -878,34 +873,19 @@ static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { static int sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, struct sdhci_host *host, - struct esdhc_platform_data *boarddata) + struct pltfm_imx_data *imx_data) { struct device_node *np = pdev->dev.of_node; - - if (!np) - return -ENODEV; - - if (of_get_property(np, "non-removable", NULL)) - boarddata->cd_type = ESDHC_CD_PERMANENT; - - if (of_get_property(np, "fsl,cd-controller", 
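The mmc block hunk above pairs the reference taken at the top of power_ro_lock_show() with an mmc_blk_put() before returning, so a sysfs read no longer leaks a reference to the block data. A small userspace sketch of the same get/put discipline follows; struct blk_data, blk_get() and blk_put() are invented stand-ins, not the kernel interfaces.

/* Hedged illustration of the reference-count rule the mmc_blk hunk enforces:
 * every get on the show() path must be paired with a put before returning. */
#include <stdio.h>
#include <stdlib.h>

struct blk_data {
	int refcnt;
	int locked;
};

static struct blk_data *blk_get(struct blk_data *md)
{
	if (!md)
		return NULL;
	md->refcnt++;
	return md;
}

static void blk_put(struct blk_data *md)
{
	if (md && --md->refcnt == 0)
		free(md);
}

/* Loosely mirrors the fixed power_ro_lock_show(): the reference is dropped
 * before returning, so the read no longer pins the object forever. */
static int power_ro_lock_show(struct blk_data *dev, char *buf, size_t len)
{
	struct blk_data *md = blk_get(dev);
	int ret;

	if (!md)
		return -1;
	ret = snprintf(buf, len, "%d\n", md->locked);
	blk_put(md);			/* the counterpart of the added mmc_blk_put() */
	return ret;
}

int main(void)
{
	struct blk_data *md = calloc(1, sizeof(*md));
	char buf[16];

	if (!md)
		return 1;
	md->refcnt = 1;
	power_ro_lock_show(md, buf, sizeof(buf));
	printf("%s", buf);
	blk_put(md);			/* drop the caller's own reference */
	return 0;
}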
NULL)) - boarddata->cd_type = ESDHC_CD_CONTROLLER; + struct esdhc_platform_data *boarddata = &imx_data->boarddata; + int ret; if (of_get_property(np, "fsl,wp-controller", NULL)) boarddata->wp_type = ESDHC_WP_CONTROLLER; - boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0); - if (gpio_is_valid(boarddata->cd_gpio)) - boarddata->cd_type = ESDHC_CD_GPIO; - boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); if (gpio_is_valid(boarddata->wp_gpio)) boarddata->wp_type = ESDHC_WP_GPIO; - of_property_read_u32(np, "bus-width", &boarddata->max_bus_width); - - of_property_read_u32(np, "max-frequency", &boarddata->f_max); - if (of_find_property(np, "no-1-8-v", NULL)) boarddata->support_vsel = false; else @@ -916,29 +896,119 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, mmc_of_parse_voltage(np, &host->ocr_mask); + /* sdr50 and sdr104 needs work on 1.8v signal voltage */ + if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) && + !IS_ERR(imx_data->pins_default)) { + imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, + ESDHC_PINCTRL_STATE_100MHZ); + imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, + ESDHC_PINCTRL_STATE_200MHZ); + if (IS_ERR(imx_data->pins_100mhz) || + IS_ERR(imx_data->pins_200mhz)) { + dev_warn(mmc_dev(host->mmc), + "could not get ultra high speed state, work on normal mode\n"); + /* + * fall back to not support uhs by specify no 1.8v quirk + */ + host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; + } + } else { + host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; + } + /* call to generic mmc_of_parse to support additional capabilities */ - return mmc_of_parse(host->mmc); + ret = mmc_of_parse(host->mmc); + if (ret) + return ret; + + if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) + host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; + + return 0; } #else static inline int sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, struct sdhci_host *host, - struct esdhc_platform_data *boarddata) + struct pltfm_imx_data *imx_data) { return -ENODEV; } #endif +static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev, + struct sdhci_host *host, + struct pltfm_imx_data *imx_data) +{ + struct esdhc_platform_data *boarddata = &imx_data->boarddata; + int err; + + if (!host->mmc->parent->platform_data) { + dev_err(mmc_dev(host->mmc), "no board data!\n"); + return -EINVAL; + } + + imx_data->boarddata = *((struct esdhc_platform_data *) + host->mmc->parent->platform_data); + /* write_protect */ + if (boarddata->wp_type == ESDHC_WP_GPIO) { + err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio); + if (err) { + dev_err(mmc_dev(host->mmc), + "failed to request write-protect gpio!\n"); + return err; + } + host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + } + + /* card_detect */ + switch (boarddata->cd_type) { + case ESDHC_CD_GPIO: + err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0); + if (err) { + dev_err(mmc_dev(host->mmc), + "failed to request card-detect gpio!\n"); + return err; + } + /* fall through */ + + case ESDHC_CD_CONTROLLER: + /* we have a working card_detect back */ + host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; + break; + + case ESDHC_CD_PERMANENT: + host->mmc->caps |= MMC_CAP_NONREMOVABLE; + break; + + case ESDHC_CD_NONE: + break; + } + + switch (boarddata->max_bus_width) { + case 8: + host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA; + break; + case 4: + host->mmc->caps |= MMC_CAP_4_BIT_DATA; + break; + case 1: + default: + host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; + break; + } + + return 0; +} + static int 
sdhci_esdhc_imx_probe(struct platform_device *pdev) { const struct of_device_id *of_id = of_match_device(imx_esdhc_dt_ids, &pdev->dev); struct sdhci_pltfm_host *pltfm_host; struct sdhci_host *host; - struct esdhc_platform_data *boarddata; int err; struct pltfm_imx_data *imx_data; - bool dt = true; host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0); if (IS_ERR(host)) @@ -1030,84 +1100,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; - boarddata = &imx_data->boarddata; - if (sdhci_esdhc_imx_probe_dt(pdev, host, boarddata) < 0) { - if (!host->mmc->parent->platform_data) { - dev_err(mmc_dev(host->mmc), "no board data!\n"); - err = -EINVAL; - goto disable_clk; - } - imx_data->boarddata = *((struct esdhc_platform_data *) - host->mmc->parent->platform_data); - dt = false; - } - /* write_protect */ - if (boarddata->wp_type == ESDHC_WP_GPIO && !dt) { - err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio); - if (err) { - dev_err(mmc_dev(host->mmc), - "failed to request write-protect gpio!\n"); - goto disable_clk; - } - host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; - } - - /* card_detect */ - switch (boarddata->cd_type) { - case ESDHC_CD_GPIO: - if (dt) - break; - err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0); - if (err) { - dev_err(mmc_dev(host->mmc), - "failed to request card-detect gpio!\n"); - goto disable_clk; - } - /* fall through */ - - case ESDHC_CD_CONTROLLER: - /* we have a working card_detect back */ - host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; - break; - - case ESDHC_CD_PERMANENT: - host->mmc->caps |= MMC_CAP_NONREMOVABLE; - break; - - case ESDHC_CD_NONE: - break; - } - - switch (boarddata->max_bus_width) { - case 8: - host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA; - break; - case 4: - host->mmc->caps |= MMC_CAP_4_BIT_DATA; - break; - case 1: - default: - host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; - break; - } - - /* sdr50 and sdr104 needs work on 1.8v signal voltage */ - if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) && - !IS_ERR(imx_data->pins_default)) { - imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, - ESDHC_PINCTRL_STATE_100MHZ); - imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, - ESDHC_PINCTRL_STATE_200MHZ); - if (IS_ERR(imx_data->pins_100mhz) || - IS_ERR(imx_data->pins_200mhz)) { - dev_warn(mmc_dev(host->mmc), - "could not get ultra high speed state, work on normal mode\n"); - /* fall back to not support uhs by specify no 1.8v quirk */ - host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; - } - } else { - host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; - } + if (of_id) + err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data); + else + err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data); + if (err) + goto disable_clk; err = sdhci_add_host(host); if (err) diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index 3497cfaf683c..a870c42731d7 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h @@ -45,6 +45,6 @@ #define ESDHC_DMA_SYSCTL 0x40c #define ESDHC_DMA_SNOOP 0x00000040 -#define ESDHC_HOST_CONTROL_RES 0x05 +#define ESDHC_HOST_CONTROL_RES 0x01 #endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */ diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 9cd5fc62f130..946d37f94a31 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -411,6 +411,7 @@ static int sdhci_pxav3_probe(struct platform_device 
*pdev) goto err_of_parse; sdhci_get_of_property(pdev); pdata = pxav3_get_mmc_pdata(dev); + pdev->dev.platform_data = pdata; } else if (pdata) { /* on-chip device */ if (pdata->flags & PXA_FLAG_CARD_PERMANENT) diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c index df088343d60f..255a896769b8 100644 --- a/drivers/mmc/host/sdhci-spear.c +++ b/drivers/mmc/host/sdhci-spear.c @@ -4,7 +4,7 @@ * Support of SDHCI platform devices for spear soc family * * Copyright (C) 2010 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * Inspired by sdhci-pltfm.c * @@ -211,5 +211,5 @@ static struct platform_driver sdhci_driver = { module_platform_driver(sdhci_driver); MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver"); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index bc1445238fb3..1dbe93232030 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2866,6 +2866,7 @@ int sdhci_add_host(struct sdhci_host *host) u32 max_current_caps; unsigned int ocr_avail; unsigned int override_timeout_clk; + u32 max_clk; int ret; WARN_ON(host == NULL); @@ -2978,8 +2979,11 @@ int sdhci_add_host(struct sdhci_host *host) GFP_KERNEL); host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL); if (!host->adma_table || !host->align_buffer) { - dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, - host->adma_table, host->adma_addr); + if (host->adma_table) + dma_free_coherent(mmc_dev(mmc), + host->adma_table_sz, + host->adma_table, + host->adma_addr); kfree(host->align_buffer); pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", mmc_hostname(mmc)); @@ -3047,18 +3051,22 @@ int sdhci_add_host(struct sdhci_host *host) * Set host parameters. 
*/ mmc->ops = &sdhci_ops; - mmc->f_max = host->max_clk; + max_clk = host->max_clk; + if (host->ops->get_min_clock) mmc->f_min = host->ops->get_min_clock(host); else if (host->version >= SDHCI_SPEC_300) { if (host->clk_mul) { mmc->f_min = (host->max_clk * host->clk_mul) / 1024; - mmc->f_max = host->max_clk * host->clk_mul; + max_clk = host->max_clk * host->clk_mul; } else mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; } else mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; + if (!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk))) + mmc->f_max = max_clk; + if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 19eb990d398c..e1ccefce9a9d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -625,6 +625,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev, call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev); } +static struct slave *bond_get_old_active(struct bonding *bond, + struct slave *new_active) +{ + struct slave *slave; + struct list_head *iter; + + bond_for_each_slave(bond, slave, iter) { + if (slave == new_active) + continue; + + if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr)) + return slave; + } + + return NULL; +} + /* bond_do_fail_over_mac * * Perform special MAC address swapping for fail_over_mac settings @@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond, if (!new_active) return; + if (!old_active) + old_active = bond_get_old_active(bond, new_active); + if (old_active) { ether_addr_copy(tmp_mac, new_active->dev->dev_addr); ether_addr_copy(saddr.sa_data, @@ -689,40 +709,57 @@ out: } -static bool bond_should_change_active(struct bonding *bond) +static struct slave *bond_choose_primary_or_current(struct bonding *bond) { struct slave *prim = rtnl_dereference(bond->primary_slave); struct slave *curr = rtnl_dereference(bond->curr_active_slave); - if (!prim || !curr || curr->link != BOND_LINK_UP) - return true; + if (!prim || prim->link != BOND_LINK_UP) { + if (!curr || curr->link != BOND_LINK_UP) + return NULL; + return curr; + } + if (bond->force_primary) { bond->force_primary = false; - return true; + return prim; + } + + if (!curr || curr->link != BOND_LINK_UP) + return prim; + + /* At this point, prim and curr are both up */ + switch (bond->params.primary_reselect) { + case BOND_PRI_RESELECT_ALWAYS: + return prim; + case BOND_PRI_RESELECT_BETTER: + if (prim->speed < curr->speed) + return curr; + if (prim->speed == curr->speed && prim->duplex <= curr->duplex) + return curr; + return prim; + case BOND_PRI_RESELECT_FAILURE: + return curr; + default: + netdev_err(bond->dev, "impossible primary_reselect %d\n", + bond->params.primary_reselect); + return curr; } - if (bond->params.primary_reselect == BOND_PRI_RESELECT_BETTER && - (prim->speed < curr->speed || - (prim->speed == curr->speed && prim->duplex <= curr->duplex))) - return false; - if (bond->params.primary_reselect == BOND_PRI_RESELECT_FAILURE) - return false; - return true; } /** - * find_best_interface - select the best available slave to be the active one + * bond_find_best_slave - select the best available slave to be the active one * @bond: our bonding struct */ static struct slave *bond_find_best_slave(struct bonding *bond) { - struct slave *slave, *bestslave = NULL, *primary; + struct slave *slave, *bestslave = NULL; struct list_head *iter; int mintime = 
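bond_choose_primary_or_current() above replaces a boolean helper with a function that returns the slave to use, folding the primary_reselect policy into one switch. The following sketch reproduces just that decision logic in userspace (force_primary handling omitted); the types are simplified stand-ins for the kernel structures, not the bonding driver's definitions.

/* Hedged sketch of the primary-vs-current selection policy. */
#include <stdio.h>
#include <stddef.h>

enum reselect { RESELECT_ALWAYS, RESELECT_BETTER, RESELECT_FAILURE };

struct slave {
	const char *name;
	int link_up;
	int speed;
	int duplex;
};

static struct slave *choose_primary_or_current(struct slave *prim,
					       struct slave *curr,
					       enum reselect policy)
{
	if (!prim || !prim->link_up)
		return (curr && curr->link_up) ? curr : NULL;
	if (!curr || !curr->link_up)
		return prim;

	/* Both are up: the policy decides whether to switch back to primary. */
	switch (policy) {
	case RESELECT_ALWAYS:
		return prim;
	case RESELECT_BETTER:
		if (prim->speed < curr->speed)
			return curr;
		if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
			return curr;
		return prim;
	case RESELECT_FAILURE:
	default:
		return curr;
	}
}

int main(void)
{
	struct slave prim = { "eth0", 1, 1000, 1 };
	struct slave curr = { "eth1", 1, 1000, 1 };

	printf("always:  %s\n", choose_primary_or_current(&prim, &curr, RESELECT_ALWAYS)->name);
	printf("better:  %s\n", choose_primary_or_current(&prim, &curr, RESELECT_BETTER)->name);
	printf("failure: %s\n", choose_primary_or_current(&prim, &curr, RESELECT_FAILURE)->name);
	return 0;
}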
bond->params.updelay; - primary = rtnl_dereference(bond->primary_slave); - if (primary && primary->link == BOND_LINK_UP && - bond_should_change_active(bond)) - return primary; + slave = bond_choose_primary_or_current(bond); + if (slave) + return slave; bond_for_each_slave(bond, slave, iter) { if (slave->link == BOND_LINK_UP) @@ -1708,9 +1745,16 @@ err_free: err_undo_flags: /* Enslave of first slave has failed and we need to fix master's mac */ - if (!bond_has_slaves(bond) && - ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr)) - eth_hw_addr_random(bond_dev); + if (!bond_has_slaves(bond)) { + if (ether_addr_equal_64bits(bond_dev->dev_addr, + slave_dev->dev_addr)) + eth_hw_addr_random(bond_dev); + if (bond_dev->type != ARPHRD_ETHER) { + ether_setup(bond_dev); + bond_dev->flags |= IFF_MASTER; + bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; + } + } return res; } @@ -1899,6 +1943,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev, bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; netdev_info(bond_dev, "Destroying bond %s\n", bond_dev->name); + bond_remove_proc_entry(bond); unregister_netdevice(bond_dev); } return ret; diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index f4e40aa4d2a2..945c0955a967 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -577,10 +577,10 @@ static void at91_rx_overflow_err(struct net_device *dev) cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); } /** @@ -642,10 +642,10 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb) } at91_read_mb(dev, mb, cf); - netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); can_led_event(dev, CAN_LED_EVENT_RX); } @@ -802,10 +802,10 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr) return 0; at91_poll_err_frame(dev, cf, reg_sr); - netif_receive_skb(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; + netif_receive_skb(skb); return 1; } @@ -1067,10 +1067,10 @@ static void at91_irq_err(struct net_device *dev) return; at91_irq_err_state(dev, cf, new_state); - netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; + netif_rx(skb); priv->can.state = new_state; } diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c index 27ad312e7abf..57dadd52b428 100644 --- a/drivers/net/can/bfin_can.c +++ b/drivers/net/can/bfin_can.c @@ -424,10 +424,9 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc) cf->data[6 - i] = (6 - i) < cf->can_dlc ? 
(val >> 8) : 0; } - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) @@ -508,10 +507,9 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) priv->can.state = state; - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); return 0; } diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 041525d2595c..5d214d135332 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -592,6 +592,7 @@ static int c_can_start(struct net_device *dev) { struct c_can_priv *priv = netdev_priv(dev); int err; + struct pinctrl *p; /* basic c_can configuration */ err = c_can_chip_config(dev); @@ -604,8 +605,13 @@ static int c_can_start(struct net_device *dev) priv->can.state = CAN_STATE_ERROR_ACTIVE; - /* activate pins */ - pinctrl_pm_select_default_state(dev->dev.parent); + /* Attempt to use "active" if available else use "default" */ + p = pinctrl_get_select(priv->device, "active"); + if (!IS_ERR(p)) + pinctrl_put(p); + else + pinctrl_pm_select_default_state(priv->device); + return 0; } diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index c11d44984036..70a8cbb29e75 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c @@ -504,10 +504,10 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1) for (i = 0; i < cf->can_dlc; i++) cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]); } - netif_rx(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } static int cc770_err(struct net_device *dev, u8 status) @@ -584,10 +584,10 @@ static int cc770_err(struct net_device *dev, u8 status) } } - netif_rx(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); return 0; } diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index e9b1810d319f..aede704605c6 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -440,9 +440,6 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) struct can_frame *cf = (struct can_frame *)skb->data; u8 dlc = cf->can_dlc; - if (!(skb->tstamp.tv64)) - __net_timestamp(skb); - netif_rx(priv->echo_skb[idx]); priv->echo_skb[idx] = NULL; @@ -578,7 +575,6 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) if (unlikely(!skb)) return NULL; - __net_timestamp(skb); skb->protocol = htons(ETH_P_CAN); skb->pkt_type = PACKET_BROADCAST; skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -589,6 +585,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) can_skb_reserve(skb); can_skb_prv(skb)->ifindex = dev->ifindex; + can_skb_prv(skb)->skbcnt = 0; *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame)); memset(*cf, 0, sizeof(struct can_frame)); @@ -607,7 +604,6 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev, if (unlikely(!skb)) return NULL; - __net_timestamp(skb); skb->protocol = htons(ETH_P_CANFD); skb->pkt_type = PACKET_BROADCAST; skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -618,6 +614,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev, can_skb_reserve(skb); can_skb_prv(skb)->ifindex = dev->ifindex; + can_skb_prv(skb)->skbcnt = 0; *cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame)); memset(*cfd, 0, sizeof(struct canfd_frame)); diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 6201c5a1a884..b1e8d729851c 100644 --- 
a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -577,10 +577,10 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr) return 0; do_bus_err(dev, cf, reg_esr); - netif_receive_skb(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; + netif_receive_skb(skb); return 1; } @@ -622,10 +622,9 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr) if (unlikely(new_state == CAN_STATE_BUS_OFF)) can_bus_off(dev); - netif_receive_skb(skb); - dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; + netif_receive_skb(skb); return 1; } @@ -670,10 +669,10 @@ static int flexcan_read_frame(struct net_device *dev) } flexcan_read_fifo(dev, cf); - netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); can_led_event(dev, CAN_LED_EVENT_RX); diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c index e3d7e22a4fa0..db9538d4b358 100644 --- a/drivers/net/can/grcan.c +++ b/drivers/net/can/grcan.c @@ -1216,11 +1216,12 @@ static int grcan_receive(struct net_device *dev, int budget) cf->data[i] = (u8)(slot[j] >> shift); } } - netif_receive_skb(skb); /* Update statistics and read pointer */ stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); + rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size); } diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c index 7deb80dcbe8c..7bd54191f962 100644 --- a/drivers/net/can/rcar_can.c +++ b/drivers/net/can/rcar_can.c @@ -508,7 +508,8 @@ static int rcar_can_open(struct net_device *ndev) err = clk_prepare_enable(priv->clk); if (err) { - netdev_err(ndev, "failed to enable periperal clock, error %d\n", + netdev_err(ndev, + "failed to enable peripheral clock, error %d\n", err); goto out; } @@ -526,7 +527,8 @@ static int rcar_can_open(struct net_device *ndev) napi_enable(&priv->napi); err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev); if (err) { - netdev_err(ndev, "error requesting interrupt %x\n", ndev->irq); + netdev_err(ndev, "request_irq(%d) failed, error %d\n", + ndev->irq, err); goto out_close; } can_led_event(ndev, CAN_LED_EVENT_OPEN); @@ -758,8 +760,9 @@ static int rcar_can_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (!irq) { + if (irq < 0) { dev_err(&pdev->dev, "No IRQ resource\n"); + err = irq; goto fail; } @@ -782,7 +785,8 @@ static int rcar_can_probe(struct platform_device *pdev) priv->clk = devm_clk_get(&pdev->dev, "clkp1"); if (IS_ERR(priv->clk)) { err = PTR_ERR(priv->clk); - dev_err(&pdev->dev, "cannot get peripheral clock: %d\n", err); + dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n", + err); goto fail_clk; } @@ -794,7 +798,7 @@ static int rcar_can_probe(struct platform_device *pdev) priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]); if (IS_ERR(priv->can_clk)) { err = PTR_ERR(priv->can_clk); - dev_err(&pdev->dev, "cannot get CAN clock: %d\n", err); + dev_err(&pdev->dev, "cannot get CAN clock, error %d\n", err); goto fail_clk; } @@ -823,7 +827,7 @@ static int rcar_can_probe(struct platform_device *pdev) devm_can_led_init(ndev); - dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n", + dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n", priv->regs, ndev->irq); return 0; diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 32bd7f451aa4..7b92e911a616 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -377,10 +377,9 @@ static void 
sja1000_rx(struct net_device *dev) /* release receive buffer */ sja1000_write_cmdreg(priv, CMD_RRB); - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); can_led_event(dev, CAN_LED_EVENT_RX); } @@ -484,10 +483,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) can_bus_off(dev); } - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); return 0; } diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index f64f5290d6f8..9a3f15cb7ef4 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -207,7 +207,6 @@ static void slc_bump(struct slcan *sl) if (!skb) return; - __net_timestamp(skb); skb->dev = sl->dev; skb->protocol = htons(ETH_P_CAN); skb->pkt_type = PACKET_BROADCAST; @@ -215,13 +214,14 @@ static void slc_bump(struct slcan *sl) can_skb_reserve(skb); can_skb_prv(skb)->ifindex = sl->dev->ifindex; + can_skb_prv(skb)->skbcnt = 0; memcpy(skb_put(skb, sizeof(struct can_frame)), &cf, sizeof(struct can_frame)); - netif_rx_ni(skb); sl->dev->stats.rx_packets++; sl->dev->stats.rx_bytes += cf.can_dlc; + netif_rx_ni(skb); } /* parse tty input stream */ diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index c1a95a34d62e..b7e83c212023 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -1086,8 +1086,8 @@ static int mcp251x_can_probe(struct spi_device *spi) if (ret) goto out_clk; - priv->power = devm_regulator_get(&spi->dev, "vdd"); - priv->transceiver = devm_regulator_get(&spi->dev, "xceiver"); + priv->power = devm_regulator_get_optional(&spi->dev, "vdd"); + priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver"); if ((PTR_ERR(priv->power) == -EPROBE_DEFER) || (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) { ret = -EPROBE_DEFER; @@ -1222,17 +1222,16 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev) struct spi_device *spi = to_spi_device(dev); struct mcp251x_priv *priv = spi_get_drvdata(spi); - if (priv->after_suspend & AFTER_SUSPEND_POWER) { + if (priv->after_suspend & AFTER_SUSPEND_POWER) mcp251x_power_enable(priv->power, 1); + + if (priv->after_suspend & AFTER_SUSPEND_UP) { + mcp251x_power_enable(priv->transceiver, 1); queue_work(priv->wq, &priv->restart_work); } else { - if (priv->after_suspend & AFTER_SUSPEND_UP) { - mcp251x_power_enable(priv->transceiver, 1); - queue_work(priv->wq, &priv->restart_work); - } else { - priv->after_suspend = 0; - } + priv->after_suspend = 0; } + priv->force_quit = 0; enable_irq(spi->irq); return 0; diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index e95a9e1a889f..cf345cbfe819 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -747,9 +747,9 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, } } - netif_rx(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); return 0; } diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 866bac0ae7e9..2d390384ef3b 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -324,10 +324,9 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg) cf->data[i] = msg->msg.can_msg.msg[i]; } - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) @@ -400,10 +399,9 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) stats->rx_errors++; } - netif_rx(skb); 
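A recurring change in the CAN driver hunks above and below is moving the rx_packets/rx_bytes updates in front of netif_rx()/netif_receive_skb(): once the skb has been handed to the network stack it may already be freed, so reading can_dlc from it afterwards risks a use-after-free. The sketch below illustrates that ordering with invented userspace stand-ins; it is not driver code.

/* Hedged illustration: account for a buffer while you still own it, then
 * hand ownership to the consumer, which may free it. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct frame {
	unsigned char dlc;
	unsigned char data[8];
};

static unsigned long rx_packets, rx_bytes;

/* Stand-in for netif_rx(): ownership of the frame passes to the "stack",
 * which frees it. */
static void stack_rx(struct frame *f)
{
	free(f);
}

static void deliver(const unsigned char *payload, unsigned char dlc)
{
	struct frame *f = malloc(sizeof(*f));

	if (!f)
		return;
	f->dlc = dlc;
	memcpy(f->data, payload, dlc);

	/* Account for the frame while we still own it ... */
	rx_packets++;
	rx_bytes += f->dlc;
	/* ... and only then give it away. */
	stack_rx(f);
}

int main(void)
{
	const unsigned char payload[8] = { 0xde, 0xad, 0xbe, 0xef };

	deliver(payload, 4);
	printf("packets=%lu bytes=%lu\n", rx_packets, rx_bytes);
	return 0;
}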
- stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } /* diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 411c1af92c62..0e5a4493ba4f 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -301,13 +301,12 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, cf->data[7] = rxerr; } - netif_rx(skb); - priv->bec.txerr = txerr; priv->bec.rxerr = rxerr; stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } } @@ -347,10 +346,9 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv, cf->data[i] = msg->msg.rx.data[i]; } - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } return; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 72427f21edff..6b94007ae052 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -526,9 +526,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, hwts->hwtstamp = timeval_to_ktime(tv); } - netif_rx(skb); mc->netdev->stats.rx_packets++; mc->netdev->stats.rx_bytes += cf->can_dlc; + netif_rx(skb); return 0; } @@ -659,12 +659,11 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) hwts = skb_hwtstamps(skb); hwts->hwtstamp = timeval_to_ktime(tv); - /* push the skb */ - netif_rx(skb); - /* update statistics */ mc->netdev->stats.rx_packets++; mc->netdev->stats.rx_bytes += cf->can_dlc; + /* push the skb */ + netif_rx(skb); return 0; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index dec51717635e..7d61b3279798 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -553,9 +553,9 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, hwts = skb_hwtstamps(skb); hwts->hwtstamp = timeval_to_ktime(tv); - netif_rx(skb); netdev->stats.rx_packets++; netdev->stats.rx_bytes += can_frame->can_dlc; + netif_rx(skb); return 0; } @@ -670,9 +670,9 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv); hwts = skb_hwtstamps(skb); hwts->hwtstamp = timeval_to_ktime(tv); - netif_rx(skb); netdev->stats.rx_packets++; netdev->stats.rx_bytes += can_frame->can_dlc; + netif_rx(skb); return 0; } diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index dd52c7a4c80d..de95b1ccba3e 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -461,10 +461,9 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv, priv->bec.txerr = txerr; priv->bec.rxerr = rxerr; - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } /* Read data and status frames */ @@ -494,10 +493,9 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv, else memcpy(cf->data, msg->data, cf->can_dlc); - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); can_led_event(priv->netdev, CAN_LED_EVENT_RX); } else { diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index 0ce868de855d..674f367087c5 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -78,9 +78,6 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev) skb->dev = dev; skb->ip_summed = CHECKSUM_UNNECESSARY; - if (!(skb->tstamp.tv64)) - __net_timestamp(skb); - netif_rx_ni(skb); } diff --git 
a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 972982f8bea7..079897b3a955 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -696,9 +696,20 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) } /* Include the pseudo-PHY address and the broadcast PHY address to - * divert reads towards our workaround + * divert reads towards our workaround. This is only required for + * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such + * that we can use the regular SWITCH_MDIO master controller instead. + * + * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask + * to have a 1:1 mapping between Port address and PHY address in order + * to utilize the slave_mii_bus instance to read from Port PHYs. This is + * not what we want here, so we initialize phys_mii_mask 0 to always + * utilize the "master" MDIO bus backed by the "mdio-unimac" driver. */ - ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); + if (of_machine_is_compatible("brcm,bcm7445d0")) + ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); + else + ds->phys_mii_mask = 0; rev = reg_readl(priv, REG_SWITCH_REVISION); priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index fd8547c2b79d..561342466076 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1163,7 +1163,7 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask) newfid = __ffs(ps->fid_mask); ps->fid[port] = newfid; - ps->fid_mask &= (1 << newfid); + ps->fid_mask &= ~(1 << newfid); ps->bridge_mask[fid] &= ~(1 << port); ps->bridge_mask[newfid] = 1 << port; diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 41095ebad97f..2d1ce3c5d0dd 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -2382,6 +2382,7 @@ boomerang_interrupt(int irq, void *dev_id) void __iomem *ioaddr; int status; int work_done = max_interrupt_work; + int handled = 0; ioaddr = vp->ioaddr; @@ -2400,6 +2401,7 @@ boomerang_interrupt(int irq, void *dev_id) if ((status & IntLatch) == 0) goto handler_exit; /* No interrupt: shared IRQs can cause this */ + handled = 1; if (status == 0xffff) { /* h/w no longer present (hotplug)? 
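The mv88e6xxx hunk above is a one-character fix: fid_mask tracks free FIDs, so handing one out must clear only that bit with mask &= ~(1 << newfid); the old mask &= (1 << newfid) instead discarded every other free FID. A small userspace sketch of the allocate/free pattern follows; the names and the initial mask are invented for the example.

/* Hedged sketch of claiming the lowest free ID from a bitmask and returning
 * it later; the bug being fixed was clearing with &= instead of &= ~. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

static unsigned int fid_mask = 0x0ffe;	/* FIDs 1..11 free, 0 reserved */

/* Returns the lowest free ID and marks it in use, or -1 if none are free. */
static int fid_alloc(void)
{
	int fid;

	if (!fid_mask)
		return -1;
	fid = ffs(fid_mask) - 1;	/* lowest set bit, like __ffs() */
	fid_mask &= ~(1u << fid);	/* clear only that bit */
	return fid;
}

static void fid_free(int fid)
{
	fid_mask |= 1u << fid;		/* put the ID back into the pool */
}

int main(void)
{
	int a = fid_alloc();
	int b = fid_alloc();

	printf("allocated %d and %d, mask now %#x\n", a, b, fid_mask);
	fid_free(a);
	fid_free(b);
	return 0;
}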
*/ if (vortex_debug > 1) @@ -2501,7 +2503,7 @@ boomerang_interrupt(int irq, void *dev_id) handler_exit: vp->handling_irq = 0; spin_unlock(&vp->lock); - return IRQ_HANDLED; + return IRQ_RETVAL(handled); } static int vortex_rx(struct net_device *dev) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index 661cdaa7ea96..b3bc87fe3764 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -303,7 +303,8 @@ static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd, get_page(pa->pages); bd->pa = *pa; - bd->dma = pa->pages_dma + pa->pages_offset; + bd->dma_base = pa->pages_dma; + bd->dma_off = pa->pages_offset; bd->dma_len = len; pa->pages_offset += len; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 506e832c9e9a..a4473d8ff4fa 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1110,6 +1110,7 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata, unsigned int rx_usecs = pdata->rx_usecs; unsigned int rx_frames = pdata->rx_frames; unsigned int inte; + dma_addr_t hdr_dma, buf_dma; if (!rx_usecs && !rx_frames) { /* No coalescing, interrupt for every descriptor */ @@ -1129,10 +1130,12 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata, * Set buffer 2 (hi) address to buffer dma address (hi) and * set control bits OWN and INTE */ - rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma)); - rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma)); - rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma)); - rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma)); + hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off; + buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off; + rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma)); + rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma)); + rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma)); + rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma)); XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 1e9c28d19ef8..aae9d5ecd182 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1765,8 +1765,9 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, /* Start with the header buffer which may contain just the header * or the header plus data */ - dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma, - rdata->rx.hdr.dma_len, DMA_FROM_DEVICE); + dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base, + rdata->rx.hdr.dma_off, + rdata->rx.hdr.dma_len, DMA_FROM_DEVICE); packet = page_address(rdata->rx.hdr.pa.pages) + rdata->rx.hdr.pa.pages_offset; @@ -1778,8 +1779,11 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, len -= copy_len; if (len) { /* Add the remaining data as a frag */ - dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma, - rdata->rx.buf.dma_len, DMA_FROM_DEVICE); + dma_sync_single_range_for_cpu(pdata->dev, + rdata->rx.buf.dma_base, + rdata->rx.buf.dma_off, + rdata->rx.buf.dma_len, + DMA_FROM_DEVICE); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rdata->rx.buf.pa.pages, @@ -1945,8 +1949,9 @@ read_again: if (!skb) error = 1; } else if (rdesc_len) { - dma_sync_single_for_cpu(pdata->dev, - rdata->rx.buf.dma, + dma_sync_single_range_for_cpu(pdata->dev, + rdata->rx.buf.dma_base, + rdata->rx.buf.dma_off, 
rdata->rx.buf.dma_len, DMA_FROM_DEVICE); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 63d72a140053..717ce21b6077 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -337,7 +337,8 @@ struct xgbe_buffer_data { struct xgbe_page_alloc pa; struct xgbe_page_alloc pa_unmap; - dma_addr_t dma; + dma_addr_t dma_base; + unsigned long dma_off; unsigned int dma_len; }; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 909ad7a0d480..4566cdf0bc39 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1793,7 +1793,7 @@ static int bcm_sysport_probe(struct platform_device *pdev) macaddr = of_get_mac_address(dn); if (!macaddr || !is_valid_ether_addr(macaddr)) { dev_warn(&pdev->dev, "using random Ethernet MAC\n"); - random_ether_addr(dev->dev_addr); + eth_hw_addr_random(dev); } else { ether_addr_copy(dev->dev_addr, macaddr); } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index b43b2cb9b830..64c1e9db6b0b 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1230,7 +1230,6 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev, new_skb = skb_realloc_headroom(skb, sizeof(*status)); dev_kfree_skb(skb); if (!new_skb) { - dev->stats.tx_errors++; dev->stats.tx_dropped++; return NULL; } @@ -1465,7 +1464,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, if (unlikely(!skb)) { dev->stats.rx_dropped++; - dev->stats.rx_errors++; goto next; } @@ -1493,7 +1491,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { netif_err(priv, rx_status, dev, "dropping fragmented packet!\n"); - dev->stats.rx_dropped++; dev->stats.rx_errors++; dev_kfree_skb_any(skb); goto next; @@ -1515,7 +1512,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, dev->stats.rx_frame_errors++; if (dma_flag & DMA_RX_LG) dev->stats.rx_length_errors++; - dev->stats.rx_dropped++; dev->stats.rx_errors++; dev_kfree_skb_any(skb); goto next; diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index ac27e24264a5..f557a2aaec23 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -1508,16 +1508,7 @@ static void sbmac_channel_start(struct sbmac_softc *s) __raw_writeq(reg, port); port = s->sbm_base + R_MAC_ETHERNET_ADDR; -#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS - /* - * Pass1 SOCs do not receive packets addressed to the - * destination address in the R_MAC_ETHERNET_ADDR register. - * Set the value to zero. - */ - __raw_writeq(0, port); -#else __raw_writeq(reg, port); -#endif /* * Set the receive filter for no packets, and write values diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 484eb8c37489..a11485fbb33f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -952,16 +952,23 @@ static int devlog_show(struct seq_file *seq, void *v) * eventually have to put a format interpreter in here ... */ seq_printf(seq, "%10d %15llu %8s %8s ", - e->seqno, e->timestamp, + be32_to_cpu(e->seqno), + be64_to_cpu(e->timestamp), (e->level < ARRAY_SIZE(devlog_level_strings) ? 
devlog_level_strings[e->level] : "UNKNOWN"), (e->facility < ARRAY_SIZE(devlog_facility_strings) ? devlog_facility_strings[e->facility] : "UNKNOWN")); - seq_printf(seq, e->fmt, e->params[0], e->params[1], - e->params[2], e->params[3], e->params[4], - e->params[5], e->params[6], e->params[7]); + seq_printf(seq, e->fmt, + be32_to_cpu(e->params[0]), + be32_to_cpu(e->params[1]), + be32_to_cpu(e->params[2]), + be32_to_cpu(e->params[3]), + be32_to_cpu(e->params[4]), + be32_to_cpu(e->params[5]), + be32_to_cpu(e->params[6]), + be32_to_cpu(e->params[7])); } return 0; } @@ -1043,23 +1050,17 @@ static int devlog_open(struct inode *inode, struct file *file) return ret; } - /* Translate log multi-byte integral elements into host native format - * and determine where the first entry in the log is. + /* Find the earliest (lowest Sequence Number) log entry in the + * circular Device Log. */ for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) { struct fw_devlog_e *e = &dinfo->log[index]; - int i; __u32 seqno; if (e->timestamp == 0) continue; - e->timestamp = (__force __be64)be64_to_cpu(e->timestamp); seqno = be32_to_cpu(e->seqno); - for (i = 0; i < 8; i++) - e->params[i] = - (__force __be32)be32_to_cpu(e->params[i]); - if (seqno < fseqno) { fseqno = seqno; dinfo->first = index; diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index da2004e2a741..918a8e42139b 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1170,7 +1170,7 @@ static int enic_poll(struct napi_struct *napi, int budget) wq_work_done, 0 /* dont unmask intr */, 0 /* dont reset intr timer */); - return rq_work_done; + return budget; } if (budget > 0) @@ -1191,6 +1191,7 @@ static int enic_poll(struct napi_struct *napi, int budget) 0 /* don't reset intr timer */); err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); + enic_poll_unlock_napi(&enic->rq[cq_rq], napi); /* Buffer allocation failed. Stay in polling * mode so we can try to fill the ring again. @@ -1208,7 +1209,6 @@ static int enic_poll(struct napi_struct *napi, int budget) napi_complete(napi); vnic_intr_unmask(&enic->intr[intr]); } - enic_poll_unlock_napi(&enic->rq[cq_rq], napi); return rq_work_done; } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 370e20ed224c..62e48bc0cb23 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1462,7 +1462,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, struct mvneta_rx_queue *rxq) { struct net_device *dev = pp->dev; - int rx_done, rx_filled; + int rx_done; u32 rcvd_pkts = 0; u32 rcvd_bytes = 0; @@ -1473,7 +1473,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, rx_todo = rx_done; rx_done = 0; - rx_filled = 0; /* Fairness NAPI loop */ while (rx_done < rx_todo) { @@ -1484,7 +1483,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, int rx_bytes, err; rx_done++; - rx_filled++; rx_status = rx_desc->status; rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); data = (unsigned char *)rx_desc->buf_cookie; @@ -1524,6 +1522,14 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, continue; } + /* Refill processing */ + err = mvneta_rx_refill(pp, rx_desc); + if (err) { + netdev_err(dev, "Linux processing - Can't refill\n"); + rxq->missed++; + goto err_drop_frame; + } + skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 
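The cxgb4 devlog change above stops converting the firmware log buffer in place and instead applies be32_to_cpu()/be64_to_cpu() to each field only when it is printed, so the stored entries keep their firmware-native byte order and can be walked again without double-swapping. The userspace sketch below shows the same approach using be32toh()/be64toh() from endian.h; the structure layout is a simplified stand-in, not the real fw_devlog_e.

/* Hedged sketch: keep big-endian data as stored, convert at display time. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct fw_log_entry {
	uint64_t timestamp;	/* big-endian, as the firmware wrote it */
	uint32_t seqno;		/* big-endian */
	uint32_t params[2];	/* big-endian */
};

static void log_show(const struct fw_log_entry *e)
{
	/* Convert on the way out; the stored entry stays big-endian. */
	printf("%10u %15llu param0=%u param1=%u\n",
	       (unsigned)be32toh(e->seqno),
	       (unsigned long long)be64toh(e->timestamp),
	       (unsigned)be32toh(e->params[0]),
	       (unsigned)be32toh(e->params[1]));
}

int main(void)
{
	struct fw_log_entry e = {
		.timestamp = htobe64(123456789ULL),
		.seqno     = htobe32(42),
		.params    = { htobe32(7), htobe32(9) },
	};

	log_show(&e);
	return 0;
}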
0 : pp->frag_size); if (!skb) goto err_drop_frame; @@ -1543,14 +1549,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, mvneta_rx_csum(pp, rx_status, skb); napi_gro_receive(&pp->napi, skb); - - /* Refill processing */ - err = mvneta_rx_refill(pp, rx_desc); - if (err) { - netdev_err(dev, "Linux processing - Can't refill\n"); - rxq->missed++; - rx_filled--; - } } if (rcvd_pkts) { @@ -1563,7 +1561,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, } /* Update rxq management counters */ - mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled); + mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); return rx_done; } diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index fd9745714d90..78849dd4ef8e 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -228,9 +228,7 @@ static void ravb_ring_format(struct net_device *ndev, int q) struct ravb_desc *desc = NULL; int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q]; - struct sk_buff *skb; dma_addr_t dma_addr; - void *buffer; int i; priv->cur_rx[q] = 0; @@ -241,41 +239,28 @@ static void ravb_ring_format(struct net_device *ndev, int q) memset(priv->rx_ring[q], 0, rx_ring_size); /* Build RX ring buffer */ for (i = 0; i < priv->num_rx_ring[q]; i++) { - priv->rx_skb[q][i] = NULL; - skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1); - if (!skb) - break; - ravb_set_buffer_align(skb); /* RX descriptor */ rx_desc = &priv->rx_ring[q][i]; /* The size of the buffer should be on 16-byte boundary. */ rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16)); - dma_addr = dma_map_single(&ndev->dev, skb->data, + dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data, ALIGN(PKT_BUF_SZ, 16), DMA_FROM_DEVICE); - if (dma_mapping_error(&ndev->dev, dma_addr)) { - dev_kfree_skb(skb); - break; - } - priv->rx_skb[q][i] = skb; + /* We just set the data size to 0 for a failed mapping which + * should prevent DMA from happening... 
+ */ + if (dma_mapping_error(&ndev->dev, dma_addr)) + rx_desc->ds_cc = cpu_to_le16(0); rx_desc->dptr = cpu_to_le32(dma_addr); rx_desc->die_dt = DT_FEMPTY; } rx_desc = &priv->rx_ring[q][i]; rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); rx_desc->die_dt = DT_LINKFIX; /* type */ - priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]); memset(priv->tx_ring[q], 0, tx_ring_size); /* Build TX ring buffer */ for (i = 0; i < priv->num_tx_ring[q]; i++) { - priv->tx_skb[q][i] = NULL; - priv->tx_buffers[q][i] = NULL; - buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL); - if (!buffer) - break; - /* Aligned TX buffer */ - priv->tx_buffers[q][i] = buffer; tx_desc = &priv->tx_ring[q][i]; tx_desc->die_dt = DT_EEMPTY; } @@ -298,7 +283,10 @@ static void ravb_ring_format(struct net_device *ndev, int q) static int ravb_ring_init(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + struct sk_buff *skb; int ring_size; + void *buffer; + int i; /* Allocate RX and TX skb rings */ priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], @@ -308,12 +296,28 @@ static int ravb_ring_init(struct net_device *ndev, int q) if (!priv->rx_skb[q] || !priv->tx_skb[q]) goto error; + for (i = 0; i < priv->num_rx_ring[q]; i++) { + skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1); + if (!skb) + goto error; + ravb_set_buffer_align(skb); + priv->rx_skb[q][i] = skb; + } + /* Allocate rings for the aligned buffers */ priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q], sizeof(*priv->tx_buffers[q]), GFP_KERNEL); if (!priv->tx_buffers[q]) goto error; + for (i = 0; i < priv->num_tx_ring[q]; i++) { + buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL); + if (!buffer) + goto error; + /* Aligned TX buffer */ + priv->tx_buffers[q][i] = buffer; + } + /* Allocate all RX descriptors. */ ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size, @@ -524,6 +528,10 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) if (--boguscnt < 0) break; + /* We use 0-byte descriptors to mark the DMA mapping errors */ + if (!pkt_len) + continue; + if (desc_status & MSC_MC) stats->multicast++; @@ -543,10 +551,9 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) skb = priv->rx_skb[q][entry]; priv->rx_skb[q][entry] = NULL; - dma_sync_single_for_cpu(&ndev->dev, - le32_to_cpu(desc->dptr), - ALIGN(PKT_BUF_SZ, 16), - DMA_FROM_DEVICE); + dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr), + ALIGN(PKT_BUF_SZ, 16), + DMA_FROM_DEVICE); get_ts &= (q == RAVB_NC) ? RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; @@ -584,17 +591,15 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) if (!skb) break; /* Better luck next round. */ ravb_set_buffer_align(skb); - dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr), - ALIGN(PKT_BUF_SZ, 16), - DMA_FROM_DEVICE); dma_addr = dma_map_single(&ndev->dev, skb->data, le16_to_cpu(desc->ds_cc), DMA_FROM_DEVICE); skb_checksum_none_assert(skb); - if (dma_mapping_error(&ndev->dev, dma_addr)) { - dev_kfree_skb_any(skb); - break; - } + /* We just set the data size to 0 for a failed mapping + * which should prevent DMA from happening... 
+ */ + if (dma_mapping_error(&ndev->dev, dma_addr)) + desc->ds_cc = cpu_to_le16(0); desc->dptr = cpu_to_le32(dma_addr); priv->rx_skb[q][entry] = skb; } @@ -1279,7 +1284,6 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) u32 dma_addr; void *buffer; u32 entry; - u32 tccr; spin_lock_irqsave(&priv->lock, flags); if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) { @@ -1328,9 +1332,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) dma_wmb(); desc->die_dt = DT_FSINGLE; - tccr = ravb_read(ndev, TCCR); - if (!(tccr & (TCCR_TSRQ0 << q))) - ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR); + ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR); priv->cur_tx[q]++; if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] && diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 847643455468..605cc8948594 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -101,6 +101,11 @@ static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx) return resource_size(&efx->pci_dev->resource[bar]); } +static bool efx_ef10_is_vf(struct efx_nic *efx) +{ + return efx->type->is_vf; +} + static int efx_ef10_get_pf_index(struct efx_nic *efx) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); @@ -677,6 +682,48 @@ static int efx_ef10_probe_pf(struct efx_nic *efx) return efx_ef10_probe(efx); } +int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN); + + MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); + return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN); + + MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); + return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +int efx_ef10_vport_add_mac(struct efx_nic *efx, + unsigned int port_id, u8 *mac) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN); + + MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id); + ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac); + + return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf, + sizeof(inbuf), NULL, 0, NULL); +} + +int efx_ef10_vport_del_mac(struct efx_nic *efx, + unsigned int port_id, u8 *mac) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); + + MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id); + ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac); + + return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf, + sizeof(inbuf), NULL, 0, NULL); +} + #ifdef CONFIG_SFC_SRIOV static int efx_ef10_probe_vf(struct efx_nic *efx) { @@ -3804,6 +3851,72 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) WARN_ON(remove_failed); } +static int efx_ef10_vport_set_mac_address(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u8 mac_old[ETH_ALEN]; + int rc, rc2; + + /* Only reconfigure a PF-created vport */ + if (is_zero_ether_addr(nic_data->vport_mac)) + return 0; + + efx_device_detach_sync(efx); + efx_net_stop(efx->net_dev); + down_write(&efx->filter_sem); + efx_ef10_filter_table_remove(efx); + up_write(&efx->filter_sem); + + rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id); + if (rc) + goto 
restore_filters; + + ether_addr_copy(mac_old, nic_data->vport_mac); + rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id, + nic_data->vport_mac); + if (rc) + goto restore_vadaptor; + + rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, + efx->net_dev->dev_addr); + if (!rc) { + ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr); + } else { + rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old); + if (rc2) { + /* Failed to add original MAC, so clear vport_mac */ + eth_zero_addr(nic_data->vport_mac); + goto reset_nic; + } + } + +restore_vadaptor: + rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); + if (rc2) + goto reset_nic; +restore_filters: + down_write(&efx->filter_sem); + rc2 = efx_ef10_filter_table_probe(efx); + up_write(&efx->filter_sem); + if (rc2) + goto reset_nic; + + rc2 = efx_net_open(efx->net_dev); + if (rc2) + goto reset_nic; + + netif_device_attach(efx->net_dev); + + return rc; + +reset_nic: + netif_err(efx, drv, efx->net_dev, + "Failed to restore when changing MAC address - scheduling reset\n"); + efx_schedule_reset(efx, RESET_TYPE_DATAPATH); + + return rc ? rc : rc2; +} + static int efx_ef10_set_mac_address(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); @@ -3820,8 +3933,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) efx->net_dev->dev_addr); MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, nic_data->vport_id); - rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, - sizeof(inbuf), NULL, 0, NULL); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, + sizeof(inbuf), NULL, 0, NULL); efx_ef10_filter_table_probe(efx); up_write(&efx->filter_sem); @@ -3829,38 +3942,27 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) efx_net_open(efx->net_dev); netif_device_attach(efx->net_dev); -#if !defined(CONFIG_SFC_SRIOV) - if (rc == -EPERM) - netif_err(efx, drv, efx->net_dev, - "Cannot change MAC address; use sfboot to enable mac-spoofing" - " on this interface\n"); -#else - if (rc == -EPERM) { +#ifdef CONFIG_SFC_SRIOV + if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; - /* Switch to PF and change MAC address on vport */ - if (efx->pci_dev->is_virtfn && pci_dev_pf) { - struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); + if (rc == -EPERM) { + struct efx_nic *efx_pf; - if (!efx_ef10_sriov_set_vf_mac(efx_pf, - nic_data->vf_index, - efx->net_dev->dev_addr)) - return 0; - } - netif_err(efx, drv, efx->net_dev, - "Cannot change MAC address; use sfboot to enable mac-spoofing" - " on this interface\n"); - } else if (efx->pci_dev->is_virtfn) { - /* Successfully changed by VF (with MAC spoofing), so update the - * parent PF if possible. - */ - struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; + /* Switch to PF and change MAC address on vport */ + efx_pf = pci_get_drvdata(pci_dev_pf); - if (pci_dev_pf) { + rc = efx_ef10_sriov_set_vf_mac(efx_pf, + nic_data->vf_index, + efx->net_dev->dev_addr); + } else if (!rc) { struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); struct efx_ef10_nic_data *nic_data = efx_pf->nic_data; unsigned int i; + /* MAC address successfully changed by VF (with MAC + * spoofing) so update the parent PF if possible. 
+ */ for (i = 0; i < efx_pf->vf_count; ++i) { struct ef10_vf *vf = nic_data->vf + i; @@ -3871,8 +3973,24 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) } } } - } + } else #endif + if (rc == -EPERM) { + netif_err(efx, drv, efx->net_dev, + "Cannot change MAC address; use sfboot to enable" + " mac-spoofing on this interface\n"); + } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) { + /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC + * fall-back to the method of changing the MAC address on the + * vport. This only applies to PFs because such versions of + * MCFW do not support VFs. + */ + rc = efx_ef10_vport_set_mac_address(efx); + } else { + efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, + sizeof(inbuf), NULL, 0, rc); + } + return rc; } diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 6c9b6e45509a..3c17f274e802 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -29,30 +29,6 @@ static int efx_ef10_evb_port_assign(struct efx_nic *efx, unsigned int port_id, NULL, 0, NULL); } -static int efx_ef10_vport_add_mac(struct efx_nic *efx, - unsigned int port_id, u8 *mac) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN); - - MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id); - ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac); - - return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf, - sizeof(inbuf), NULL, 0, NULL); -} - -static int efx_ef10_vport_del_mac(struct efx_nic *efx, - unsigned int port_id, u8 *mac) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); - - MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id); - ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac); - - return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf, - sizeof(inbuf), NULL, 0, NULL); -} - static int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id, unsigned int vswitch_type) { @@ -136,24 +112,6 @@ static int efx_ef10_vport_free(struct efx_nic *efx, unsigned int port_id) NULL, 0, NULL); } -static int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN); - - MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); - return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf), - NULL, 0, NULL); -} - -static int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN); - - MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); - return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf), - NULL, 0, NULL); -} - static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; @@ -640,21 +598,21 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan, MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL, vf->vlan, &vf->vport_id); if (rc) - goto reset_nic; + goto reset_nic_up_write; restore_mac: if (!is_zero_ether_addr(vf->mac)) { rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac); if (rc2) { eth_zero_addr(vf->mac); - goto reset_nic; + goto reset_nic_up_write; } } restore_evb_port: rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i); if (rc2) - goto reset_nic; + goto reset_nic_up_write; else vf->vport_assigned = 1; @@ -662,14 +620,16 @@ restore_vadaptor: if (vf->efx) { rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED); if 
(rc2) - goto reset_nic; + goto reset_nic_up_write; } restore_filters: if (vf->efx) { rc2 = vf->efx->type->filter_table_probe(vf->efx); if (rc2) - goto reset_nic; + goto reset_nic_up_write; + + up_write(&vf->efx->filter_sem); up_write(&vf->efx->filter_sem); @@ -681,9 +641,12 @@ restore_filters: } return rc; +reset_nic_up_write: + if (vf->efx) + up_write(&vf->efx->filter_sem); + reset_nic: if (vf->efx) { - up_write(&vf->efx->filter_sem); netif_err(efx, drv, efx->net_dev, "Failed to restore VF - scheduling reset.\n"); efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH); diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h index db4ef537c610..6d25b92cb45e 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.h +++ b/drivers/net/ethernet/sfc/ef10_sriov.h @@ -65,5 +65,11 @@ int efx_ef10_vswitching_restore_pf(struct efx_nic *efx); int efx_ef10_vswitching_restore_vf(struct efx_nic *efx); void efx_ef10_vswitching_remove_pf(struct efx_nic *efx); void efx_ef10_vswitching_remove_vf(struct efx_nic *efx); +int efx_ef10_vport_add_mac(struct efx_nic *efx, + unsigned int port_id, u8 *mac); +int efx_ef10_vport_del_mac(struct efx_nic *efx, + unsigned int port_id, u8 *mac); +int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id); +int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id); #endif /* EF10_SRIOV_H */ diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 804b9ad553d3..03bc03b67f08 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -245,11 +245,17 @@ static int efx_check_disabled(struct efx_nic *efx) */ static int efx_process_channel(struct efx_channel *channel, int budget) { + struct efx_tx_queue *tx_queue; int spent; if (unlikely(!channel->enabled)) return 0; + efx_for_each_channel_tx_queue(tx_queue, channel) { + tx_queue->pkts_compl = 0; + tx_queue->bytes_compl = 0; + } + spent = efx_nic_process_eventq(channel, budget); if (spent && efx_channel_has_rx_queue(channel)) { struct efx_rx_queue *rx_queue = @@ -259,6 +265,14 @@ static int efx_process_channel(struct efx_channel *channel, int budget) efx_fast_push_rx_descriptors(rx_queue, true); } + /* Update BQL */ + efx_for_each_channel_tx_queue(tx_queue, channel) { + if (tx_queue->bytes_compl) { + netdev_tx_completed_queue(tx_queue->core_txq, + tx_queue->pkts_compl, tx_queue->bytes_compl); + } + } + return spent; } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index d72f522bf9c3..47d1e3a96522 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -241,6 +241,8 @@ struct efx_tx_queue { unsigned int read_count ____cacheline_aligned_in_smp; unsigned int old_write_count; unsigned int merge_events; + unsigned int bytes_compl; + unsigned int pkts_compl; /* Members used only on the xmit path */ unsigned int insert_count ____cacheline_aligned_in_smp; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index aaf2987512b5..1833a0146571 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -617,7 +617,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); - netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); + tx_queue->pkts_compl += pkts_compl; + tx_queue->bytes_compl += bytes_compl; if (pkts_compl > 1) ++tx_queue->merge_events; diff --git 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 50f7a7a26821..864b476f7fd5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2843,7 +2843,7 @@ int stmmac_dvr_probe(struct device *device, if (res->mac) memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); - dev_set_drvdata(device, priv); + dev_set_drvdata(device, priv->dev); /* Verify driver arguments */ stmmac_verify_args(); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 462820514fae..d155bf2573cd 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -138,19 +138,6 @@ do { \ #define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT) #define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1) -#define cpsw_enable_irq(priv) \ - do { \ - u32 i; \ - for (i = 0; i < priv->num_irqs; i++) \ - enable_irq(priv->irqs_table[i]); \ - } while (0) -#define cpsw_disable_irq(priv) \ - do { \ - u32 i; \ - for (i = 0; i < priv->num_irqs; i++) \ - disable_irq_nosync(priv->irqs_table[i]); \ - } while (0) - #define cpsw_slave_index(priv) \ ((priv->data.dual_emac) ? priv->emac_port : \ priv->data.active_slave) @@ -509,9 +496,11 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = { (func)(slave++, ##arg); \ } while (0) #define cpsw_get_slave_ndev(priv, __slave_no__) \ - (priv->slaves[__slave_no__].ndev) + ((__slave_no__ < priv->data.slaves) ? \ + priv->slaves[__slave_no__].ndev : NULL) #define cpsw_get_slave_priv(priv, __slave_no__) \ - ((priv->slaves[__slave_no__].ndev) ? \ + (((__slave_no__ < priv->data.slaves) && \ + (priv->slaves[__slave_no__].ndev)) ? \ netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \ #define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \ @@ -781,7 +770,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) cpsw_intr_disable(priv); if (priv->irq_enabled == true) { - cpsw_disable_irq(priv); + disable_irq_nosync(priv->irqs_table[0]); priv->irq_enabled = false; } @@ -804,9 +793,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) static int cpsw_poll(struct napi_struct *napi, int budget) { struct cpsw_priv *priv = napi_to_priv(napi); - int num_tx, num_rx; - - num_tx = cpdma_chan_process(priv->txch, 128); + int num_rx; num_rx = cpdma_chan_process(priv->rxch, budget); if (num_rx < budget) { @@ -817,13 +804,12 @@ static int cpsw_poll(struct napi_struct *napi, int budget) prim_cpsw = cpsw_get_slave_priv(priv, 0); if (prim_cpsw->irq_enabled == false) { prim_cpsw->irq_enabled = true; - cpsw_enable_irq(priv); + enable_irq(priv->irqs_table[0]); } } - if (num_rx || num_tx) - cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n", - num_rx, num_tx); + if (num_rx) + cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx); return num_rx; } @@ -1333,7 +1319,7 @@ static int cpsw_ndo_open(struct net_device *ndev) if (prim_cpsw->irq_enabled == false) { if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) { prim_cpsw->irq_enabled = true; - cpsw_enable_irq(prim_cpsw); + enable_irq(prim_cpsw->irqs_table[0]); } } diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 5ec4ed3f6c8d..ec8ed30196f3 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1617,11 +1617,11 @@ static int netcp_ndo_open(struct net_device *ndev) } mutex_unlock(&netcp_modules_lock); - netcp_rxpool_refill(netcp); napi_enable(&netcp->rx_napi); napi_enable(&netcp->tx_napi); 
knav_queue_enable_notify(netcp->tx_compl_q); knav_queue_enable_notify(netcp->rx_queue); + netcp_rxpool_refill(netcp); netif_tx_wake_all_queues(ndev); dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name); return 0; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 4208dd7ef101..d95f9aae95e7 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1530,9 +1530,9 @@ static int axienet_probe(struct platform_device *pdev) /* Map device registers */ ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0); lp->regs = devm_ioremap_resource(&pdev->dev, ethres); - if (!lp->regs) { + if (IS_ERR(lp->regs)) { dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n"); - ret = -ENOMEM; + ret = PTR_ERR(lp->regs); goto free_netdev; } @@ -1599,9 +1599,9 @@ static int axienet_probe(struct platform_device *pdev) goto free_netdev; } lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares); - if (!lp->dma_regs) { + if (IS_ERR(lp->dma_regs)) { dev_err(&pdev->dev, "could not map DMA regs\n"); - ret = -ENOMEM; + ret = PTR_ERR(lp->dma_regs); goto free_netdev; } lp->rx_irq = irq_of_parse_and_map(np, 1); diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 7856b6ccf5c5..d95a50ae996d 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -482,6 +482,7 @@ static void bpq_setup(struct net_device *dev) memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); dev->flags = 0; + dev->features = NETIF_F_LLTX; /* Allow recursion */ #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) dev->header_ops = &ax25_header_ops; diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index 953a97492fab..9542b7bac61a 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h @@ -67,8 +67,6 @@ struct ipvl_dev { struct ipvl_port *port; struct net_device *phy_dev; struct list_head addrs; - int ipv4cnt; - int ipv6cnt; struct ipvl_pcpu_stats __percpu *pcpu_stats; DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE); netdev_features_t sfeatures; @@ -106,6 +104,11 @@ static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d) return rcu_dereference(d->rx_handler_data); } +static inline struct ipvl_port *ipvlan_port_get_rcu_bh(const struct net_device *d) +{ + return rcu_dereference_bh(d->rx_handler_data); +} + static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d) { return rtnl_dereference(d->rx_handler_data); @@ -124,5 +127,5 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6); struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, const void *iaddr, bool is_v6); -void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync); +void ipvlan_ht_addr_del(struct ipvl_addr *addr); #endif /* __IPVLAN_H */ diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 8afbedad620d..207f62e8de9a 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -85,11 +85,9 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr) hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); } -void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync) +void ipvlan_ht_addr_del(struct ipvl_addr *addr) { hlist_del_init_rcu(&addr->hlnode); - if (sync) - synchronize_rcu(); } struct ipvl_addr *ipvlan_find_addr(const 
struct ipvl_dev *ipvlan, @@ -531,7 +529,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) { struct ipvl_dev *ipvlan = netdev_priv(dev); - struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev); + struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev); if (!port) goto out; diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 1acc283160d9..20b58bdecf75 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -153,10 +153,9 @@ static int ipvlan_open(struct net_device *dev) else dev->flags &= ~IFF_NOARP; - if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { - list_for_each_entry(addr, &ipvlan->addrs, anode) - ipvlan_ht_addr_add(ipvlan, addr); - } + list_for_each_entry(addr, &ipvlan->addrs, anode) + ipvlan_ht_addr_add(ipvlan, addr); + return dev_uc_add(phy_dev, phy_dev->dev_addr); } @@ -171,10 +170,9 @@ static int ipvlan_stop(struct net_device *dev) dev_uc_del(phy_dev, phy_dev->dev_addr); - if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { - list_for_each_entry(addr, &ipvlan->addrs, anode) - ipvlan_ht_addr_del(addr, !dev->dismantle); - } + list_for_each_entry(addr, &ipvlan->addrs, anode) + ipvlan_ht_addr_del(addr); + return 0; } @@ -471,8 +469,6 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, ipvlan->port = port; ipvlan->sfeatures = IPVLAN_FEATURES; INIT_LIST_HEAD(&ipvlan->addrs); - ipvlan->ipv4cnt = 0; - ipvlan->ipv6cnt = 0; /* TODO Probably put random address here to be presented to the * world but keep using the physical-dev address for the outgoing @@ -508,12 +504,12 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head) struct ipvl_dev *ipvlan = netdev_priv(dev); struct ipvl_addr *addr, *next; - if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { - list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { - ipvlan_ht_addr_del(addr, !dev->dismantle); - list_del(&addr->anode); - } + list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { + ipvlan_ht_addr_del(addr); + list_del(&addr->anode); + kfree_rcu(addr, rcu); } + list_del_rcu(&ipvlan->pnode); unregister_netdevice_queue(dev, head); netdev_upper_dev_unlink(ipvlan->phy_dev, dev); @@ -627,7 +623,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); addr->atype = IPVL_IPV6; list_add_tail(&addr->anode, &ipvlan->addrs); - ipvlan->ipv6cnt++; + /* If the interface is not up, the address will be added to the hash * list by ipvlan_open. 
*/ @@ -645,10 +641,8 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) if (!addr) return; - ipvlan_ht_addr_del(addr, true); + ipvlan_ht_addr_del(addr); list_del(&addr->anode); - ipvlan->ipv6cnt--; - WARN_ON(ipvlan->ipv6cnt < 0); kfree_rcu(addr, rcu); return; @@ -661,6 +655,10 @@ static int ipvlan_addr6_event(struct notifier_block *unused, struct net_device *dev = (struct net_device *)if6->idev->dev; struct ipvl_dev *ipvlan = netdev_priv(dev); + /* FIXME IPv6 autoconf calls us from bh without RTNL */ + if (in_softirq()) + return NOTIFY_DONE; + if (!netif_is_ipvlan(dev)) return NOTIFY_DONE; @@ -699,7 +697,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); addr->atype = IPVL_IPV4; list_add_tail(&addr->anode, &ipvlan->addrs); - ipvlan->ipv4cnt++; + /* If the interface is not up, the address will be added to the hash * list by ipvlan_open. */ @@ -717,10 +715,8 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) if (!addr) return; - ipvlan_ht_addr_del(addr, true); + ipvlan_ht_addr_del(addr); list_del(&addr->anode); - ipvlan->ipv4cnt--; - WARN_ON(ipvlan->ipv4cnt < 0); kfree_rcu(addr, rcu); return; diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index f8370808a018..3b933bb5a8d5 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -1355,6 +1355,7 @@ static void macvtap_exit(void) class_unregister(macvtap_class); cdev_del(&macvtap_cdev); unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS); + idr_destroy(&minor_idr); } module_exit(macvtap_exit); diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index cf18940f4e84..cb86d7a01542 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -191,7 +191,7 @@ config MDIO_BUS_MUX_GPIO config MDIO_BUS_MUX_MMIOREG tristate "Support for MMIO device-controlled MDIO bus multiplexers" - depends on OF_MDIO + depends on OF_MDIO && HAS_IOMEM select MDIO_BUS_MUX help This module provides a driver for MDIO bus multiplexers that diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index c7a12e2e07b7..8a3bf5469892 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -164,7 +164,7 @@ static int dp83867_config_init(struct phy_device *phydev) return ret; } - if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) || + if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) && (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) { val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL, DP83867_DEVADDR, phydev->addr); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 095ef3fe369a..46a14cbb0215 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -421,6 +421,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) { struct phy_device *phydev = to_phy_device(dev); struct phy_driver *phydrv = to_phy_driver(drv); + const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids); + int i; if (of_driver_match_device(dev, drv)) return 1; @@ -428,8 +430,21 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) if (phydrv->match_phy_device) return phydrv->match_phy_device(phydev); - return (phydrv->phy_id & phydrv->phy_id_mask) == - (phydev->phy_id & phydrv->phy_id_mask); + if (phydev->is_c45) { + for (i = 1; i < num_ids; i++) { + if (!(phydev->c45_ids.devices_in_package & (1 << i))) + continue; + + if ((phydrv->phy_id & phydrv->phy_id_mask) == + 
(phydev->c45_ids.device_ids[i] & + phydrv->phy_id_mask)) + return 1; + } + return 0; + } else { + return (phydrv->phy_id & phydrv->phy_id_mask) == + (phydev->phy_id & phydrv->phy_id_mask); + } } #ifdef CONFIG_PM diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 4545e78840b0..35a2bffe848a 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -523,6 +523,7 @@ static const struct driver_info wwan_info = { #define REALTEK_VENDOR_ID 0x0bda #define SAMSUNG_VENDOR_ID 0x04e8 #define LENOVO_VENDOR_ID 0x17ef +#define NVIDIA_VENDOR_ID 0x0955 static const struct usb_device_id products[] = { /* BLACKLIST !! @@ -710,6 +711,13 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, +/* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* WHITELIST!!! * * CDC Ether uses two interfaces, not necessarily consecutive. diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index e4b7a47a825c..efc18e05af0a 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -158,7 +158,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf) if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) goto err; - ret = cdc_ncm_bind_common(dev, intf, data_altsetting); + ret = cdc_ncm_bind_common(dev, intf, data_altsetting, 0); if (ret) goto err; diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 8067b8fbb0ee..db40175b1a0b 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -6,7 +6,7 @@ * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com> * * USB Host Driver for Network Control Model (NCM) - * http://www.usb.org/developers/devclass_docs/NCM10.zip + * http://www.usb.org/developers/docs/devclass_docs/NCM10_012011.zip * * The NCM encoding, decoding and initialization logic * derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h @@ -684,10 +684,12 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx) ctx->tx_curr_skb = NULL; } + kfree(ctx->delayed_ndp16); + kfree(ctx); } -int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting) +int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags) { const struct usb_cdc_union_desc *union_desc = NULL; struct cdc_ncm_ctx *ctx; @@ -855,6 +857,17 @@ advance: /* finish setting up the device specific data */ cdc_ncm_setup(dev); + /* Device-specific flags */ + ctx->drvflags = drvflags; + + /* Allocate the delayed NDP if needed. */ + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) { + ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL); + if (!ctx->delayed_ndp16) + goto error2; + dev_info(&intf->dev, "NDP will be placed at end of frame for this device."); + } + /* override ethtool_ops */ dev->net->ethtool_ops = &cdc_ncm_ethtool_ops; @@ -954,8 +967,11 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM) return -ENODEV; - /* The NCM data altsetting is fixed */ - ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM); + /* The NCM data altsetting is fixed, so we hard-coded it. + * Additionally, generic NCM devices are assumed to accept arbitrarily + * placed NDP. 
+ */ + ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0); /* * We should get an event when network connection is "connected" or @@ -986,6 +1002,14 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_ struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data; size_t ndpoffset = le16_to_cpu(nth16->wNdpIndex); + /* If NDP should be moved to the end of the NCM package, we can't follow the + * NTH16 header as we would normally do. NDP isn't written to the SKB yet, and + * the wNdpIndex field in the header is actually not consistent with reality. It will be later. + */ + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) + if (ctx->delayed_ndp16->dwSignature == sign) + return ctx->delayed_ndp16; + /* follow the chain of NDPs, looking for a match */ while (ndpoffset) { ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset); @@ -995,7 +1019,8 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_ } /* align new NDP */ - cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max); + if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)) + cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max); /* verify that there is room for the NDP and the datagram (reserve) */ if ((ctx->tx_max - skb->len - reserve) < ctx->max_ndp_size) @@ -1008,7 +1033,11 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_ nth16->wNdpIndex = cpu_to_le16(skb->len); /* push a new empty NDP */ - ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size); + if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)) + ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size); + else + ndp16 = ctx->delayed_ndp16; + ndp16->dwSignature = sign; ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16)); return ndp16; @@ -1023,6 +1052,15 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) struct sk_buff *skb_out; u16 n = 0, index, ndplen; u8 ready2send = 0; + u32 delayed_ndp_size; + + /* When our NDP gets written in cdc_ncm_ndp(), then skb_out->len gets updated + * accordingly. Otherwise, we should check here. + */ + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) + delayed_ndp_size = ctx->max_ndp_size; + else + delayed_ndp_size = 0; /* if there is a remaining skb, it gets priority */ if (skb != NULL) { @@ -1077,7 +1115,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_max); /* check if we had enough room left for both NDP and frame */ - if (!ndp16 || skb_out->len + skb->len > ctx->tx_max) { + if (!ndp16 || skb_out->len + skb->len + delayed_ndp_size > ctx->tx_max) { if (n == 0) { /* won't fit, MTU problem? */ dev_kfree_skb_any(skb); @@ -1150,6 +1188,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) /* variables will be reset at next call */ } + /* If requested, put NDP at end of frame. */ + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) { + nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data; + cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_max); + nth16->wNdpIndex = cpu_to_le16(skb_out->len); + memcpy(skb_put(skb_out, ctx->max_ndp_size), ctx->delayed_ndp16, ctx->max_ndp_size); + + /* Zero out delayed NDP - signature checking will naturally fail. 
*/ + ndp16 = memset(ctx->delayed_ndp16, 0, ctx->max_ndp_size); + } + /* If collected data size is less or equal ctx->min_tx_pkt * bytes, we send buffers as it is. If we get more data, it * would be more efficient for USB HS mobile device with DMA diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c index 735f7dadb9a0..2680a65cd5e4 100644 --- a/drivers/net/usb/huawei_cdc_ncm.c +++ b/drivers/net/usb/huawei_cdc_ncm.c @@ -73,11 +73,14 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev, struct usb_driver *subdriver = ERR_PTR(-ENODEV); int ret = -ENODEV; struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data; + int drvflags = 0; /* altsetting should always be 1 for NCM devices - so we hard-coded - * it here + * it here. Some huawei devices will need the NDP part of the NCM package to + * be at the end of the frame. */ - ret = cdc_ncm_bind_common(usbnet_dev, intf, 1); + drvflags |= CDC_NCM_FLAG_NDP_TO_END; + ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags); if (ret) goto err; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index f603f362504b..9d43460ce3c7 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -757,6 +757,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ + {QMI_FIXED_INTF(0x1199, 0x9041, 10)}, /* Sierra Wireless MC7305/MC7355 */ {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */ {QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */ {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index aafa1a1898e4..7f6419ebb5e1 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -494,6 +494,7 @@ enum rtl8152_flags { #define VENDOR_ID_REALTEK 0x0bda #define VENDOR_ID_SAMSUNG 0x04e8 #define VENDOR_ID_LENOVO 0x17ef +#define VENDOR_ID_NVIDIA 0x0955 #define MCU_TYPE_PLA 0x0100 #define MCU_TYPE_USB 0x0000 @@ -4117,6 +4118,7 @@ static struct usb_device_id rtl8152_table[] = { {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, + {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, {} }; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 63c7810e1545..7fbca37a1adf 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1828,7 +1828,8 @@ static int virtnet_probe(struct virtio_device *vdev) else vi->hdr_len = sizeof(struct virtio_net_hdr); - if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) + if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || + virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) vi->any_header_sg = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index da11bb5e9c7f..46f4caddccbe 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1216,7 +1216,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, static const u32 rxprod_reg[2] = { VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2 }; - u32 num_rxd = 0; + u32 num_pkts = 0; bool skip_page_frags = false; struct Vmxnet3_RxCompDesc *rcd; struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; @@ -1235,13 +1235,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxDesc *rxd; u32 
idx, ring_idx; struct vmxnet3_cmd_ring *ring = NULL; - if (num_rxd >= quota) { + if (num_pkts >= quota) { /* we may stop even before we see the EOP desc of * the current pkt */ break; } - num_rxd++; BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2); idx = rcd->rxdIdx; ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1; @@ -1413,6 +1412,7 @@ not_lro: napi_gro_receive(&rq->napi, skb); ctx->skb = NULL; + num_pkts++; } rcd_done: @@ -1443,7 +1443,7 @@ rcd_done: &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp); } - return num_rxd; + return num_pkts; } diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c index feacc3b994b7..2f0bd6955f33 100644 --- a/drivers/net/wan/z85230.c +++ b/drivers/net/wan/z85230.c @@ -1044,7 +1044,7 @@ EXPORT_SYMBOL(z8530_sync_dma_close); * @dev: The network device to attach * @c: The Z8530 channel to configure in sync DMA mode. * - * Set up a Z85x30 device for synchronous DMA tranmission. One + * Set up a Z85x30 device for synchronous DMA transmission. One * ISA DMA channel must be available for this to work. The receive * side is run in PIO mode, but then it has the bigger FIFO. */ diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 5e15e8e10ed3..a31a6804dc34 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -279,6 +279,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah) return; case AR9300_DEVID_QCA956X: ah->hw_version.macVersion = AR_SREV_VERSION_9561; + return; } val = REG_READ(ah, AR_SREV) & AR_SREV_ID; diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h index d56064861a9c..d45dc021cda2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/iwlwifi/iwl-fh.h @@ -438,6 +438,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) #define RX_QUEUE_MASK 255 #define RX_QUEUE_SIZE_LOG 8 +/* + * RX related structures and functions + */ +#define RX_FREE_BUFFERS 64 +#define RX_LOW_WATERMARK 8 + /** * struct iwl_rb_status - reserve buffer status * host memory mapped FH registers diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index 80fefe7d7b8c..3b8e85e51002 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c @@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev, hw_addr = (const u8 *)(mac_override + MAC_ADDRESS_OVERRIDE_FAMILY_8000); - /* The byte order is little endian 16 bit, meaning 214365 */ - data->hw_addr[0] = hw_addr[1]; - data->hw_addr[1] = hw_addr[0]; - data->hw_addr[2] = hw_addr[3]; - data->hw_addr[3] = hw_addr[2]; - data->hw_addr[4] = hw_addr[5]; - data->hw_addr[5] = hw_addr[4]; + /* + * Store the MAC address from MAO section. + * No byte swapping is required in MAO section + */ + memcpy(data->hw_addr, hw_addr, ETH_ALEN); /* * Force the use of the OTP MAC address in case of reserved MAC diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 5e4cbdb44c60..737774a01c74 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -660,7 +660,8 @@ struct iwl_scan_config { * iwl_umac_scan_flags *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request * can be preempted by other scan requests with higher priority. - * The low priority scan is aborted. 
+ * The low priority scan will be resumed when the higher proirity scan is + * completed. *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver * when scan starts. */ diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 5de144968723..5000bfcded61 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -1109,6 +1109,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->uid = cpu_to_le32(uid); cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params)); + if (type == IWL_MVM_SCAN_SCHED) + cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); + if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index d68dc697a4a0..26f076e82149 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -1401,6 +1401,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); u8 sta_id; int ret; + static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; lockdep_assert_held(&mvm->mutex); @@ -1467,7 +1468,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, end: IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", keyconf->cipher, keyconf->keylen, keyconf->keyidx, - sta->addr, ret); + sta ? sta->addr : zero_addr, ret); return ret; } diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index d24b6a83e68c..e472729e5f14 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -86,7 +86,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, { lockdep_assert_held(&mvm->time_event_lock); - if (te_data->id == TE_MAX) + if (!te_data->vif) return; list_del(&te_data->list); diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 7ba7a118ff5c..89116864d2a0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -252,7 +252,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, if (info->band == IEEE80211_BAND_2GHZ && !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) - rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS; + rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; else rate_flags = BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 2ed1e4d2774d..9f65c1cff1b1 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -368,12 +368,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = { /* 3165 Series */ {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, /* 7265 Series */ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, @@ -426,9 +428,8 @@ static 
const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 31f72a61cc3f..376b84e54ad7 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -44,15 +44,6 @@ #include "iwl-io.h" #include "iwl-op-mode.h" -/* - * RX related structures and functions - */ -#define RX_NUM_QUEUES 1 -#define RX_POST_REQ_ALLOC 2 -#define RX_CLAIM_REQ_ALLOC 8 -#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES) -#define RX_LOW_WATERMARK 8 - struct iwl_host_cmd; /*This file includes the declaration that are internal to the @@ -86,29 +77,29 @@ struct isr_statistics { * struct iwl_rxq - Rx queue * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) + * @pool: + * @queue: * @read: Shared index to newest available Rx buffer * @write: Shared index to oldest written Rx packet * @free_count: Number of pre-allocated buffers in rx_free - * @used_count: Number of RBDs handled to allocator to use for allocation * @write_actual: - * @rx_free: list of RBDs with allocated RB ready for use - * @rx_used: list of RBDs with no RB attached + * @rx_free: list of free SKBs for use + * @rx_used: List of Rx buffers with no SKB * @need_update: flag to indicate we need to update read/write index * @rb_stts: driver's pointer to receive buffer status * @rb_stts_dma: bus address of receive buffer status * @lock: - * @pool: initial pool of iwl_rx_mem_buffer for the queue - * @queue: actual rx queue * * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers */ struct iwl_rxq { __le32 *bd; dma_addr_t bd_dma; + struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; + struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; u32 read; u32 write; u32 free_count; - u32 used_count; u32 write_actual; struct list_head rx_free; struct list_head rx_used; @@ -116,32 +107,6 @@ struct iwl_rxq { struct iwl_rb_status *rb_stts; dma_addr_t rb_stts_dma; spinlock_t lock; - struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE]; - struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; -}; - -/** - * struct iwl_rb_allocator - Rx allocator - * @pool: initial pool of allocator - * @req_pending: number of requests the allcator had not processed yet - * @req_ready: number of requests honored and ready for claiming - * @rbd_allocated: RBDs with pages allocated and ready to be handled to - * the queue. This is a list of &struct iwl_rx_mem_buffer - * @rbd_empty: RBDs with no page attached for allocator use. 
This is a list - * of &struct iwl_rx_mem_buffer - * @lock: protects the rbd_allocated and rbd_empty lists - * @alloc_wq: work queue for background calls - * @rx_alloc: work struct for background calls - */ -struct iwl_rb_allocator { - struct iwl_rx_mem_buffer pool[RX_POOL_SIZE]; - atomic_t req_pending; - atomic_t req_ready; - struct list_head rbd_allocated; - struct list_head rbd_empty; - spinlock_t lock; - struct workqueue_struct *alloc_wq; - struct work_struct rx_alloc; }; struct iwl_dma_ptr { @@ -285,7 +250,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data - * @rba: allocator for RX replenishing + * @rx_replenish: work that will be called when buffers need to be allocated * @drv - pointer to iwl_drv * @trans: pointer to the generic transport area * @scd_base_addr: scheduler sram base address in SRAM @@ -308,7 +273,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) */ struct iwl_trans_pcie { struct iwl_rxq rxq; - struct iwl_rb_allocator rba; + struct work_struct rx_replenish; struct iwl_trans *trans; struct iwl_drv *drv; diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index a3fbaa0ef5e0..adad8d0fae7f 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -1,7 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -74,29 +74,16 @@ * resets the Rx queue buffers with new memory. * * The management in the driver is as follows: - * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. - * When the interrupt handler is called, the request is processed. - * The page is either stolen - transferred to the upper layer - * or reused - added immediately to the iwl->rxq->rx_free list. - * + When the page is stolen - the driver updates the matching queue's used - * count, detaches the RBD and transfers it to the queue used list. - * When there are two used RBDs - they are transferred to the allocator empty - * list. Work is then scheduled for the allocator to start allocating - * eight buffers. - * When there are another 6 used RBDs - they are transferred to the allocator - * empty list and the driver tries to claim the pre-allocated buffers and - * add them to iwl->rxq->rx_free. If it fails - it continues to claim them - * until ready. - * When there are 8+ buffers in the free list - either from allocation or from - * 8 reused unstolen pages - restock is called to update the FW and indexes. - * + In order to make sure the allocator always has RBDs to use for allocation - * the allocator has initial pool in the size of num_queues*(8-2) - the - * maximum missing RBDs per allocation request (request posted with 2 - * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). - * The queues supplies the recycle of the rest of the RBDs. + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When + * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled + * to replenish the iwl->rxq->rx_free. 
+ * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the + * iwl->rxq is replenished and the READ INDEX is updated (updating the + * 'processed' and 'read' driver indexes as well) * + A received packet is processed and handed to the kernel network stack, * detached from the iwl->rxq. The driver 'processed' index is updated. - * + If there are no allocated buffers in iwl->rxq->rx_free, + * + The Host/Firmware iwl->rxq is replenished at irq thread time from the + * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free, * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. * If there were enough free buffers and RX_STALLED is set it is cleared. * @@ -105,32 +92,18 @@ * * iwl_rxq_alloc() Allocates rx_free * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls - * iwl_pcie_rxq_restock. - * Used only during initialization. + * iwl_pcie_rxq_restock * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx * queue, updates firmware pointers, and updates - * the WRITE index. - * iwl_pcie_rx_allocator() Background work for allocating pages. + * the WRITE index. If insufficient rx_free buffers + * are available, schedules iwl_pcie_rx_replenish * * -- enable interrupts -- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the * READ INDEX, detaching the SKB from the pool. * Moves the packet buffer from queue to rx_used. - * Posts and claims requests to the allocator. * Calls iwl_pcie_rxq_restock to refill any empty * slots. - * - * RBD life-cycle: - * - * Init: - * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue - * - * Regular Receive interrupt: - * Page Stolen: - * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> - * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue - * Page not Stolen: - * rxq.queue -> rxq.rx_free -> rxq.queue * ... * */ @@ -267,6 +240,10 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) rxq->free_count--; } spin_unlock(&rxq->lock); + /* If the pre-allocated buffer pool is dropping low, schedule to + * refill it */ + if (rxq->free_count <= RX_LOW_WATERMARK) + schedule_work(&trans_pcie->rx_replenish); /* If we've added more space for the firmware to place data, tell it. * Increment device's write pointer in multiples of 8. */ @@ -278,44 +255,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) } /* - * iwl_pcie_rx_alloc_page - allocates and returns a page. - * - */ -static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - struct page *page; - gfp_t gfp_mask = GFP_KERNEL; - - if (rxq->free_count > RX_LOW_WATERMARK) - gfp_mask |= __GFP_NOWARN; - - if (trans_pcie->rx_page_order > 0) - gfp_mask |= __GFP_COMP; - - /* Alloc a new receive buffer */ - page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); - if (!page) { - if (net_ratelimit()) - IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", - trans_pcie->rx_page_order); - /* Issue an error if the hardware has consumed more than half - * of its free buffer list and we don't have enough - * pre-allocated buffers. -` */ - if (rxq->free_count <= RX_LOW_WATERMARK && - iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) && - net_ratelimit()) - IWL_CRIT(trans, - "Failed to alloc_pages with GFP_KERNEL. 
Only %u free buffers remaining.\n", - rxq->free_count); - return NULL; - } - return page; -} - -/* * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD * * A used RBD is an Rx buffer that has been given to the stack. To use it again @@ -324,12 +263,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans) * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly * allocated buffers. */ -static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans) +static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rx_mem_buffer *rxb; struct page *page; + gfp_t gfp_mask = priority; while (1) { spin_lock(&rxq->lock); @@ -339,10 +279,32 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans) } spin_unlock(&rxq->lock); + if (rxq->free_count > RX_LOW_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + if (trans_pcie->rx_page_order > 0) + gfp_mask |= __GFP_COMP; + /* Alloc a new receive buffer */ - page = iwl_pcie_rx_alloc_page(trans); - if (!page) + page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); + if (!page) { + if (net_ratelimit()) + IWL_DEBUG_INFO(trans, "alloc_pages failed, " + "order: %d\n", + trans_pcie->rx_page_order); + + if ((rxq->free_count <= RX_LOW_WATERMARK) && + net_ratelimit()) + IWL_CRIT(trans, "Failed to alloc_pages with %s." + "Only %u free buffers remaining.\n", + priority == GFP_ATOMIC ? + "GFP_ATOMIC" : "GFP_KERNEL", + rxq->free_count); + /* We don't reschedule replenish work here -- we will + * call the restock method and if it still needs + * more buffers it will schedule replenish */ return; + } spin_lock(&rxq->lock); @@ -393,7 +355,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) lockdep_assert_held(&rxq->lock); - for (i = 0; i < RX_QUEUE_SIZE; i++) { + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { if (!rxq->pool[i].page) continue; dma_unmap_page(trans->dev, rxq->pool[i].page_dma, @@ -410,144 +372,32 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) * When moving to rx_free an page is allocated for the slot. * * Also restock the Rx queue via iwl_pcie_rxq_restock. - * This is called only during initialization + * This is called as a scheduled work item (except for during initialization) */ -static void iwl_pcie_rx_replenish(struct iwl_trans *trans) +static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp) { - iwl_pcie_rxq_alloc_rbs(trans); + iwl_pcie_rxq_alloc_rbs(trans, gfp); iwl_pcie_rxq_restock(trans); } -/* - * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues - * - * Allocates for each received request 8 pages - * Called as a scheduled work item. 
- */ -static void iwl_pcie_rx_allocator(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - - while (atomic_read(&rba->req_pending)) { - int i; - struct list_head local_empty; - struct list_head local_allocated; - - INIT_LIST_HEAD(&local_allocated); - spin_lock(&rba->lock); - /* swap out the entire rba->rbd_empty to a local list */ - list_replace_init(&rba->rbd_empty, &local_empty); - spin_unlock(&rba->lock); - - for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { - struct iwl_rx_mem_buffer *rxb; - struct page *page; - - /* List should never be empty - each reused RBD is - * returned to the list, and initial pool covers any - * possible gap between the time the page is allocated - * to the time the RBD is added. - */ - BUG_ON(list_empty(&local_empty)); - /* Get the first rxb from the rbd list */ - rxb = list_first_entry(&local_empty, - struct iwl_rx_mem_buffer, list); - BUG_ON(rxb->page); - - /* Alloc a new receive buffer */ - page = iwl_pcie_rx_alloc_page(trans); - if (!page) - continue; - rxb->page = page; - - /* Get physical address of the RB */ - rxb->page_dma = dma_map_page(trans->dev, page, 0, - PAGE_SIZE << trans_pcie->rx_page_order, - DMA_FROM_DEVICE); - if (dma_mapping_error(trans->dev, rxb->page_dma)) { - rxb->page = NULL; - __free_pages(page, trans_pcie->rx_page_order); - continue; - } - /* dma address must be no more than 36 bits */ - BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); - /* and also 256 byte aligned! */ - BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); - - /* move the allocated entry to the out list */ - list_move(&rxb->list, &local_allocated); - i++; - } - - spin_lock(&rba->lock); - /* add the allocated rbds to the allocator allocated list */ - list_splice_tail(&local_allocated, &rba->rbd_allocated); - /* add the unused rbds back to the allocator empty list */ - list_splice_tail(&local_empty, &rba->rbd_empty); - spin_unlock(&rba->lock); - - atomic_dec(&rba->req_pending); - atomic_inc(&rba->req_ready); - } -} - -/* - * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages -.* -.* Called by queue when the queue posted allocation request and - * has freed 8 RBDs in order to restock itself. 
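With the allocator gone, page allocation again happens directly in iwl_pcie_rxq_alloc_rbs() with a caller-supplied gfp_t: GFP_KERNEL from the replenish work item, GFP_ATOMIC from the interrupt path, plus __GFP_NOWARN while the pool is still above the watermark and __GFP_COMP for multi-page buffers. A small helper sketching that flag selection (my_alloc_rx_page is illustrative, not a driver symbol):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/types.h>

/* Illustrative helper: gfp is GFP_KERNEL when the caller may sleep (work
 * item) and GFP_ATOMIC when it may not (interrupt path); quiet is true while
 * the free pool is still above the low watermark, so a failed allocation is
 * not yet worth a warning. */
static struct page *my_alloc_rx_page(gfp_t gfp, unsigned int order, bool quiet)
{
	gfp_t mask = gfp;

	if (quiet)
		mask |= __GFP_NOWARN;
	if (order > 0)
		mask |= __GFP_COMP;	/* compound page for multi-page buffers */

	return alloc_pages(mask, order);
}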
- */ -static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans, - struct iwl_rx_mem_buffer - *out[RX_CLAIM_REQ_ALLOC]) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - int i; - - if (atomic_dec_return(&rba->req_ready) < 0) { - atomic_inc(&rba->req_ready); - IWL_DEBUG_RX(trans, - "Allocation request not ready, pending requests = %d\n", - atomic_read(&rba->req_pending)); - return -ENOMEM; - } - - spin_lock(&rba->lock); - for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { - /* Get next free Rx buffer, remove it from free list */ - out[i] = list_first_entry(&rba->rbd_allocated, - struct iwl_rx_mem_buffer, list); - list_del(&out[i]->list); - } - spin_unlock(&rba->lock); - - return 0; -} - -static void iwl_pcie_rx_allocator_work(struct work_struct *data) +static void iwl_pcie_rx_replenish_work(struct work_struct *data) { - struct iwl_rb_allocator *rba_p = - container_of(data, struct iwl_rb_allocator, rx_alloc); struct iwl_trans_pcie *trans_pcie = - container_of(rba_p, struct iwl_trans_pcie, rba); + container_of(data, struct iwl_trans_pcie, rx_replenish); - iwl_pcie_rx_allocator(trans_pcie->trans); + iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL); } static int iwl_pcie_rx_alloc(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; - struct iwl_rb_allocator *rba = &trans_pcie->rba; struct device *dev = trans->dev; memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); spin_lock_init(&rxq->lock); - spin_lock_init(&rba->lock); if (WARN_ON(rxq->bd || rxq->rb_stts)) return -EINVAL; @@ -637,49 +487,15 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) INIT_LIST_HEAD(&rxq->rx_free); INIT_LIST_HEAD(&rxq->rx_used); rxq->free_count = 0; - rxq->used_count = 0; - for (i = 0; i < RX_QUEUE_SIZE; i++) + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) list_add(&rxq->pool[i].list, &rxq->rx_used); } -static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba) -{ - int i; - - lockdep_assert_held(&rba->lock); - - INIT_LIST_HEAD(&rba->rbd_allocated); - INIT_LIST_HEAD(&rba->rbd_empty); - - for (i = 0; i < RX_POOL_SIZE; i++) - list_add(&rba->pool[i].list, &rba->rbd_empty); -} - -static void iwl_pcie_rx_free_rba(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - int i; - - lockdep_assert_held(&rba->lock); - - for (i = 0; i < RX_POOL_SIZE; i++) { - if (!rba->pool[i].page) - continue; - dma_unmap_page(trans->dev, rba->pool[i].page_dma, - PAGE_SIZE << trans_pcie->rx_page_order, - DMA_FROM_DEVICE); - __free_pages(rba->pool[i].page, trans_pcie->rx_page_order); - rba->pool[i].page = NULL; - } -} - int iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; - struct iwl_rb_allocator *rba = &trans_pcie->rba; int i, err; if (!rxq->bd) { @@ -687,21 +503,11 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) if (err) return err; } - if (!rba->alloc_wq) - rba->alloc_wq = alloc_workqueue("rb_allocator", - WQ_HIGHPRI | WQ_UNBOUND, 1); - INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); - - spin_lock(&rba->lock); - atomic_set(&rba->req_pending, 0); - atomic_set(&rba->req_ready, 0); - /* free all first - we might be reconfigured for a different size */ - iwl_pcie_rx_free_rba(trans); - iwl_pcie_rx_init_rba(rba); - spin_unlock(&rba->lock); spin_lock(&rxq->lock); 
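Right here the patch wires the restored work item back in: iwl_pcie_rx_init() runs INIT_WORK() on trans_pcie->rx_replenish, and iwl_pcie_rx_free() later pairs it with cancel_work_sync() before the queue is torn down. A generic sketch of that init/teardown pairing (struct my_trans and the my_* functions are hypothetical):

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Illustrative names only. */
struct my_trans {
	struct work_struct rx_replenish;
};

static void my_replenish_work(struct work_struct *work)
{
	struct my_trans *trans = container_of(work, struct my_trans, rx_replenish);

	/* ... allocate pages with GFP_KERNEL and restock the RX queue ... */
	(void)trans;
}

static void my_rx_init(struct my_trans *trans)
{
	INIT_WORK(&trans->rx_replenish, my_replenish_work);
}

static void my_rx_free(struct my_trans *trans)
{
	/* guarantee no replenish runs once the buffers are being freed */
	cancel_work_sync(&trans->rx_replenish);
}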
+ INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work); + /* free all first - we might be reconfigured for a different size */ iwl_pcie_rxq_free_rbs(trans); iwl_pcie_rx_init_rxb_lists(rxq); @@ -716,7 +522,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); spin_unlock(&rxq->lock); - iwl_pcie_rx_replenish(trans); + iwl_pcie_rx_replenish(trans, GFP_KERNEL); iwl_pcie_rx_hw_init(trans, rxq); @@ -731,7 +537,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; - struct iwl_rb_allocator *rba = &trans_pcie->rba; /*if rxq->bd is NULL, it means that nothing has been allocated, * exit now */ @@ -740,15 +545,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) return; } - cancel_work_sync(&rba->rx_alloc); - if (rba->alloc_wq) { - destroy_workqueue(rba->alloc_wq); - rba->alloc_wq = NULL; - } - - spin_lock(&rba->lock); - iwl_pcie_rx_free_rba(trans); - spin_unlock(&rba->lock); + cancel_work_sync(&trans_pcie->rx_replenish); spin_lock(&rxq->lock); iwl_pcie_rxq_free_rbs(trans); @@ -769,43 +566,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) rxq->rb_stts = NULL; } -/* - * iwl_pcie_rx_reuse_rbd - Recycle used RBDs - * - * Called when a RBD can be reused. The RBD is transferred to the allocator. - * When there are 2 empty RBDs - a request for allocation is posted - */ -static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, - struct iwl_rx_mem_buffer *rxb, - struct iwl_rxq *rxq) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - - /* Count the used RBDs */ - rxq->used_count++; - - /* Move the RBD to the used list, will be moved to allocator in batches - * before claiming or posting a request*/ - list_add_tail(&rxb->list, &rxq->rx_used); - - /* If we have RX_POST_REQ_ALLOC new released rx buffers - - * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is - * used for the case we failed to claim RX_CLAIM_REQ_ALLOC, - * after but we still need to post another request. - */ - if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { - /* Move the 2 RBDs to the allocator ownership. 
- Allocator has another 6 from pool for the request completion*/ - spin_lock(&rba->lock); - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); - spin_unlock(&rba->lock); - - atomic_inc(&rba->req_pending); - queue_work(rba->alloc_wq, &rba->rx_alloc); - } -} - static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb) { @@ -928,13 +688,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, */ __free_pages(rxb->page, trans_pcie->rx_page_order); rxb->page = NULL; - iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); + list_add_tail(&rxb->list, &rxq->rx_used); } else { list_add_tail(&rxb->list, &rxq->rx_free); rxq->free_count++; } } else - iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); + list_add_tail(&rxb->list, &rxq->rx_used); } /* @@ -944,7 +704,10 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; - u32 r, i, j; + u32 r, i; + u8 fill_rx = 0; + u32 count = 8; + int total_empty; restart: spin_lock(&rxq->lock); @@ -957,6 +720,14 @@ restart: if (i == r) IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); + /* calculate total frames need to be restock after handling RX */ + total_empty = r - rxq->write_actual; + if (total_empty < 0) + total_empty += RX_QUEUE_SIZE; + + if (total_empty > (RX_QUEUE_SIZE / 2)) + fill_rx = 1; + while (i != r) { struct iwl_rx_mem_buffer *rxb; @@ -968,48 +739,29 @@ restart: iwl_pcie_rx_handle_rb(trans, rxb); i = (i + 1) & RX_QUEUE_MASK; - - /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - - * try to claim the pre-allocated buffers from the allocator */ - if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { - struct iwl_rb_allocator *rba = &trans_pcie->rba; - struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; - - /* Add the remaining 6 empty RBDs for allocator use */ - spin_lock(&rba->lock); - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); - spin_unlock(&rba->lock); - - /* If not ready - continue, will try to reclaim later. - * No need to reschedule work - allocator exits only on - * success */ - if (!iwl_pcie_rx_allocator_get(trans, out)) { - /* If success - then RX_CLAIM_REQ_ALLOC - * buffers were retrieved and should be added - * to free list */ - rxq->used_count -= RX_CLAIM_REQ_ALLOC; - for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) { - list_add_tail(&out[j]->list, - &rxq->rx_free); - rxq->free_count++; - } + /* If there are a lot of unused frames, + * restock the Rx queue so ucode wont assert. 
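The handler loop just below restores the fill_rx batching: when more than half the ring is empty, every eighth handled frame triggers an immediate GFP_ATOMIC replenish so the firmware is never left without buffers. A simplified sketch of that control flow (all my_* names are illustrative; the real code also drops rxq->lock and restarts the loop):

/* Illustrative sketch; locking and the restart label are omitted. */
#define MY_QUEUE_SIZE	256
#define MY_QUEUE_MASK	(MY_QUEUE_SIZE - 1)

struct my_rx_ring {
	int read;
	int write_actual;
};

static void my_handle_one(struct my_rx_ring *rxq, int idx) { /* pass frame to the stack */ }
static void my_replenish_atomic(struct my_rx_ring *rxq) { /* GFP_ATOMIC alloc + restock */ }

static void my_rx_handle(struct my_rx_ring *rxq, int r)
{
	int i = rxq->read;
	int count = 0;
	int fill_rx;
	int total_empty = r - rxq->write_actual;

	if (total_empty < 0)
		total_empty += MY_QUEUE_SIZE;
	fill_rx = total_empty > MY_QUEUE_SIZE / 2;

	while (i != r) {
		my_handle_one(rxq, i);
		i = (i + 1) & MY_QUEUE_MASK;

		/* top the ring up every 8 frames while it is badly drained */
		if (fill_rx && ++count >= 8) {
			rxq->read = i;
			my_replenish_atomic(rxq);
			count = 0;
		}
	}
	rxq->read = i;
}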
*/ + if (fill_rx) { + count++; + if (count >= 8) { + rxq->read = i; + spin_unlock(&rxq->lock); + iwl_pcie_rx_replenish(trans, GFP_ATOMIC); + count = 0; + goto restart; } } - /* handle restock for two cases: - * - we just pulled buffers from the allocator - * - we have 8+ unstolen pages accumulated */ - if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { - rxq->read = i; - spin_unlock(&rxq->lock); - iwl_pcie_rxq_restock(trans); - goto restart; - } } /* Backtrack one entry */ rxq->read = i; spin_unlock(&rxq->lock); + if (fill_rx) + iwl_pcie_rx_replenish(trans, GFP_ATOMIC); + else + iwl_pcie_rxq_restock(trans); + if (trans_pcie->napi.poll) napi_gro_flush(&trans_pcie->napi, false); } diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 43ae658af6ec..6203c4ad9bba 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -182,7 +182,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val) static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) { - if (!trans->cfg->apmg_not_supported) + if (trans->cfg->apmg_not_supported) return; if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) @@ -2459,7 +2459,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; u16 pci_cmd; - int err; + int ret; trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, cfg, &trans_ops_pcie, 0); @@ -2474,8 +2474,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, spin_lock_init(&trans_pcie->ref_lock); init_waitqueue_head(&trans_pcie->ucode_write_waitq); - err = pci_enable_device(pdev); - if (err) + ret = pci_enable_device(pdev); + if (ret) goto out_no_pci; if (!cfg->base_params->pcie_l1_allowed) { @@ -2491,23 +2491,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, pci_set_master(pdev); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); - if (err) { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); + if (!ret) + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); + if (ret) { + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!ret) + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); /* both attempts failed: */ - if (err) { + if (ret) { dev_err(&pdev->dev, "No suitable DMA available\n"); goto out_pci_disable_device; } } - err = pci_request_regions(pdev, DRV_NAME); - if (err) { + ret = pci_request_regions(pdev, DRV_NAME); + if (ret) { dev_err(&pdev->dev, "pci_request_regions failed\n"); goto out_pci_disable_device; } @@ -2515,7 +2515,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->hw_base = pci_ioremap_bar(pdev, 0); if (!trans_pcie->hw_base) { dev_err(&pdev->dev, "pci_ioremap_bar failed\n"); - err = -ENODEV; + ret = -ENODEV; goto out_pci_release_regions; } @@ -2527,9 +2527,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->pci_dev = pdev; iwl_disable_interrupts(trans); - err = pci_enable_msi(pdev); - if (err) { - dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); + ret = pci_enable_msi(pdev); + if (ret) { + dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret); /* enable rfkill interrupt: hw bug w/a */ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { @@ -2547,11 +2547,16 @@ 
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, */ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { unsigned long flags; - int ret; trans->hw_rev = (trans->hw_rev & 0xfff0) | (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); + ret = iwl_pcie_prepare_card_hw(trans); + if (ret) { + IWL_WARN(trans, "Exit HW not ready\n"); + goto out_pci_disable_msi; + } + /* * in-order to recognize C step driver should read chip version * id located at the AUX bus MISC address space. @@ -2591,13 +2596,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, /* Initialize the wait queue for commands */ init_waitqueue_head(&trans_pcie->wait_command_queue); - if (iwl_pcie_alloc_ict(trans)) + ret = iwl_pcie_alloc_ict(trans); + if (ret) goto out_pci_disable_msi; - err = request_threaded_irq(pdev->irq, iwl_pcie_isr, + ret = request_threaded_irq(pdev->irq, iwl_pcie_isr, iwl_pcie_irq_handler, IRQF_SHARED, DRV_NAME, trans); - if (err) { + if (ret) { IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); goto out_free_ict; } @@ -2617,5 +2623,5 @@ out_pci_disable_device: pci_disable_device(pdev); out_no_pci: iwl_trans_free(trans); - return ERR_PTR(err); + return ERR_PTR(ret); } diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 880d0d63e872..7d50711476fe 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1566,13 +1566,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue) smp_rmb(); while (dc != dp) { - BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS); + BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS); pending_idx = queue->dealloc_ring[pending_index(dc++)]; - pending_idx_release[gop-queue->tx_unmap_ops] = + pending_idx_release[gop - queue->tx_unmap_ops] = pending_idx; - queue->pages_to_unmap[gop-queue->tx_unmap_ops] = + queue->pages_to_unmap[gop - queue->tx_unmap_ops] = queue->mmap_pages[pending_idx]; gnttab_set_unmap_op(gop, idx_to_kaddr(queue, pending_idx), diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index a5233422f9dc..7384455792bf 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -458,10 +458,15 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus, nvdimm_bus_unlock(dev); } if (is_nd_btt(dev) && probe) { + struct nd_btt *nd_btt = to_nd_btt(dev); + nd_region = to_nd_region(dev->parent); nvdimm_bus_lock(dev); if (nd_region->btt_seed == dev) nd_region_create_btt_seed(nd_region); + if (nd_region->ns_seed == &nd_btt->ndns->dev && + is_nd_blk(dev->parent)) + nd_region_create_blk_seed(nd_region); nvdimm_bus_unlock(dev); } } diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 8067f54ce050..5ce5ef211bdb 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -891,8 +891,10 @@ parport_register_dev_model(struct parport *port, const char *name, par_dev->dev.release = free_pardevice; par_dev->devmodel = true; ret = device_register(&par_dev->dev); - if (ret) - goto err_put_dev; + if (ret) { + put_device(&par_dev->dev); + goto err_put_port; + } /* Chain this onto the list */ par_dev->prev = NULL; @@ -907,7 +909,8 @@ parport_register_dev_model(struct parport *port, const char *name, spin_unlock(&port->physport->pardevice_lock); pr_debug("%s: cannot grant exclusive access for device %s\n", port->name, name); - goto err_put_dev; + device_unregister(&par_dev->dev); + goto err_put_port; } port->flags |= PARPORT_FLAG_EXCL; } @@ -938,8 +941,6 @@ parport_register_dev_model(struct parport 
*port, const char *name, return par_dev; -err_put_dev: - put_device(&par_dev->dev); err_free_devname: kfree(devname); err_free_par_dev: diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index c0e6ede3e27d..6b8dd162f644 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -56,6 +56,7 @@ config PHY_EXYNOS_MIPI_VIDEO config PHY_PXA_28NM_HSIC tristate "Marvell USB HSIC 28nm PHY Driver" + depends on HAS_IOMEM select GENERIC_PHY help Enable this to support Marvell USB HSIC PHY driver for Marvell @@ -66,6 +67,7 @@ config PHY_PXA_28NM_HSIC config PHY_PXA_28NM_USB2 tristate "Marvell USB 2.0 28nm PHY Driver" + depends on HAS_IOMEM select GENERIC_PHY help Enable this to support Marvell USB 2.0 PHY driver for Marvell diff --git a/drivers/phy/phy-berlin-usb.c b/drivers/phy/phy-berlin-usb.c index c6fc95b53083..335e06d66ed9 100644 --- a/drivers/phy/phy-berlin-usb.c +++ b/drivers/phy/phy-berlin-usb.c @@ -105,9 +105,9 @@ static const u32 phy_berlin_pll_dividers[] = { /* Berlin 2 */ - CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54), - /* Berlin 2CD */ CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55), + /* Berlin 2CD/Q */ + CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54), }; struct phy_berlin_usb_priv { diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c index 53f295c1bab1..3510b81db3fa 100644 --- a/drivers/phy/phy-ti-pipe3.c +++ b/drivers/phy/phy-ti-pipe3.c @@ -28,7 +28,6 @@ #include <linux/delay.h> #include <linux/phy/omap_control_phy.h> #include <linux/of_platform.h> -#include <linux/spinlock.h> #define PLL_STATUS 0x00000004 #define PLL_GO 0x00000008 @@ -83,10 +82,6 @@ struct ti_pipe3 { struct clk *refclk; struct clk *div_clk; struct pipe3_dpll_map *dpll_map; - bool enabled; - spinlock_t lock; /* serialize clock enable/disable */ - /* the below flag is needed specifically for SATA */ - bool refclk_enabled; }; static struct pipe3_dpll_map dpll_map_usb[] = { @@ -137,6 +132,9 @@ static struct pipe3_dpll_params *ti_pipe3_get_dpll_params(struct ti_pipe3 *phy) return NULL; } +static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy); +static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy); + static int ti_pipe3_power_off(struct phy *x) { struct ti_pipe3 *phy = phy_get_drvdata(x); @@ -217,6 +215,7 @@ static int ti_pipe3_init(struct phy *x) u32 val; int ret = 0; + ti_pipe3_enable_clocks(phy); /* * Set pcie_pcs register to 0x96 for proper functioning of phy * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table @@ -250,33 +249,35 @@ static int ti_pipe3_exit(struct phy *x) u32 val; unsigned long timeout; - /* SATA DPLL can't be powered down due to Errata i783 and PCIe - * does not have internal DPLL - */ - if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") || - of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) + /* SATA DPLL can't be powered down due to Errata i783 */ + if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) return 0; - /* Put DPLL in IDLE mode */ - val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2); - val |= PLL_IDLE; - ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val); - - /* wait for LDO and Oscillator to power down */ - timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME); - do { - cpu_relax(); - val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); - if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN)) - break; - } while (!time_after(jiffies, timeout)); + /* PCIe doesn't have internal DPLL */ + if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) { + /* Put DPLL in IDLE mode */ + val = 
ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2); + val |= PLL_IDLE; + ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val); - if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) { - dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n", - val); - return -EBUSY; + /* wait for LDO and Oscillator to power down */ + timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME); + do { + cpu_relax(); + val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); + if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN)) + break; + } while (!time_after(jiffies, timeout)); + + if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) { + dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n", + val); + return -EBUSY; + } } + ti_pipe3_disable_clocks(phy); + return 0; } static struct phy_ops ops = { @@ -306,7 +307,6 @@ static int ti_pipe3_probe(struct platform_device *pdev) return -ENOMEM; phy->dev = &pdev->dev; - spin_lock_init(&phy->lock); if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { match = of_match_device(ti_pipe3_id_table, &pdev->dev); @@ -402,6 +402,10 @@ static int ti_pipe3_probe(struct platform_device *pdev) platform_set_drvdata(pdev, phy); pm_runtime_enable(phy->dev); + /* Prevent auto-disable of refclk for SATA PHY due to Errata i783 */ + if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) + if (!IS_ERR(phy->refclk)) + clk_prepare_enable(phy->refclk); generic_phy = devm_phy_create(phy->dev, NULL, &ops); if (IS_ERR(generic_phy)) @@ -413,63 +417,33 @@ static int ti_pipe3_probe(struct platform_device *pdev) if (IS_ERR(phy_provider)) return PTR_ERR(phy_provider); - pm_runtime_get(&pdev->dev); - return 0; } static int ti_pipe3_remove(struct platform_device *pdev) { - if (!pm_runtime_suspended(&pdev->dev)) - pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } -#ifdef CONFIG_PM -static int ti_pipe3_enable_refclk(struct ti_pipe3 *phy) +static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy) { - if (!IS_ERR(phy->refclk) && !phy->refclk_enabled) { - int ret; + int ret = 0; + if (!IS_ERR(phy->refclk)) { ret = clk_prepare_enable(phy->refclk); if (ret) { dev_err(phy->dev, "Failed to enable refclk %d\n", ret); return ret; } - phy->refclk_enabled = true; } - return 0; -} - -static void ti_pipe3_disable_refclk(struct ti_pipe3 *phy) -{ - if (!IS_ERR(phy->refclk)) - clk_disable_unprepare(phy->refclk); - - phy->refclk_enabled = false; -} - -static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy) -{ - int ret = 0; - unsigned long flags; - - spin_lock_irqsave(&phy->lock, flags); - if (phy->enabled) - goto err1; - - ret = ti_pipe3_enable_refclk(phy); - if (ret) - goto err1; - if (!IS_ERR(phy->wkupclk)) { ret = clk_prepare_enable(phy->wkupclk); if (ret) { dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret); - goto err2; + goto disable_refclk; } } @@ -477,96 +451,33 @@ static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy) ret = clk_prepare_enable(phy->div_clk); if (ret) { dev_err(phy->dev, "Failed to enable div_clk %d\n", ret); - goto err3; + goto disable_wkupclk; } } - phy->enabled = true; - spin_unlock_irqrestore(&phy->lock, flags); return 0; -err3: +disable_wkupclk: if (!IS_ERR(phy->wkupclk)) clk_disable_unprepare(phy->wkupclk); -err2: +disable_refclk: if (!IS_ERR(phy->refclk)) clk_disable_unprepare(phy->refclk); - ti_pipe3_disable_refclk(phy); -err1: - spin_unlock_irqrestore(&phy->lock, flags); return ret; } static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy) { - unsigned long flags; - - spin_lock_irqsave(&phy->lock, flags); - if (!phy->enabled) { - 
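ti_pipe3_enable_clocks() above turns on refclk, then wkupclk, then div_clk, and unwinds with clk_disable_unprepare() in reverse order when a later enable fails, skipping any clock that was never obtained (IS_ERR). A self-contained sketch of that ladder for two clocks (struct my_phy is illustrative):

#include <linux/clk.h>
#include <linux/err.h>

/* Illustrative two-clock version of the enable/unwind ladder. */
struct my_phy {
	struct clk *refclk;
	struct clk *wkupclk;
};

static int my_enable_clocks(struct my_phy *phy)
{
	int ret;

	if (!IS_ERR(phy->refclk)) {
		ret = clk_prepare_enable(phy->refclk);
		if (ret)
			return ret;
	}

	if (!IS_ERR(phy->wkupclk)) {
		ret = clk_prepare_enable(phy->wkupclk);
		if (ret)
			goto disable_refclk;
	}

	return 0;

disable_refclk:
	if (!IS_ERR(phy->refclk))
		clk_disable_unprepare(phy->refclk);
	return ret;
}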
spin_unlock_irqrestore(&phy->lock, flags); - return; - } - if (!IS_ERR(phy->wkupclk)) clk_disable_unprepare(phy->wkupclk); - /* Don't disable refclk for SATA PHY due to Errata i783 */ - if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) - ti_pipe3_disable_refclk(phy); + if (!IS_ERR(phy->refclk)) + clk_disable_unprepare(phy->refclk); if (!IS_ERR(phy->div_clk)) clk_disable_unprepare(phy->div_clk); - phy->enabled = false; - spin_unlock_irqrestore(&phy->lock, flags); -} - -static int ti_pipe3_runtime_suspend(struct device *dev) -{ - struct ti_pipe3 *phy = dev_get_drvdata(dev); - - ti_pipe3_disable_clocks(phy); - return 0; } -static int ti_pipe3_runtime_resume(struct device *dev) -{ - struct ti_pipe3 *phy = dev_get_drvdata(dev); - int ret = 0; - - ret = ti_pipe3_enable_clocks(phy); - return ret; -} - -static int ti_pipe3_suspend(struct device *dev) -{ - struct ti_pipe3 *phy = dev_get_drvdata(dev); - - ti_pipe3_disable_clocks(phy); - return 0; -} - -static int ti_pipe3_resume(struct device *dev) -{ - struct ti_pipe3 *phy = dev_get_drvdata(dev); - int ret; - - ret = ti_pipe3_enable_clocks(phy); - if (ret) - return ret; - - pm_runtime_disable(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - return 0; -} -#endif - -static const struct dev_pm_ops ti_pipe3_pm_ops = { - SET_RUNTIME_PM_OPS(ti_pipe3_runtime_suspend, - ti_pipe3_runtime_resume, NULL) - SET_SYSTEM_SLEEP_PM_OPS(ti_pipe3_suspend, ti_pipe3_resume) -}; - static const struct of_device_id ti_pipe3_id_table[] = { { .compatible = "ti,phy-usb3", @@ -592,7 +503,6 @@ static struct platform_driver ti_pipe3_driver = { .remove = ti_pipe3_remove, .driver = { .name = "ti-pipe3", - .pm = &ti_pipe3_pm_ops, .of_match_table = ti_pipe3_id_table, }, }; diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index efcf2a2b3975..6177315ab74e 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -473,6 +473,8 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data) spin_lock_irqsave(&pc->irq_lock[bank], flags); bcm2835_gpio_irq_config(pc, gpio, false); + /* Clear events that were latched prior to clearing event sources */ + bcm2835_gpio_set_bit(pc, GPEDS0, gpio); clear_bit(offset, &pc->enabled_irq_map[bank]); spin_unlock_irqrestore(&pc->irq_lock[bank], flags); } diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c index 5fd4437cee15..88a7fac11bd4 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c +++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c @@ -403,14 +403,13 @@ static int imx1_pinconf_set(struct pinctrl_dev *pctldev, unsigned num_configs) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); - const struct imx1_pinctrl_soc_info *info = ipctl->info; int i; for (i = 0; i != num_configs; ++i) { imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN); dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n", - info->pins[pin_id].name); + pin_desc_get(pctldev, pin_id)->name); } return 0; diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c index 557d0f2a3031..97681fac082e 100644 --- a/drivers/pinctrl/nomadik/pinctrl-abx500.c +++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c @@ -787,7 +787,6 @@ static const struct pinmux_ops abx500_pinmux_ops = { .set_mux = abx500_pmx_set, .gpio_request_enable = abx500_gpio_request_enable, .gpio_disable_free = abx500_gpio_disable_free, - .strict = true, }; static int abx500_get_groups_cnt(struct 
pinctrl_dev *pctldev) diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c index ef0b697639a7..347c763a6a78 100644 --- a/drivers/pinctrl/pinctrl-lpc18xx.c +++ b/drivers/pinctrl/pinctrl-lpc18xx.c @@ -823,7 +823,7 @@ static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev, break; case PIN_CONFIG_INPUT_SCHMITT_ENABLE: - if (param) + if (param_val) *reg &= ~(LPC18XX_SCU_I2C0_ZIF << shift); else *reg |= (LPC18XX_SCU_I2C0_ZIF << shift); @@ -876,7 +876,7 @@ static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev, break; case PIN_CONFIG_INPUT_SCHMITT_ENABLE: - if (param) + if (param_val) *reg &= ~LPC18XX_SCU_PIN_ZIF; else *reg |= LPC18XX_SCU_PIN_ZIF; diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index b2de09d3b1a0..0b8d480171a3 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1760,7 +1760,8 @@ static int pcs_irq_init_chained_handler(struct pcs_device *pcs, int res; res = request_irq(pcs_soc->irq, pcs_irq_handler, - IRQF_SHARED | IRQF_NO_SUSPEND, + IRQF_SHARED | IRQF_NO_SUSPEND | + IRQF_NO_THREAD, name, pcs_soc); if (res) { pcs_soc->irq = -1; diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index 3dd5a3b2ac62..c760bf43d116 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -33,11 +33,6 @@ #include "../core.h" #include "pinctrl-samsung.h" -#define GROUP_SUFFIX "-grp" -#define GSUFFIX_LEN sizeof(GROUP_SUFFIX) -#define FUNCTION_SUFFIX "-mux" -#define FSUFFIX_LEN sizeof(FUNCTION_SUFFIX) - /* list of all possible config options supported */ static struct pin_config { const char *property; diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h index c7508d5f6886..0874cfee6889 100644 --- a/drivers/pinctrl/sh-pfc/sh_pfc.h +++ b/drivers/pinctrl/sh-pfc/sh_pfc.h @@ -224,7 +224,7 @@ struct sh_pfc_soc_info { /* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */ #define _GP_GPIO(bank, _pin, _name, sfx) \ - [(bank * 32) + _pin] = { \ + { \ .pin = (bank * 32) + _pin, \ .name = __stringify(_name), \ .enum_id = _name##_DATA, \ diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c index f87a5eaf75da..0afaf79a4e51 100644 --- a/drivers/pinctrl/spear/pinctrl-spear.c +++ b/drivers/pinctrl/spear/pinctrl-spear.c @@ -2,7 +2,7 @@ * Driver for the ST Microelectronics SPEAr pinmux * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * Inspired from: * - U300 Pinctl drivers diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h index dc8bf85ecb2a..27c2cc8d83ad 100644 --- a/drivers/pinctrl/spear/pinctrl-spear.h +++ b/drivers/pinctrl/spear/pinctrl-spear.h @@ -2,7 +2,7 @@ * Driver header file for the ST Microelectronics SPEAr pinmux * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. 
This program is licensed "as is" without any diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c index a7bdc537efa7..92611bb757ac 100644 --- a/drivers/pinctrl/spear/pinctrl-spear1310.c +++ b/drivers/pinctrl/spear/pinctrl-spear1310.c @@ -2,7 +2,7 @@ * Driver for the ST Microelectronics SPEAr1310 pinmux * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any @@ -2730,7 +2730,7 @@ static void __exit spear1310_pinctrl_exit(void) } module_exit(spear1310_pinctrl_exit); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match); diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c index f43ec85a0328..f842e9dc40d0 100644 --- a/drivers/pinctrl/spear/pinctrl-spear1340.c +++ b/drivers/pinctrl/spear/pinctrl-spear1340.c @@ -2,7 +2,7 @@ * Driver for the ST Microelectronics SPEAr1340 pinmux * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any @@ -2046,7 +2046,7 @@ static void __exit spear1340_pinctrl_exit(void) } module_exit(spear1340_pinctrl_exit); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match); diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c index da8990a8eeef..d998a2ccff48 100644 --- a/drivers/pinctrl/spear/pinctrl-spear300.c +++ b/drivers/pinctrl/spear/pinctrl-spear300.c @@ -2,7 +2,7 @@ * Driver for the ST Microelectronics SPEAr300 pinmux * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any @@ -703,7 +703,7 @@ static void __exit spear300_pinctrl_exit(void) } module_exit(spear300_pinctrl_exit); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match); diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c index 31ede51e819b..609b18aceb16 100644 --- a/drivers/pinctrl/spear/pinctrl-spear310.c +++ b/drivers/pinctrl/spear/pinctrl-spear310.c @@ -2,7 +2,7 @@ * Driver for the ST Microelectronics SPEAr310 pinmux * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. 
This program is licensed "as is" without any @@ -426,7 +426,7 @@ static void __exit spear310_pinctrl_exit(void) } module_exit(spear310_pinctrl_exit); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(of, spear310_pinctrl_of_match); diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c index 506e40b641e0..c07114431bd4 100644 --- a/drivers/pinctrl/spear/pinctrl-spear320.c +++ b/drivers/pinctrl/spear/pinctrl-spear320.c @@ -2,7 +2,7 @@ * Driver for the ST Microelectronics SPEAr320 pinmux * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any @@ -3467,7 +3467,7 @@ static void __exit spear320_pinctrl_exit(void) } module_exit(spear320_pinctrl_exit); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match); diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.c b/drivers/pinctrl/spear/pinctrl-spear3xx.c index 12ee21af766b..d3119aafe709 100644 --- a/drivers/pinctrl/spear/pinctrl-spear3xx.c +++ b/drivers/pinctrl/spear/pinctrl-spear3xx.c @@ -2,7 +2,7 @@ * Driver for the ST Microelectronics SPEAr3xx pinmux * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.h b/drivers/pinctrl/spear/pinctrl-spear3xx.h index 7860b36053c4..ce19dcf8f08b 100644 --- a/drivers/pinctrl/spear/pinctrl-spear3xx.h +++ b/drivers/pinctrl/spear/pinctrl-spear3xx.h @@ -2,7 +2,7 @@ * Header file for the ST Microelectronics SPEAr3xx pinmux * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index ed317ccac4a2..aaeeae81e3a9 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -309,12 +309,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = { static struct calling_interface_buffer *buffer; static DEFINE_MUTEX(buffer_mutex); -static int hwswitch_state; +static void clear_buffer(void) +{ + memset(buffer, 0, sizeof(struct calling_interface_buffer)); +} static void get_buffer(void) { mutex_lock(&buffer_mutex); - memset(buffer, 0, sizeof(struct calling_interface_buffer)); + clear_buffer(); } static void release_buffer(void) @@ -548,21 +551,41 @@ static int dell_rfkill_set(void *data, bool blocked) int disable = blocked ? 
1 : 0; unsigned long radio = (unsigned long)data; int hwswitch_bit = (unsigned long)data - 1; + int hwswitch; + int status; + int ret; get_buffer(); + + dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; + status = buffer->output[1]; + + if (ret != 0) + goto out; + + clear_buffer(); + + buffer->input[0] = 0x2; dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; + hwswitch = buffer->output[1]; /* If the hardware switch controls this radio, and the hardware switch is disabled, always disable the radio */ - if ((hwswitch_state & BIT(hwswitch_bit)) && - !(buffer->output[1] & BIT(16))) + if (ret == 0 && (hwswitch & BIT(hwswitch_bit)) && + (status & BIT(0)) && !(status & BIT(16))) disable = 1; + clear_buffer(); + buffer->input[0] = (1 | (radio<<8) | (disable << 16)); dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; + out: release_buffer(); - return 0; + return dell_smi_error(ret); } /* Must be called with the buffer held */ @@ -572,6 +595,7 @@ static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio, if (status & BIT(0)) { /* Has hw-switch, sync sw_state to BIOS */ int block = rfkill_blocked(rfkill); + clear_buffer(); buffer->input[0] = (1 | (radio << 8) | (block << 16)); dell_send_request(buffer, 17, 11); } else { @@ -581,23 +605,43 @@ static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio, } static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio, - int status) + int status, int hwswitch) { - if (hwswitch_state & (BIT(radio - 1))) + if (hwswitch & (BIT(radio - 1))) rfkill_set_hw_state(rfkill, !(status & BIT(16))); } static void dell_rfkill_query(struct rfkill *rfkill, void *data) { + int radio = ((unsigned long)data & 0xF); + int hwswitch; int status; + int ret; get_buffer(); + dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; status = buffer->output[1]; - dell_rfkill_update_hw_state(rfkill, (unsigned long)data, status); + if (ret != 0 || !(status & BIT(0))) { + release_buffer(); + return; + } + + clear_buffer(); + + buffer->input[0] = 0x2; + dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; + hwswitch = buffer->output[1]; release_buffer(); + + if (ret != 0) + return; + + dell_rfkill_update_hw_state(rfkill, radio, status, hwswitch); } static const struct rfkill_ops dell_rfkill_ops = { @@ -609,13 +653,27 @@ static struct dentry *dell_laptop_dir; static int dell_debugfs_show(struct seq_file *s, void *data) { + int hwswitch_state; + int hwswitch_ret; int status; + int ret; get_buffer(); + dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; status = buffer->output[1]; + + clear_buffer(); + + buffer->input[0] = 0x2; + dell_send_request(buffer, 17, 11); + hwswitch_ret = buffer->output[0]; + hwswitch_state = buffer->output[1]; + release_buffer(); + seq_printf(s, "return:\t%d\n", ret); seq_printf(s, "status:\t0x%X\n", status); seq_printf(s, "Bit 0 : Hardware switch supported: %lu\n", status & BIT(0)); @@ -657,7 +715,8 @@ static int dell_debugfs_show(struct seq_file *s, void *data) seq_printf(s, "Bit 21: WiGig is blocked: %lu\n", (status & BIT(21)) >> 21); - seq_printf(s, "\nhwswitch_state:\t0x%X\n", hwswitch_state); + seq_printf(s, "\nhwswitch_return:\t%d\n", hwswitch_ret); + seq_printf(s, "hwswitch_state:\t0x%X\n", hwswitch_state); seq_printf(s, "Bit 0 : Wifi controlled by switch: %lu\n", hwswitch_state & BIT(0)); seq_printf(s, "Bit 1 : Bluetooth controlled by switch: %lu\n", @@ -693,25 +752,43 @@ static const struct file_operations dell_debugfs_fops = { static void 
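dell_update_rfkill() below follows the same pattern the earlier rfkill hunks introduce: issue the class 17, select 11 call once for the status word, once more with input[0] = 0x2 for the hardware-switch word, and only trust each output when output[0] reports success, instead of relying on the old cached hwswitch_state. A condensed sketch of that sequence, reusing the buffer helpers shown in these hunks (my_read_rfkill_state itself is hypothetical):

/* Hypothetical helper built on the driver's get_buffer()/clear_buffer()/
 * release_buffer()/dell_send_request() shown in the hunks above. */
static int my_read_rfkill_state(int *status, int *hwswitch)
{
	int ret;

	get_buffer();

	dell_send_request(buffer, 17, 11);	/* wireless status query */
	ret = buffer->output[0];
	*status = buffer->output[1];
	if (ret != 0)
		goto out;

	clear_buffer();
	buffer->input[0] = 0x2;			/* hardware switch sub-query */
	dell_send_request(buffer, 17, 11);
	ret = buffer->output[0];
	*hwswitch = buffer->output[1];
out:
	release_buffer();
	return ret;
}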
dell_update_rfkill(struct work_struct *ignored) { + int hwswitch = 0; int status; + int ret; get_buffer(); + dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; status = buffer->output[1]; + if (ret != 0) + goto out; + + clear_buffer(); + + buffer->input[0] = 0x2; + dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; + + if (ret == 0 && (status & BIT(0))) + hwswitch = buffer->output[1]; + if (wifi_rfkill) { - dell_rfkill_update_hw_state(wifi_rfkill, 1, status); + dell_rfkill_update_hw_state(wifi_rfkill, 1, status, hwswitch); dell_rfkill_update_sw_state(wifi_rfkill, 1, status); } if (bluetooth_rfkill) { - dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status); + dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status, + hwswitch); dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status); } if (wwan_rfkill) { - dell_rfkill_update_hw_state(wwan_rfkill, 3, status); + dell_rfkill_update_hw_state(wwan_rfkill, 3, status, hwswitch); dell_rfkill_update_sw_state(wwan_rfkill, 3, status); } + out: release_buffer(); } static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill); @@ -773,21 +850,17 @@ static int __init dell_setup_rfkill(void) get_buffer(); dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; status = buffer->output[1]; - buffer->input[0] = 0x2; - dell_send_request(buffer, 17, 11); - hwswitch_state = buffer->output[1]; release_buffer(); - if (!(status & BIT(0))) { - if (force_rfkill) { - /* No hwsitch, clear all hw-controlled bits */ - hwswitch_state &= ~7; - } else { - /* rfkill is only tested on laptops with a hwswitch */ - return 0; - } - } + /* dell wireless info smbios call is not supported */ + if (ret != 0) + return 0; + + /* rfkill is only tested on laptops with a hwswitch */ + if (!(status & BIT(0)) && !force_rfkill) + return 0; if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) { wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev, @@ -932,47 +1005,50 @@ static void dell_cleanup_rfkill(void) static int dell_send_intensity(struct backlight_device *bd) { - int ret = 0; + int token; + int ret; + + token = find_token_location(BRIGHTNESS_TOKEN); + if (token == -1) + return -ENODEV; get_buffer(); - buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); + buffer->input[0] = token; buffer->input[1] = bd->props.brightness; - if (buffer->input[0] == -1) { - ret = -ENODEV; - goto out; - } - if (power_supply_is_system_supplied() > 0) dell_send_request(buffer, 1, 2); else dell_send_request(buffer, 1, 1); - out: + ret = dell_smi_error(buffer->output[0]); + release_buffer(); return ret; } static int dell_get_intensity(struct backlight_device *bd) { - int ret = 0; + int token; + int ret; - get_buffer(); - buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); + token = find_token_location(BRIGHTNESS_TOKEN); + if (token == -1) + return -ENODEV; - if (buffer->input[0] == -1) { - ret = -ENODEV; - goto out; - } + get_buffer(); + buffer->input[0] = token; if (power_supply_is_system_supplied() > 0) dell_send_request(buffer, 0, 2); else dell_send_request(buffer, 0, 1); - ret = buffer->output[1]; + if (buffer->output[0]) + ret = dell_smi_error(buffer->output[0]); + else + ret = buffer->output[1]; - out: release_buffer(); return ret; } @@ -2036,6 +2112,7 @@ static void kbd_led_exit(void) static int __init dell_init(void) { int max_intensity = 0; + int token; int ret; if (!dmi_check_system(dell_device_table)) @@ -2094,13 +2171,15 @@ static int __init dell_init(void) if (acpi_video_get_backlight_type() != acpi_backlight_vendor) return 0; - get_buffer(); - 
buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); - if (buffer->input[0] != -1) { + token = find_token_location(BRIGHTNESS_TOKEN); + if (token != -1) { + get_buffer(); + buffer->input[0] = token; dell_send_request(buffer, 0, 2); - max_intensity = buffer->output[3]; + if (buffer->output[0] == 0) + max_intensity = buffer->output[3]; + release_buffer(); } - release_buffer(); if (max_intensity) { struct backlight_properties props; diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c index d734763dab69..105cfffe82c6 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c @@ -96,18 +96,18 @@ static struct intel_pmc_ipc_dev { struct completion cmd_complete; /* The following PMC BARs share the same ACPI device with the IPC */ - void *acpi_io_base; + resource_size_t acpi_io_base; int acpi_io_size; struct platform_device *tco_dev; /* gcr */ - void *gcr_base; + resource_size_t gcr_base; int gcr_size; /* punit */ - void *punit_base; + resource_size_t punit_base; int punit_size; - void *punit_base2; + resource_size_t punit_base2; int punit_size2; struct platform_device *punit_dev; } ipcdev; @@ -210,10 +210,15 @@ static int intel_pmc_ipc_check_status(void) return ret; } -/* - * intel_pmc_ipc_simple_command - * @cmd: command - * @sub: sub type +/** + * intel_pmc_ipc_simple_command() - Simple IPC command + * @cmd: IPC command code. + * @sub: IPC command sub type. + * + * Send a simple IPC command to PMC when don't need to specify + * input/output data and source/dest pointers. + * + * Return: an IPC error code or 0 on success. */ int intel_pmc_ipc_simple_command(int cmd, int sub) { @@ -232,16 +237,20 @@ int intel_pmc_ipc_simple_command(int cmd, int sub) } EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command); -/* - * intel_pmc_ipc_raw_cmd - * @cmd: command - * @sub: sub type - * @in: input data - * @inlen: input length in bytes - * @out: output data - * @outlen: output length in dwords - * @sptr: data writing to SPTR register - * @dptr: data writing to DPTR register +/** + * intel_pmc_ipc_raw_cmd() - IPC command with data and pointers + * @cmd: IPC command code. + * @sub: IPC command sub type. + * @in: input data of this IPC command. + * @inlen: input data length in bytes. + * @out: output data of this IPC command. + * @outlen: output data length in dwords. + * @sptr: data writing to SPTR register. + * @dptr: data writing to DPTR register. + * + * Send an IPC command to PMC with input/output data and source/dest pointers. + * + * Return: an IPC error code or 0 on success. */ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, u32 outlen, u32 dptr, u32 sptr) @@ -278,14 +287,18 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, } EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd); -/* - * intel_pmc_ipc_command - * @cmd: command - * @sub: sub type - * @in: input data - * @inlen: input length in bytes - * @out: output data - * @outlen: output length in dwords +/** + * intel_pmc_ipc_command() - IPC command with input/output data + * @cmd: IPC command code. + * @sub: IPC command sub type. + * @in: input data of this IPC command. + * @inlen: input data length in bytes. + * @out: output data of this IPC command. + * @outlen: output data length in dwords. + * + * Send an IPC command to PMC with input/output data. + * + * Return: an IPC error code or 0 on success. 
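The kernel-doc above spells out the contract of intel_pmc_ipc_command(): inlen is counted in bytes, outlen in dwords, and the return value is 0 or an IPC error code. A hypothetical caller, assuming only the prototype shown in this hunk (MY_CMD and MY_SUB are made-up command codes, not real PMC commands):

#include <linux/types.h>

/* Prototype as it appears in the hunk above. */
int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
			  u32 *out, u32 outlen);

#define MY_CMD	0x0	/* made-up values for illustration only */
#define MY_SUB	0x0

static int my_send_ipc(void)
{
	u8 in[4] = { 0 };
	u32 out[1];

	/* 4 input bytes, 1 output dword */
	return intel_pmc_ipc_command(MY_CMD, MY_SUB, in, sizeof(in), out, 1);
}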
*/ int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, u32 outlen) @@ -480,11 +493,11 @@ static int ipc_create_punit_device(void) pdev->dev.parent = ipcdev.dev; res = punit_res; - res->start = (resource_size_t)ipcdev.punit_base; + res->start = ipcdev.punit_base; res->end = res->start + ipcdev.punit_size - 1; res = punit_res + PUNIT_RESOURCE_INTER; - res->start = (resource_size_t)ipcdev.punit_base2; + res->start = ipcdev.punit_base2; res->end = res->start + ipcdev.punit_size2 - 1; ret = platform_device_add_resources(pdev, punit_res, @@ -522,15 +535,15 @@ static int ipc_create_tco_device(void) pdev->dev.parent = ipcdev.dev; res = tco_res + TCO_RESOURCE_ACPI_IO; - res->start = (resource_size_t)ipcdev.acpi_io_base + TCO_BASE_OFFSET; + res->start = ipcdev.acpi_io_base + TCO_BASE_OFFSET; res->end = res->start + TCO_REGS_SIZE - 1; res = tco_res + TCO_RESOURCE_SMI_EN_IO; - res->start = (resource_size_t)ipcdev.acpi_io_base + SMI_EN_OFFSET; + res->start = ipcdev.acpi_io_base + SMI_EN_OFFSET; res->end = res->start + SMI_EN_SIZE - 1; res = tco_res + TCO_RESOURCE_GCR_MEM; - res->start = (resource_size_t)ipcdev.gcr_base; + res->start = ipcdev.gcr_base; res->end = res->start + ipcdev.gcr_size - 1; ret = platform_device_add_resources(pdev, tco_res, ARRAY_SIZE(tco_res)); @@ -589,7 +602,7 @@ static int ipc_plat_get_res(struct platform_device *pdev) return -ENXIO; } size = resource_size(res); - ipcdev.acpi_io_base = (void *)res->start; + ipcdev.acpi_io_base = res->start; ipcdev.acpi_io_size = size; dev_info(&pdev->dev, "io res: %llx %x\n", (long long)res->start, (int)resource_size(res)); @@ -601,7 +614,7 @@ static int ipc_plat_get_res(struct platform_device *pdev) return -ENXIO; } size = resource_size(res); - ipcdev.punit_base = (void *)res->start; + ipcdev.punit_base = res->start; ipcdev.punit_size = size; dev_info(&pdev->dev, "punit data res: %llx %x\n", (long long)res->start, (int)resource_size(res)); @@ -613,7 +626,7 @@ static int ipc_plat_get_res(struct platform_device *pdev) return -ENXIO; } size = resource_size(res); - ipcdev.punit_base2 = (void *)res->start; + ipcdev.punit_base2 = res->start; ipcdev.punit_size2 = size; dev_info(&pdev->dev, "punit interface res: %llx %x\n", (long long)res->start, (int)resource_size(res)); @@ -637,7 +650,7 @@ static int ipc_plat_get_res(struct platform_device *pdev) } ipcdev.ipc_base = addr; - ipcdev.gcr_base = (void *)(res->start + size); + ipcdev.gcr_base = res->start + size; ipcdev.gcr_size = PLAT_RESOURCE_GCR_SIZE; dev_info(&pdev->dev, "ipc res: %llx %x\n", (long long)res->start, (int)resource_size(res)); diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index 001b199a8c33..187d1086d15c 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -216,13 +216,13 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id) int nc; u32 offset = 0; int err; - u8 cbuf[IPC_WWBUF_SIZE] = { }; + u8 cbuf[IPC_WWBUF_SIZE]; u32 *wbuf = (u32 *)&cbuf; - mutex_lock(&ipclock); - memset(cbuf, 0, sizeof(cbuf)); + mutex_lock(&ipclock); + if (ipcdev.pdev == NULL) { mutex_unlock(&ipclock); return -ENODEV; diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c index 832932bdc977..7fd4f511d78f 100644 --- a/drivers/regulator/88pm800.c +++ b/drivers/regulator/88pm800.c @@ -130,7 +130,7 @@ struct pm800_regulators { .owner = THIS_MODULE, \ .n_voltages = ARRAY_SIZE(ldo_volt_table), \ .vsel_reg = PM800_##vreg##_VOUT, \ - .vsel_mask = 0x1f, \ + .vsel_mask = 0xf, \ 
.enable_reg = PM800_##ereg, \ .enable_mask = 1 << (ebit), \ .volt_table = ldo_volt_table, \ diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index c9f72019bd68..78387a6cbae5 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -109,6 +109,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev, static struct regulator *create_regulator(struct regulator_dev *rdev, struct device *dev, const char *supply_name); +static void _regulator_put(struct regulator *regulator); static const char *rdev_get_name(struct regulator_dev *rdev) { @@ -1105,6 +1106,9 @@ static int set_supply(struct regulator_dev *rdev, rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev)); + if (!try_module_get(supply_rdev->owner)) + return -ENODEV; + rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY"); if (rdev->supply == NULL) { err = -ENOMEM; @@ -1381,9 +1385,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) } if (!r) { - dev_err(dev, "Failed to resolve %s-supply for %s\n", - rdev->supply_name, rdev->desc->name); - return -EPROBE_DEFER; + if (have_full_constraints()) { + r = dummy_regulator_rdev; + } else { + dev_err(dev, "Failed to resolve %s-supply for %s\n", + rdev->supply_name, rdev->desc->name); + return -EPROBE_DEFER; + } } /* Recursively resolve the supply of the supply */ @@ -1398,8 +1406,11 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) /* Cascade always-on state to supply */ if (_regulator_is_enabled(rdev)) { ret = regulator_enable(rdev->supply); - if (ret < 0) + if (ret < 0) { + if (rdev->supply) + _regulator_put(rdev->supply); return ret; + } } return 0; diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c index 6f2bdad8b4d8..e94ddcf97722 100644 --- a/drivers/regulator/max8973-regulator.c +++ b/drivers/regulator/max8973-regulator.c @@ -450,7 +450,7 @@ static struct max8973_regulator_platform_data *max8973_parse_dt( pdata->control_flags |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE; if (of_property_read_bool(np, "maxim,enable-bias-control")) - pdata->control_flags |= MAX8973_BIAS_ENABLE; + pdata->control_flags |= MAX8973_CONTROL_BIAS_ENABLE; return pdata; } diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 326ffb553371..72fc3c32db49 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -34,6 +34,8 @@ #include <linux/mfd/samsung/s2mps14.h> #include <linux/mfd/samsung/s2mpu02.h> +/* The highest number of possible regulators for supported devices. */ +#define S2MPS_REGULATOR_MAX S2MPS13_REGULATOR_MAX struct s2mps11_info { unsigned int rdev_num; int ramp_delay2; @@ -49,7 +51,7 @@ struct s2mps11_info { * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether * the suspend mode was enabled. 
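The s2mps11 change just below swaps a 50-bit wide C bitfield for a bitmap dimensioned by S2MPS_REGULATOR_MAX, so per-regulator suspend state is tracked with set_bit() and test_bit() and is no longer capped by the width of an integer bitfield. A standalone sketch of the same pattern (MY_REGULATOR_MAX and struct my_pmic are illustrative):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define MY_REGULATOR_MAX 64	/* illustrative upper bound, not a chip's real count */

struct my_pmic {
	/* one bit per regulator: was suspend mode requested? */
	DECLARE_BITMAP(suspend_state, MY_REGULATOR_MAX);
};

static void my_mark_suspend(struct my_pmic *pmic, int id)
{
	set_bit(id, pmic->suspend_state);	/* replaces "state |= 1ULL << id" */
}

static bool my_is_suspend(const struct my_pmic *pmic, int id)
{
	return test_bit(id, pmic->suspend_state);
}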
*/ - unsigned long long s2mps14_suspend_state:50; + DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX); /* Array of size rdev_num with GPIO-s for external sleep control */ int *ext_control_gpio; @@ -500,7 +502,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev) switch (s2mps11->dev_type) { case S2MPS13X: case S2MPS14X: - if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) + if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state)) val = S2MPS14_ENABLE_SUSPEND; else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)])) val = S2MPS14_ENABLE_EXT_CONTROL; @@ -508,7 +510,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev) val = rdev->desc->enable_mask; break; case S2MPU02: - if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) + if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state)) val = S2MPU02_ENABLE_SUSPEND; else val = rdev->desc->enable_mask; @@ -562,7 +564,7 @@ static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev) if (ret < 0) return ret; - s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev)); + set_bit(rdev_get_id(rdev), s2mps11->suspend_state); /* * Don't enable suspend mode if regulator is already disabled because * this would effectively for a short time turn on the regulator after @@ -960,18 +962,22 @@ static int s2mps11_pmic_probe(struct platform_device *pdev) case S2MPS11X: s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators); regulators = s2mps11_regulators; + BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); break; case S2MPS13X: s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators); regulators = s2mps13_regulators; + BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); break; case S2MPS14X: s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators); regulators = s2mps14_regulators; + BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); break; case S2MPU02: s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators); regulators = s2mpu02_regulators; + BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); break; default: dev_err(&pdev->dev, "Invalid device type: %u\n", diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c index 4b62d1a875e4..2b08cac62f07 100644 --- a/drivers/rtc/rtc-armada38x.c +++ b/drivers/rtc/rtc-armada38x.c @@ -88,7 +88,7 @@ static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct armada38x_rtc *rtc = dev_get_drvdata(dev); int ret = 0; - unsigned long time, flags; + unsigned long time; ret = rtc_tm_to_time(tm, &time); diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c index c0090b698ff3..eab230be5a54 100644 --- a/drivers/rtc/rtc-mt6397.c +++ b/drivers/rtc/rtc-mt6397.c @@ -343,6 +343,8 @@ static int mtk_rtc_probe(struct platform_device *pdev) goto out_dispose_irq; } + device_init_wakeup(&pdev->dev, 1); + rtc->rtc_dev = rtc_device_register("mt6397-rtc", &pdev->dev, &mtk_rtc_ops, THIS_MODULE); if (IS_ERR(rtc->rtc_dev)) { @@ -351,8 +353,6 @@ static int mtk_rtc_probe(struct platform_device *pdev) goto out_free_irq; } - device_init_wakeup(&pdev->dev, 1); - return 0; out_free_irq: diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile index 95bccfd3f169..e5225ad9c5b1 100644 --- a/drivers/s390/Makefile +++ b/drivers/s390/Makefile @@ -2,7 +2,7 @@ # Makefile for the S/390 specific device drivers # -obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/ +obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/ drivers-y += drivers/s390/built-in.o diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 
1aec8ff0b587..f73d2f579a7e 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1863,6 +1863,33 @@ static void __dasd_device_check_expire(struct dasd_device *device) } /* + * return 1 when device is not eligible for IO + */ +static int __dasd_device_is_unusable(struct dasd_device *device, + struct dasd_ccw_req *cqr) +{ + int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM); + + if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { + /* dasd is being set offline. */ + return 1; + } + if (device->stopped) { + if (device->stopped & mask) { + /* stopped and CQR will not change that. */ + return 1; + } + if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { + /* CQR is not able to change device to + * operational. */ + return 1; + } + /* CQR required to get device operational. */ + } + return 0; +} + +/* * Take a look at the first request on the ccw queue and check * if it needs to be started. */ @@ -1876,13 +1903,8 @@ static void __dasd_device_start_head(struct dasd_device *device) cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); if (cqr->status != DASD_CQR_QUEUED) return; - /* when device is stopped, return request to previous layer - * exception: only the disconnect or unresumed bits are set and the - * cqr is a path verification request - */ - if (device->stopped && - !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM)) - && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) { + /* if device is not usable return request to upper layer */ + if (__dasd_device_is_unusable(device, cqr)) { cqr->intrc = -EAGAIN; cqr->status = DASD_CQR_CLEARED; dasd_schedule_device_bh(device); diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index a2597e683e79..ee3a6faae22a 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -699,7 +699,8 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device) struct dasd_device, alias_list); spin_unlock_irqrestore(&lcu->lock, flags); alias_priv = (struct dasd_eckd_private *) alias_device->private; - if ((alias_priv->count < private->count) && !alias_device->stopped) + if ((alias_priv->count < private->count) && !alias_device->stopped && + !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags)) return alias_device; else return NULL; diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index aeed7969fd79..7bc6df3100ef 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -7,6 +7,7 @@ #define KMSG_COMPONENT "sclp_early" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include <linux/errno.h> #include <asm/ctl_reg.h> #include <asm/sclp.h> #include <asm/ipl.h> diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 08f1830cbfc4..01bf1f5cf2e9 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -54,6 +54,10 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \ "Copyright IBM Corp. 
2001, 2012"); MODULE_LICENSE("GPL"); +static int zcrypt_hwrng_seed = 1; +module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP); +MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on)."); + static DEFINE_SPINLOCK(zcrypt_device_lock); static LIST_HEAD(zcrypt_device_list); static int zcrypt_device_count = 0; @@ -1373,6 +1377,7 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data) static struct hwrng zcrypt_rng_dev = { .name = "zcrypt", .data_read = zcrypt_rng_data_read, + .quality = 990, }; static int zcrypt_rng_device_add(void) @@ -1387,6 +1392,8 @@ static int zcrypt_rng_device_add(void) goto out; } zcrypt_rng_buffer_index = 0; + if (!zcrypt_hwrng_seed) + zcrypt_rng_dev.quality = 0; rc = hwrng_register(&zcrypt_rng_dev); if (rc) goto out_free; diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/virtio/Makefile index 241891a57caf..241891a57caf 100644 --- a/drivers/s390/kvm/Makefile +++ b/drivers/s390/virtio/Makefile diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c index 53fb975c404b..53fb975c404b 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/virtio/kvm_virtio.c diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index f8d8fdb26b72..f8d8fdb26b72 100644 --- a/drivers/s390/kvm/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 1ac38e73df7e..9ad41168d26d 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -859,7 +859,7 @@ sdev_store_queue_depth(struct device *dev, struct device_attribute *attr, depth = simple_strtoul(buf, NULL, 0); - if (depth < 1 || depth > sht->can_queue) + if (depth < 1 || depth > sdev->host->can_queue) return -EINVAL; retval = sht->change_queue_depth(sdev, depth); diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index a85292b1d09d..e3cd3ece4412 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -203,7 +203,7 @@ static ssize_t srp_show_tmo(char *buf, int tmo) return tmo >= 0 ? 
sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n"); } -static int srp_parse_tmo(int *tmo, const char *buf) +int srp_parse_tmo(int *tmo, const char *buf) { int res = 0; @@ -214,6 +214,7 @@ static int srp_parse_tmo(int *tmo, const char *buf) return res; } +EXPORT_SYMBOL(srp_parse_tmo); static ssize_t show_reconnect_delay(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 3f25b8fa921d..871f3553987d 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -1329,9 +1329,9 @@ static int st_open(struct inode *inode, struct file *filp) spin_lock(&st_use_lock); STp->in_use = 0; spin_unlock(&st_use_lock); - scsi_tape_put(STp); if (resumed) scsi_autopm_put_device(STp->device); + scsi_tape_put(STp); return retval; } diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 285f77544c36..7dbbb29d24c6 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -949,7 +949,7 @@ static int virtscsi_probe(struct virtio_device *vdev) { struct Scsi_Host *shost; struct virtio_scsi *vscsi; - int err, host_prot; + int err; u32 sg_elems, num_targets; u32 cmd_per_lun; u32 num_queues; @@ -1009,6 +1009,8 @@ static int virtscsi_probe(struct virtio_device *vdev) #ifdef CONFIG_BLK_DEV_INTEGRITY if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { + int host_prot; + host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 0cae1694014d..b0f30fb68914 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -612,7 +612,7 @@ config SPI_XTENSA_XTFPGA config SPI_ZYNQMP_GQSPI tristate "Xilinx ZynqMP GQSPI controller" - depends on SPI_MASTER + depends on SPI_MASTER && HAS_DMA help Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC. 
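The st_open() error path above now calls scsi_autopm_put_device() before scsi_tape_put(): the tape reference is what keeps STp (and the device pointer cached in it) safe to dereference, so it has to be the last thing dropped. Below is a minimal, self-contained sketch of that release-ordering rule, using hypothetical names rather than the driver's real types.

#include <stdlib.h>

/* Hypothetical refcounted object standing in for struct scsi_tape. */
struct tape {
	int refcount;
	int *device_state;	/* only safe to touch while a tape reference is held */
};

/* Stands in for scsi_tape_put(): the last put frees the tape and its device state. */
static void tape_put(struct tape *t)
{
	if (--t->refcount == 0) {
		free(t->device_state);
		free(t);
	}
}

/* Stands in for scsi_autopm_put_device(): needs a still-valid device pointer. */
static void device_pm_put(int *device_state)
{
	(*device_state)--;
}

/* Error-path cleanup: touch the device first, drop the pinning reference last. */
static void open_error_path(struct tape *t, int resumed)
{
	if (resumed)
		device_pm_put(t->device_state);
	tape_put(t);	/* may free t and t->device_state; nothing may use them afterwards */
}

int main(void)
{
	struct tape *t = malloc(sizeof(*t));

	if (!t)
		return 1;
	t->refcount = 1;
	t->device_state = calloc(1, sizeof(*t->device_state));
	if (!t->device_state) {
		free(t);
		return 1;
	}
	*t->device_state = 1;	/* pretend the device was runtime-resumed */

	open_error_path(t, 1);
	return 0;
}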
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index 788e2b176a4f..acce90ac7371 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c @@ -40,6 +40,7 @@ #define SPFI_CONTROL_SOFT_RESET BIT(11) #define SPFI_CONTROL_SEND_DMA BIT(10) #define SPFI_CONTROL_GET_DMA BIT(9) +#define SPFI_CONTROL_SE BIT(8) #define SPFI_CONTROL_TMODE_SHIFT 5 #define SPFI_CONTROL_TMODE_MASK 0x7 #define SPFI_CONTROL_TMODE_SINGLE 0 @@ -491,6 +492,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi, else if (xfer->tx_nbits == SPI_NBITS_QUAD && xfer->rx_nbits == SPI_NBITS_QUAD) val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT; + val |= SPFI_CONTROL_SE; spfi_writel(spfi, val, SPFI_CONTROL); } diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index eb7d3a6fb14c..f9deb84e4e55 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -201,8 +201,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi, { struct spi_imx_data *spi_imx = spi_master_get_devdata(master); - if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml) - && (transfer->len > spi_imx->tx_wml)) + if (spi_imx->dma_is_inited + && transfer->len > spi_imx->rx_wml * sizeof(u32) + && transfer->len > spi_imx->tx_wml * sizeof(u32)) return true; return false; } diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index 87b20a511a6b..f23f36ebaf3d 100644 --- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c @@ -214,6 +214,7 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr, case GQSPI_SELECT_FLASH_CS_BOTH: instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER | GQSPI_GENFIFO_CS_UPPER; + break; case GQSPI_SELECT_FLASH_CS_UPPER: instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER; break; diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index dd616ff0ffc5..c7de64171c45 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -693,6 +693,7 @@ static struct class *spidev_class; #ifdef CONFIG_OF static const struct of_device_id spidev_dt_ids[] = { { .compatible = "rohm,dh2228fv" }, + { .compatible = "lineartechnology,ltc2488" }, {}, }; MODULE_DEVICE_TABLE(of, spidev_dt_ids); diff --git a/drivers/staging/board/Kconfig b/drivers/staging/board/Kconfig index b8ee81840666..3f287c48e082 100644 --- a/drivers/staging/board/Kconfig +++ b/drivers/staging/board/Kconfig @@ -1,6 +1,6 @@ config STAGING_BOARD bool "Staging Board Support" - depends on OF_ADDRESS + depends on OF_ADDRESS && OF_IRQ && CLKDEV_LOOKUP help Select to enable per-board staging support code. 
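In the spi-imx hunk above, the RX/TX watermark levels (rx_wml, tx_wml) count 32-bit FIFO words while transfer->len is in bytes, so the DMA-eligibility test now scales the watermarks by sizeof(u32) before comparing. A small standalone sketch of that units check, with illustrative names rather than the driver's real structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative DMA capabilities: watermark levels are counted in 32-bit FIFO words. */
struct spi_dma_caps {
	bool dma_ready;		/* DMA channels were set up successfully */
	unsigned int rx_wml;	/* RX watermark, in words */
	unsigned int tx_wml;	/* TX watermark, in words */
};

/*
 * DMA only pays off when the transfer is longer than both watermarks.
 * transfer_len is in bytes, so the word counts must be scaled by
 * sizeof(uint32_t) before comparing -- the mismatch the hunk above fixes.
 */
static bool can_use_dma(const struct spi_dma_caps *c, size_t transfer_len)
{
	return c->dma_ready &&
	       transfer_len > c->rx_wml * sizeof(uint32_t) &&
	       transfer_len > c->tx_wml * sizeof(uint32_t);
}

int main(void)
{
	struct spi_dma_caps caps = { .dma_ready = true, .rx_wml = 32, .tx_wml = 32 };

	printf("64-byte transfer uses DMA: %d\n", can_use_dma(&caps, 64));	/* 0: below 128 bytes */
	printf("256-byte transfer uses DMA: %d\n", can_use_dma(&caps, 256));	/* 1 */
	return 0;
}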
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h index 7125eb955ae5..8a9d4a0de129 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h @@ -31,7 +31,6 @@ #define DEBUG_PORTAL_ALLOC #define DEBUG_SUBSYSTEM S_LND -#include <asm/irq.h> #include <linux/crc32.h> #include <linux/errno.h> #include <linux/if.h> diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index ed040fbb7df8..b0c8e235b982 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -1418,7 +1418,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, priv->current_aid = conf->aid; - if (changed & BSS_CHANGED_BSSID) { + if (changed & BSS_CHANGED_BSSID && conf->bssid) { unsigned long flags; spin_lock_irqsave(&priv->lock, flags); diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index f97323f19acf..af572d718135 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -701,7 +701,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, priv->current_aid = conf->aid; - if (changed & BSS_CHANGED_BSSID) + if (changed & BSS_CHANGED_BSSID && conf->bssid) vnt_mac_set_bssid_addr(priv, (u8 *)conf->bssid); diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index c9c27f69e101..ee8bfacf2071 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1108,19 +1108,29 @@ static void eraser(unsigned char c, struct tty_struct *tty) * Locking: ctrl_lock */ -static void isig(int sig, struct tty_struct *tty) +static void __isig(int sig, struct tty_struct *tty) { - struct n_tty_data *ldata = tty->disc_data; struct pid *tty_pgrp = tty_get_pgrp(tty); if (tty_pgrp) { kill_pgrp(tty_pgrp, sig, 1); put_pid(tty_pgrp); } +} - if (!L_NOFLSH(tty)) { +static void isig(int sig, struct tty_struct *tty) +{ + struct n_tty_data *ldata = tty->disc_data; + + if (L_NOFLSH(tty)) { + /* signal only */ + __isig(sig, tty); + + } else { /* signal and flush */ up_read(&tty->termios_rwsem); down_write(&tty->termios_rwsem); + __isig(sig, tty); + /* clear echo buffer */ mutex_lock(&ldata->output_lock); ldata->echo_head = ldata->echo_tail = 0; diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 76e65b714471..15b4079a335e 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1185,7 +1185,7 @@ config SERIAL_SC16IS7XX_CORE config SERIAL_SC16IS7XX tristate "SC16IS7xx serial support" select SERIAL_CORE - depends on I2C || SPI_MASTER + depends on (SPI_MASTER && !I2C) || I2C help This selects support for SC16IS7xx serial ports. 
Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752, diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 50cf5b10ceed..fd27e986b1dd 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2310,8 +2310,8 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, void __iomem *base; base = devm_ioremap_resource(dev, mmiobase); - if (!base) - return -ENOMEM; + if (IS_ERR(base)) + return PTR_ERR(base); index = pl011_probe_dt_alias(index, dev); diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c index a57301a6fe42..679709f51fd4 100644 --- a/drivers/tty/serial/etraxfs-uart.c +++ b/drivers/tty/serial/etraxfs-uart.c @@ -950,7 +950,7 @@ static int etraxfs_uart_remove(struct platform_device *pdev) port = platform_get_drvdata(pdev); uart_remove_one_port(&etraxfs_uart_driver, port); - etraxfs_uart_ports[pdev->id] = NULL; + etraxfs_uart_ports[port->line] = NULL; return 0; } diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 2c90dc31bfaa..54fdc7866ea1 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -1121,11 +1121,6 @@ static int imx_startup(struct uart_port *port) writel(temp & ~UCR4_DREN, sport->port.membase + UCR4); - /* Can we enable the DMA support? */ - if (is_imx6q_uart(sport) && !uart_console(port) && - !sport->dma_is_inited) - imx_uart_dma_init(sport); - spin_lock_irqsave(&sport->port.lock, flags); /* Reset fifo's and state machines */ i = 100; @@ -1143,9 +1138,6 @@ static int imx_startup(struct uart_port *port) writel(USR1_RTSD, sport->port.membase + USR1); writel(USR2_ORE, sport->port.membase + USR2); - if (sport->dma_is_inited && !sport->dma_is_enabled) - imx_enable_dma(sport); - temp = readl(sport->port.membase + UCR1); temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN; @@ -1316,6 +1308,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios, } else { ucr2 |= UCR2_CTSC; } + + /* Can we enable the DMA support? 
*/ + if (is_imx6q_uart(sport) && !uart_console(port) + && !sport->dma_is_inited) + imx_uart_dma_init(sport); } else { termios->c_cflag &= ~CRTSCTS; } @@ -1432,6 +1429,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios, if (UART_ENABLE_MS(&sport->port, termios->c_cflag)) imx_enable_ms(&sport->port); + if (sport->dma_is_inited && !sport->dma_is_enabled) + imx_enable_dma(sport); spin_unlock_irqrestore(&sport->port.lock, flags); } diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 9e6576004a42..5ccc698cbbfa 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -354,6 +354,26 @@ static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val) (reg << SC16IS7XX_REG_SHIFT) | port->line, val); } +static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen) +{ + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | port->line; + + regcache_cache_bypass(s->regmap, true); + regmap_raw_read(s->regmap, addr, s->buf, rxlen); + regcache_cache_bypass(s->regmap, false); +} + +static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send) +{ + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | port->line; + + regcache_cache_bypass(s->regmap, true); + regmap_raw_write(s->regmap, addr, s->buf, to_send); + regcache_cache_bypass(s->regmap, false); +} + static void sc16is7xx_port_update(struct uart_port *port, u8 reg, u8 mask, u8 val) { @@ -508,10 +528,7 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen, s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG); bytes_read = 1; } else { - regcache_cache_bypass(s->regmap, true); - regmap_raw_read(s->regmap, SC16IS7XX_RHR_REG, - s->buf, rxlen); - regcache_cache_bypass(s->regmap, false); + sc16is7xx_fifo_read(port, rxlen); bytes_read = rxlen; } @@ -591,9 +608,8 @@ static void sc16is7xx_handle_tx(struct uart_port *port) s->buf[i] = xmit->buf[xmit->tail]; xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); } - regcache_cache_bypass(s->regmap, true); - regmap_raw_write(s->regmap, SC16IS7XX_THR_REG, s->buf, to_send); - regcache_cache_bypass(s->regmap, false); + + sc16is7xx_fifo_write(port, to_send); } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 7ae1592f7ec9..f36852067f20 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1418,7 +1418,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp) mutex_lock(&port->mutex); uart_shutdown(tty, state); tty_port_tty_set(port, NULL); - tty->closing = 0; + spin_lock_irqsave(&port->lock, flags); if (port->blocked_open) { @@ -1444,6 +1444,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp) mutex_unlock(&port->mutex); tty_ldisc_flush(tty); + tty->closing = 0; } static void uart_wait_until_sent(struct tty_struct *tty, int timeout) diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c index ea27804d87af..381a2b13682c 100644 --- a/drivers/tty/vt/selection.c +++ b/drivers/tty/vt/selection.c @@ -356,6 +356,7 @@ int paste_selection(struct tty_struct *tty) schedule(); continue; } + __set_current_state(TASK_RUNNING); count = sel_buffer_lth - pasted; count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL, count); diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 8fe52989b380..4462d167900c 100644 --- 
a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -742,6 +742,8 @@ static void visual_init(struct vc_data *vc, int num, int init) __module_get(vc->vc_sw->owner); vc->vc_num = num; vc->vc_display_fg = &master_display_fg; + if (vc->vc_uni_pagedir_loc) + con_free_unimap(vc); vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir; vc->vc_uni_pagedir = NULL; vc->vc_hi_font_mask = 0; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 519a77ba214c..b30e7423549b 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1944,6 +1944,7 @@ static void __exit acm_exit(void) usb_deregister(&acm_driver); tty_unregister_driver(acm_tty_driver); put_tty_driver(acm_tty_driver); + idr_destroy(&acm_minors); } module_init(acm_init); diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c index 0e6f968e93fe..01c0c0477a9e 100644 --- a/drivers/usb/common/ulpi.c +++ b/drivers/usb/common/ulpi.c @@ -242,7 +242,7 @@ static int __init ulpi_init(void) { return bus_register(&ulpi_bus); } -module_init(ulpi_init); +subsys_initcall(ulpi_init); static void __exit ulpi_exit(void) { diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index be5b2074f906..cbcd0920fb51 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1022,9 +1022,12 @@ static int register_root_hub(struct usb_hcd *hcd) dev_name(&usb_dev->dev), retval); return (retval < 0) ? retval : -EMSGSIZE; } - if (usb_dev->speed == USB_SPEED_SUPER) { + + if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) { retval = usb_get_bos_descriptor(usb_dev); - if (retval < 0) { + if (!retval) { + usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev); + } else if (usb_dev->speed == USB_SPEED_SUPER) { mutex_unlock(&usb_bus_list_lock); dev_dbg(parent_dev, "can't read %s bos descriptor %d\n", dev_name(&usb_dev->dev), retval); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 43cb2f2e3b43..73dfa194160b 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -122,7 +122,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev) return usb_get_intfdata(hdev->actconfig->interface[0]); } -static int usb_device_supports_lpm(struct usb_device *udev) +int usb_device_supports_lpm(struct usb_device *udev) { /* USB 2.1 (and greater) devices indicate LPM support through * their USB 2.0 Extended Capabilities BOS descriptor. 
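register_root_hub() above keys the BOS-descriptor read off bcdUSB >= 0x0201 rather than off SuperSpeed alone: bcdUSB is binary-coded decimal, and devices reporting 2.01 or later advertise extended capabilities through a BOS descriptor, which is where LPM support shows up. A small sketch of reading that BCD field, assuming a host-order 16-bit value:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * bcdUSB is binary-coded decimal: 0x0210 reads as USB 2.10, 0x0300 as 3.00.
 * Anything at 2.01 or newer may carry a BOS descriptor -- the condition used
 * by register_root_hub() above before calling usb_get_bos_descriptor().
 */
static bool may_have_bos(uint16_t bcd_usb)
{
	return bcd_usb >= 0x0201;
}

static void print_bcd(uint16_t bcd_usb)
{
	printf("USB %x.%02x, BOS possible: %d\n",
	       bcd_usb >> 8, bcd_usb & 0xff, may_have_bos(bcd_usb));
}

int main(void)
{
	print_bcd(0x0200);	/* USB 2.00: no BOS expected */
	print_bcd(0x0210);	/* USB 2.10: typical LPM-capable high-speed device */
	print_bcd(0x0300);	/* USB 3.00: SuperSpeed devices provide a BOS descriptor */
	return 0;
}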
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index 7eb1e26798e5..457255a3306a 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -65,6 +65,7 @@ extern int usb_hub_init(void); extern void usb_hub_cleanup(void); extern int usb_major_init(void); extern void usb_major_cleanup(void); +extern int usb_device_supports_lpm(struct usb_device *udev); #ifdef CONFIG_PM diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c index e5b546f1152e..c3cc1a78d1e2 100644 --- a/drivers/usb/dwc2/core.c +++ b/drivers/usb/dwc2/core.c @@ -72,17 +72,7 @@ static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "%s\n", __func__); /* Backup Host regs */ - hr = hsotg->hr_backup; - if (!hr) { - hr = devm_kzalloc(hsotg->dev, sizeof(*hr), GFP_KERNEL); - if (!hr) { - dev_err(hsotg->dev, "%s: can't allocate host regs\n", - __func__); - return -ENOMEM; - } - - hsotg->hr_backup = hr; - } + hr = &hsotg->hr_backup; hr->hcfg = readl(hsotg->regs + HCFG); hr->haintmsk = readl(hsotg->regs + HAINTMSK); for (i = 0; i < hsotg->core_params->host_channels; ++i) @@ -90,6 +80,7 @@ static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) hr->hprt0 = readl(hsotg->regs + HPRT0); hr->hfir = readl(hsotg->regs + HFIR); + hr->valid = true; return 0; } @@ -109,12 +100,13 @@ static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "%s\n", __func__); /* Restore host regs */ - hr = hsotg->hr_backup; - if (!hr) { + hr = &hsotg->hr_backup; + if (!hr->valid) { dev_err(hsotg->dev, "%s: no host registers to restore\n", __func__); return -EINVAL; } + hr->valid = false; writel(hr->hcfg, hsotg->regs + HCFG); writel(hr->haintmsk, hsotg->regs + HAINTMSK); @@ -152,17 +144,7 @@ static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "%s\n", __func__); /* Backup dev regs */ - dr = hsotg->dr_backup; - if (!dr) { - dr = devm_kzalloc(hsotg->dev, sizeof(*dr), GFP_KERNEL); - if (!dr) { - dev_err(hsotg->dev, "%s: can't allocate device regs\n", - __func__); - return -ENOMEM; - } - - hsotg->dr_backup = dr; - } + dr = &hsotg->dr_backup; dr->dcfg = readl(hsotg->regs + DCFG); dr->dctl = readl(hsotg->regs + DCTL); @@ -195,7 +177,7 @@ static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) dr->doeptsiz[i] = readl(hsotg->regs + DOEPTSIZ(i)); dr->doepdma[i] = readl(hsotg->regs + DOEPDMA(i)); } - + dr->valid = true; return 0; } @@ -215,12 +197,13 @@ static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "%s\n", __func__); /* Restore dev regs */ - dr = hsotg->dr_backup; - if (!dr) { + dr = &hsotg->dr_backup; + if (!dr->valid) { dev_err(hsotg->dev, "%s: no device registers to restore\n", __func__); return -EINVAL; } + dr->valid = false; writel(dr->dcfg, hsotg->regs + DCFG); writel(dr->dctl, hsotg->regs + DCTL); @@ -268,17 +251,7 @@ static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg) int i; /* Backup global regs */ - gr = hsotg->gr_backup; - if (!gr) { - gr = devm_kzalloc(hsotg->dev, sizeof(*gr), GFP_KERNEL); - if (!gr) { - dev_err(hsotg->dev, "%s: can't allocate global regs\n", - __func__); - return -ENOMEM; - } - - hsotg->gr_backup = gr; - } + gr = &hsotg->gr_backup; gr->gotgctl = readl(hsotg->regs + GOTGCTL); gr->gintmsk = readl(hsotg->regs + GINTMSK); @@ -291,6 +264,7 @@ static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg) for (i = 0; i < MAX_EPS_CHANNELS; i++) gr->dtxfsiz[i] = readl(hsotg->regs + DPTXFSIZN(i)); + gr->valid = true; return 0; } @@ -309,12 +283,13 @@ static int 
dwc2_restore_global_registers(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "%s\n", __func__); /* Restore global regs */ - gr = hsotg->gr_backup; - if (!gr) { + gr = &hsotg->gr_backup; + if (!gr->valid) { dev_err(hsotg->dev, "%s: no global registers to restore\n", __func__); return -EINVAL; } + gr->valid = false; writel(0xffffffff, hsotg->regs + GINTSTS); writel(gr->gotgctl, hsotg->regs + GOTGCTL); diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 53b8de03f102..0ed87620941b 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -492,6 +492,7 @@ struct dwc2_gregs_backup { u32 gdfifocfg; u32 dtxfsiz[MAX_EPS_CHANNELS]; u32 gpwrdn; + bool valid; }; /** @@ -521,6 +522,7 @@ struct dwc2_dregs_backup { u32 doepctl[MAX_EPS_CHANNELS]; u32 doeptsiz[MAX_EPS_CHANNELS]; u32 doepdma[MAX_EPS_CHANNELS]; + bool valid; }; /** @@ -538,6 +540,7 @@ struct dwc2_hregs_backup { u32 hcintmsk[MAX_EPS_CHANNELS]; u32 hprt0; u32 hfir; + bool valid; }; /** @@ -705,9 +708,9 @@ struct dwc2_hsotg { struct work_struct wf_otg; struct timer_list wkp_timer; enum dwc2_lx_state lx_state; - struct dwc2_gregs_backup *gr_backup; - struct dwc2_dregs_backup *dr_backup; - struct dwc2_hregs_backup *hr_backup; + struct dwc2_gregs_backup gr_backup; + struct dwc2_dregs_backup dr_backup; + struct dwc2_hregs_backup hr_backup; struct dentry *debug_root; struct debugfs_regset32 *regset; diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index b10377c65064..f845c41fe9e5 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -359,10 +359,9 @@ void dwc2_hcd_stop(struct dwc2_hsotg *hsotg) /* Caller must hold driver lock */ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg, - struct dwc2_hcd_urb *urb, void **ep_handle, - gfp_t mem_flags) + struct dwc2_hcd_urb *urb, struct dwc2_qh *qh, + struct dwc2_qtd *qtd) { - struct dwc2_qtd *qtd; u32 intr_mask; int retval; int dev_speed; @@ -386,18 +385,15 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg, return -ENODEV; } - qtd = kzalloc(sizeof(*qtd), mem_flags); if (!qtd) - return -ENOMEM; + return -EINVAL; dwc2_hcd_qtd_init(qtd, urb); - retval = dwc2_hcd_qtd_add(hsotg, qtd, (struct dwc2_qh **)ep_handle, - mem_flags); + retval = dwc2_hcd_qtd_add(hsotg, qtd, qh); if (retval) { dev_err(hsotg->dev, "DWC OTG HCD URB Enqueue failed adding QTD. 
Error status %d\n", retval); - kfree(qtd); return retval; } @@ -2445,6 +2441,9 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, u32 tflags = 0; void *buf; unsigned long flags; + struct dwc2_qh *qh; + bool qh_allocated = false; + struct dwc2_qtd *qtd; if (dbg_urb(urb)) { dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n"); @@ -2523,15 +2522,32 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, urb->iso_frame_desc[i].length); urb->hcpriv = dwc2_urb; + qh = (struct dwc2_qh *) ep->hcpriv; + /* Create QH for the endpoint if it doesn't exist */ + if (!qh) { + qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, mem_flags); + if (!qh) { + retval = -ENOMEM; + goto fail0; + } + ep->hcpriv = qh; + qh_allocated = true; + } + + qtd = kzalloc(sizeof(*qtd), mem_flags); + if (!qtd) { + retval = -ENOMEM; + goto fail1; + } spin_lock_irqsave(&hsotg->lock, flags); retval = usb_hcd_link_urb_to_ep(hcd, urb); if (retval) - goto fail1; + goto fail2; - retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, &ep->hcpriv, mem_flags); + retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd); if (retval) - goto fail2; + goto fail3; if (alloc_bandwidth) { dwc2_allocate_bus_bandwidth(hcd, @@ -2543,12 +2559,25 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, return 0; -fail2: +fail3: dwc2_urb->priv = NULL; usb_hcd_unlink_urb_from_ep(hcd, urb); -fail1: +fail2: spin_unlock_irqrestore(&hsotg->lock, flags); urb->hcpriv = NULL; + kfree(qtd); +fail1: + if (qh_allocated) { + struct dwc2_qtd *qtd2, *qtd2_tmp; + + ep->hcpriv = NULL; + dwc2_hcd_qh_unlink(hsotg, qh); + /* Free each QTD in the QH's QTD list */ + list_for_each_entry_safe(qtd2, qtd2_tmp, &qh->qtd_list, + qtd_list_entry) + dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh); + dwc2_hcd_qh_free(hsotg, qh); + } fail0: kfree(dwc2_urb); diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h index 7b5841c40033..fc1054965552 100644 --- a/drivers/usb/dwc2/hcd.h +++ b/drivers/usb/dwc2/hcd.h @@ -463,6 +463,9 @@ extern void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg, /* Schedule Queue Functions */ /* Implemented in hcd_queue.c */ extern void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg); +extern struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, + struct dwc2_hcd_urb *urb, + gfp_t mem_flags); extern void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); extern int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); extern void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); @@ -471,7 +474,7 @@ extern void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, extern void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb); extern int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, - struct dwc2_qh **qh, gfp_t mem_flags); + struct dwc2_qh *qh); /* Unlinks and frees a QTD */ static inline void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg, diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c index 9b5c36256627..3ad63d392e13 100644 --- a/drivers/usb/dwc2/hcd_queue.c +++ b/drivers/usb/dwc2/hcd_queue.c @@ -191,7 +191,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, * * Return: Pointer to the newly allocated QH, or NULL on error */ -static struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, +struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, struct dwc2_hcd_urb *urb, gfp_t mem_flags) { @@ -767,57 +767,32 @@ void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, 
struct dwc2_hcd_urb *urb) * * @hsotg: The DWC HCD structure * @qtd: The QTD to add - * @qh: Out parameter to return queue head - * @atomic_alloc: Flag to do atomic alloc if needed + * @qh: Queue head to add qtd to * * Return: 0 if successful, negative error code otherwise * - * Finds the correct QH to place the QTD into. If it does not find a QH, it - * will create a new QH. If the QH to which the QTD is added is not currently - * scheduled, it is placed into the proper schedule based on its EP type. + * If the QH to which the QTD is added is not currently scheduled, it is placed + * into the proper schedule based on its EP type. */ int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, - struct dwc2_qh **qh, gfp_t mem_flags) + struct dwc2_qh *qh) { - struct dwc2_hcd_urb *urb = qtd->urb; - int allocated = 0; int retval; - /* - * Get the QH which holds the QTD-list to insert to. Create QH if it - * doesn't exist. - */ - if (*qh == NULL) { - *qh = dwc2_hcd_qh_create(hsotg, urb, mem_flags); - if (*qh == NULL) - return -ENOMEM; - allocated = 1; + if (unlikely(!qh)) { + dev_err(hsotg->dev, "%s: Invalid QH\n", __func__); + retval = -EINVAL; + goto fail; } - retval = dwc2_hcd_qh_add(hsotg, *qh); + retval = dwc2_hcd_qh_add(hsotg, qh); if (retval) goto fail; - qtd->qh = *qh; - list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list); + qtd->qh = qh; + list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list); return 0; - fail: - if (allocated) { - struct dwc2_qtd *qtd2, *qtd2_tmp; - struct dwc2_qh *qh_tmp = *qh; - - *qh = NULL; - dwc2_hcd_qh_unlink(hsotg, qh_tmp); - - /* Free each QTD in the QH's QTD list */ - list_for_each_entry_safe(qtd2, qtd2_tmp, &qh_tmp->qtd_list, - qtd_list_entry) - dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh_tmp); - - dwc2_hcd_qh_free(hsotg, qh_tmp); - } - return retval; } diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 5c110d8e293b..ff5773c66b84 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -446,10 +446,12 @@ static int dwc3_phy_setup(struct dwc3 *dwc) /* Select the HS PHY interface */ switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) { case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI: - if (!strncmp(dwc->hsphy_interface, "utmi", 4)) { + if (dwc->hsphy_interface && + !strncmp(dwc->hsphy_interface, "utmi", 4)) { reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI; break; - } else if (!strncmp(dwc->hsphy_interface, "ulpi", 4)) { + } else if (dwc->hsphy_interface && + !strncmp(dwc->hsphy_interface, "ulpi", 4)) { reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI; dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); } else { diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 2ef3c8d6a9db..69e769c35cf5 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -727,6 +727,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY"); ret = dwc3_ep0_set_isoch_delay(dwc, ctrl); break; + case USB_REQ_SET_INTERFACE: + dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE"); + dwc->start_config_issued = false; + /* Fall through */ default: dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver"); ret = dwc3_ep0_delegate_req(dwc, ctrl); diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 4e3447bbd097..58b4657fc721 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1758,10 +1758,13 @@ unknown: * take such requests too, if that's ever needed: to work * in config 0, etc. 
*/ - list_for_each_entry(f, &cdev->config->functions, list) - if (f->req_match && f->req_match(f, ctrl)) - goto try_fun_setup; - f = NULL; + if (cdev->config) { + list_for_each_entry(f, &cdev->config->functions, list) + if (f->req_match && f->req_match(f, ctrl)) + goto try_fun_setup; + f = NULL; + } + switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_INTERFACE: if (!cdev->config || intf >= MAX_CONFIG_INTERFACES) diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 0495c94a23d7..289e20119fea 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -571,7 +571,7 @@ static struct config_group *function_make( if (IS_ERR(fi)) return ERR_CAST(fi); - ret = config_item_set_name(&fi->group.cg_item, name); + ret = config_item_set_name(&fi->group.cg_item, "%s", name); if (ret) { usb_put_function_instance(fi); return ERR_PTR(ret); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 45b8c8b338df..6e7be91e6097 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -924,7 +924,8 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from) kiocb->private = p; - kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); + if (p->aio) + kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); res = ffs_epfile_io(kiocb->ki_filp, p); if (res == -EIOCBQUEUED) @@ -968,7 +969,8 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to) kiocb->private = p; - kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); + if (p->aio) + kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); res = ffs_epfile_io(kiocb->ki_filp, p); if (res == -EIOCBQUEUED) diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index d2259c663996..f936268d26c6 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -2786,7 +2786,7 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns) return -EINVAL; } - curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL); + curlun = kcalloc(FSG_MAX_LUNS, sizeof(*curlun), GFP_KERNEL); if (unlikely(!curlun)) return -ENOMEM; @@ -2796,8 +2796,6 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns) common->luns = curlun; common->nluns = nluns; - pr_info("Number of LUNs=%d\n", common->nluns); - return 0; } EXPORT_SYMBOL_GPL(fsg_common_set_nluns); @@ -3563,14 +3561,26 @@ static struct usb_function *fsg_alloc(struct usb_function_instance *fi) struct fsg_opts *opts = fsg_opts_from_func_inst(fi); struct fsg_common *common = opts->common; struct fsg_dev *fsg; + unsigned nluns, i; fsg = kzalloc(sizeof(*fsg), GFP_KERNEL); if (unlikely(!fsg)) return ERR_PTR(-ENOMEM); mutex_lock(&opts->lock); + if (!opts->refcnt) { + for (nluns = i = 0; i < FSG_MAX_LUNS; ++i) + if (common->luns[i]) + nluns = i + 1; + if (!nluns) + pr_warn("No LUNS defined, continuing anyway\n"); + else + common->nluns = nluns; + pr_info("Number of LUNs=%u\n", common->nluns); + } opts->refcnt++; mutex_unlock(&opts->lock); + fsg->function.name = FSG_DRIVER_DESC; fsg->function.bind = fsg_bind; fsg->function.unbind = fsg_unbind; diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 6316aa5b1c49..ad50a67c1465 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -1145,7 +1145,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) if (opts->id && !midi->id) { status = -ENOMEM; 
mutex_unlock(&opts->lock); - goto kstrdup_fail; + goto setup_fail; } midi->in_ports = opts->in_ports; midi->out_ports = opts->out_ports; @@ -1164,8 +1164,6 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) return &midi->func; -kstrdup_fail: - f_midi_unregister_card(midi); setup_fail: for (--i; i >= 0; i--) kfree(midi->in_port[i]); diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c index e547ea7f56b1..1137e3384218 100644 --- a/drivers/usb/gadget/udc/fotg210-udc.c +++ b/drivers/usb/gadget/udc/fotg210-udc.c @@ -1171,7 +1171,7 @@ static int fotg210_udc_probe(struct platform_device *pdev) udc_name, fotg210); if (ret < 0) { pr_err("request_irq error (%d)\n", ret); - goto err_irq; + goto err_req; } ret = usb_add_gadget_udc(&pdev->dev, &fotg210->gadget); @@ -1183,7 +1183,6 @@ static int fotg210_udc_probe(struct platform_device *pdev) return 0; err_add_udc: -err_irq: free_irq(ires->start, fotg210); err_req: diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c index d32160d6463f..5da37c957b53 100644 --- a/drivers/usb/gadget/udc/mv_udc_core.c +++ b/drivers/usb/gadget/udc/mv_udc_core.c @@ -2167,7 +2167,7 @@ static int mv_udc_probe(struct platform_device *pdev) return -ENODEV; } - udc->phy_regs = ioremap(r->start, resource_size(r)); + udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (udc->phy_regs == NULL) { dev_err(&pdev->dev, "failed to map phy I/O memory\n"); return -EBUSY; diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c index d69c35558f68..362ee8af5fce 100644 --- a/drivers/usb/gadget/udc/udc-core.c +++ b/drivers/usb/gadget/udc/udc-core.c @@ -60,13 +60,15 @@ static DEFINE_MUTEX(udc_lock); int usb_gadget_map_request(struct usb_gadget *gadget, struct usb_request *req, int is_in) { + struct device *dev = gadget->dev.parent; + if (req->length == 0) return 0; if (req->num_sgs) { int mapped; - mapped = dma_map_sg(&gadget->dev, req->sg, req->num_sgs, + mapped = dma_map_sg(dev, req->sg, req->num_sgs, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); if (mapped == 0) { dev_err(&gadget->dev, "failed to map SGs\n"); @@ -75,11 +77,11 @@ int usb_gadget_map_request(struct usb_gadget *gadget, req->num_mapped_sgs = mapped; } else { - req->dma = dma_map_single(&gadget->dev, req->buf, req->length, + req->dma = dma_map_single(dev, req->buf, req->length, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); - if (dma_mapping_error(&gadget->dev, req->dma)) { - dev_err(&gadget->dev, "failed to map buffer\n"); + if (dma_mapping_error(dev, req->dma)) { + dev_err(dev, "failed to map buffer\n"); return -EFAULT; } } @@ -95,12 +97,12 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget, return; if (req->num_mapped_sgs) { - dma_unmap_sg(&gadget->dev, req->sg, req->num_mapped_sgs, + dma_unmap_sg(gadget->dev.parent, req->sg, req->num_mapped_sgs, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); req->num_mapped_sgs = 0; } else { - dma_unmap_single(&gadget->dev, req->dma, req->length, + dma_unmap_single(gadget->dev.parent, req->dma, req->length, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); } } diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index f7d561ed3c23..d029bbe9eb36 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c @@ -981,10 +981,6 @@ rescan_all: int completed, modified; __hc32 *prev; - /* Is this ED already invisible to the hardware? 
*/ - if (ed->state == ED_IDLE) - goto ed_idle; - /* only take off EDs that the HC isn't using, accounting for * frame counter wraps and EDs with partially retired TDs */ @@ -1012,12 +1008,10 @@ skip_ed: } /* ED's now officially unlinked, hc doesn't see */ - ed->state = ED_IDLE; ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H); ed->hwNextED = 0; wmb(); ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE); -ed_idle: /* reentrancy: if we drop the schedule lock, someone might * have modified this list. normally it's just prepending @@ -1088,6 +1082,7 @@ rescan_this: if (list_empty(&ed->td_list)) { *last = ed->ed_next; ed->ed_next = NULL; + ed->state = ED_IDLE; list_del(&ed->in_use_list); } else if (ohci->rh_state == OHCI_RH_RUNNING) { *last = ed->ed_next; diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c index e9a6eec39142..cfcfadfc94fc 100644 --- a/drivers/usb/host/ohci-tmio.c +++ b/drivers/usb/host/ohci-tmio.c @@ -58,7 +58,7 @@ #define CCR_PM_CKRNEN 0x0002 #define CCR_PM_USBPW1 0x0004 #define CCR_PM_USBPW2 0x0008 -#define CCR_PM_USBPW3 0x0008 +#define CCR_PM_USBPW3 0x0010 #define CCR_PM_PMEE 0x0100 #define CCR_PM_PMES 0x8000 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index e75c565feb53..78241b5550df 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -484,10 +484,13 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, u32 pls = status_reg & PORT_PLS_MASK; /* resume state is a xHCI internal state. - * Do not report it to usb core. + * Do not report it to usb core, instead, pretend to be U3, + * thus usb core knows it's not ready for transfer */ - if (pls == XDEV_RESUME) + if (pls == XDEV_RESUME) { + *status |= USB_SS_PORT_LS_U3; return; + } /* When the CAS bit is set then warm reset * should be performed on port @@ -588,7 +591,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, status |= USB_PORT_STAT_C_RESET << 16; /* USB3.0 only */ if (hcd->speed == HCD_USB3) { - if ((raw_port_status & PORT_PLC)) + /* Port link change with port in resume state should not be + * reported to usbcore, as this is an internal state to be + * handled by xhci driver. Reporting PLC to usbcore may + * cause usbcore clearing PLC first and port change event + * irq won't be generated. 
+ */ + if ((raw_port_status & PORT_PLC) && + (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) status |= USB_PORT_STAT_C_LINK_STATE << 16; if ((raw_port_status & PORT_WRC)) status |= USB_PORT_STAT_C_BH_RESET << 16; @@ -1120,10 +1130,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd) spin_lock_irqsave(&xhci->lock, flags); if (hcd->self.root_hub->do_remote_wakeup) { - if (bus_state->resuming_ports) { + if (bus_state->resuming_ports || /* USB2 */ + bus_state->port_remote_wakeup) { /* USB3 */ spin_unlock_irqrestore(&xhci->lock, flags); - xhci_dbg(xhci, "suspend failed because " - "a port is resuming\n"); + xhci_dbg(xhci, "suspend failed because a port is resuming\n"); return -EBUSY; } } diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index f8336408ef07..3e442f77a2b9 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1427,10 +1427,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, /* Attempt to use the ring cache */ if (virt_dev->num_rings_cached == 0) return -ENOMEM; + virt_dev->num_rings_cached--; virt_dev->eps[ep_index].new_ring = virt_dev->ring_cache[virt_dev->num_rings_cached]; virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL; - virt_dev->num_rings_cached--; xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring, 1, type); } diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 4a4cb1d91ac8..5590eac2b22d 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -23,10 +23,15 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/module.h> +#include <linux/acpi.h> #include "xhci.h" #include "xhci-trace.h" +#define PORT2_SSIC_CONFIG_REG2 0x883c +#define PROG_DONE (1 << 30) +#define SSIC_PORT_UNUSED (1 << 31) + /* Device for a quirk */ #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 @@ -176,20 +181,63 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) } /* + * In some Intel xHCI controllers, in order to get D3 working, + * through a vendor specific SSIC CONFIG register at offset 0x883c, + * SSIC PORT need to be marked as "unused" before putting xHCI + * into D3. After D3 exit, the SSIC port need to be marked as "used". + * Without this change, xHCI might not enter D3 state. 
* Make sure PME works on some Intel xHCI controllers by writing 1 to clear * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 */ -static void xhci_pme_quirk(struct xhci_hcd *xhci) +static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend) { + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); u32 val; void __iomem *reg; + if (pdev->vendor == PCI_VENDOR_ID_INTEL && + pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) { + + reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2; + + /* Notify SSIC that SSIC profile programming is not done */ + val = readl(reg) & ~PROG_DONE; + writel(val, reg); + + /* Mark SSIC port as unused(suspend) or used(resume) */ + val = readl(reg); + if (suspend) + val |= SSIC_PORT_UNUSED; + else + val &= ~SSIC_PORT_UNUSED; + writel(val, reg); + + /* Notify SSIC that SSIC profile programming is done */ + val = readl(reg) | PROG_DONE; + writel(val, reg); + readl(reg); + } + reg = (void __iomem *) xhci->cap_regs + 0x80a4; val = readl(reg); writel(val | BIT(28), reg); readl(reg); } +#ifdef CONFIG_ACPI +static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) +{ + static const u8 intel_dsm_uuid[] = { + 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45, + 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23, + }; + acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL); +} +#else + static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { } +#endif /* CONFIG_ACPI */ + /* called during probe() after chip reset completes */ static int xhci_pci_setup(struct usb_hcd *hcd) { @@ -263,6 +311,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) HCC_MAX_PSA(xhci->hcc_params) >= 4) xhci->shared_hcd->can_do_streams = 1; + if (xhci->quirks & XHCI_PME_STUCK_QUIRK) + xhci_pme_acpi_rtd3_enable(dev); + /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */ pm_runtime_put_noidle(&dev->dev); @@ -307,7 +358,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) pdev->no_d3cold = true; if (xhci->quirks & XHCI_PME_STUCK_QUIRK) - xhci_pme_quirk(xhci); + xhci_pme_quirk(hcd, true); return xhci_suspend(xhci, do_wakeup); } @@ -340,7 +391,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) usb_enable_intel_xhci_ports(pdev); if (xhci->quirks & XHCI_PME_STUCK_QUIRK) - xhci_pme_quirk(xhci); + xhci_pme_quirk(hcd, false); retval = xhci_resume(xhci, hibernated); return retval; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 94416ff70810..6a8fc52aed58 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1546,6 +1546,9 @@ static void handle_port_status(struct xhci_hcd *xhci, usb_hcd_resume_root_hub(hcd); } + if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE) + bus_state->port_remote_wakeup &= ~(1 << faked_port_index); + if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) { xhci_dbg(xhci, "port resume event for port %d\n", port_id); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 7da0d6043d33..526ebc0c7e72 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3453,6 +3453,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) return -EINVAL; } + if (virt_dev->tt_info) + old_active_eps = virt_dev->tt_info->active_eps; + if (virt_dev->udev != udev) { /* If the virt_dev and the udev does not match, this virt_dev * may belong to another udev. 
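The intel_dsm_uuid byte array passed to acpi_evaluate_dsm() in the xhci-pci hunk above is the _DSM GUID in ACPI byte order: the first three fields are stored little-endian, the final eight bytes as-is. A standalone sketch that prints such an array in canonical GUID form, handy when cross-checking against firmware tables; the helper name is illustrative:

#include <stdint.h>
#include <stdio.h>

/* Print a 16-byte ACPI-ordered GUID in canonical text form. */
static void print_guid(const uint8_t g[16])
{
	printf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
	       "%02x%02x%02x%02x%02x%02x\n",
	       g[3], g[2], g[1], g[0],		/* data1, little-endian */
	       g[5], g[4],			/* data2, little-endian */
	       g[7], g[6],			/* data3, little-endian */
	       g[8], g[9],			/* clock_seq bytes, as-is */
	       g[10], g[11], g[12], g[13], g[14], g[15]);
}

int main(void)
{
	static const uint8_t intel_dsm_uuid[] = {
		0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
		0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
	};

	/* Prints: ac340cb7-e901-45bf-b7e6-2b34ec931e23 */
	print_guid(intel_dsm_uuid);
	return 0;
}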
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 31e46cc55807..ed2ebf647c38 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -285,6 +285,7 @@ struct xhci_op_regs { #define XDEV_U0 (0x0 << 5) #define XDEV_U2 (0x2 << 5) #define XDEV_U3 (0x3 << 5) +#define XDEV_INACTIVE (0x6 << 5) #define XDEV_RESUME (0xf << 5) /* true: port has power (see HCC_PPC) */ #define PORT_POWER (1 << 9) diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c index 30842bc195f5..92d5f718659b 100644 --- a/drivers/usb/musb/musb_virthub.c +++ b/drivers/usb/musb/musb_virthub.c @@ -275,9 +275,7 @@ static int musb_has_gadget(struct musb *musb) #ifdef CONFIG_USB_MUSB_HOST return 1; #else - if (musb->port_mode == MUSB_PORT_MODE_HOST) - return 1; - return musb->g.dev.driver != NULL; + return musb->port_mode == MUSB_PORT_MODE_HOST; #endif } diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index 8f7cb068d29b..3fcc0483a081 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c @@ -217,6 +217,9 @@ static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy) { unsigned int vbus_value; + if (!mxs_phy->regmap_anatop) + return false; + if (mxs_phy->port_id == 0) regmap_read(mxs_phy->regmap_anatop, ANADIG_USB1_VBUS_DET_STAT, diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index ffd739e31bfc..eac7ccaa3c85 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -187,6 +187,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */ { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */ { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */ + { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */ { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */ diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 4f70df33975a..78b4f64c6b00 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -121,26 +121,26 @@ static DEFINE_SPINLOCK(release_lock); static const unsigned int dummy; /* for clarity in register access fns */ enum mos_regs { - THR, /* serial port regs */ - RHR, - IER, - FCR, - ISR, - LCR, - MCR, - LSR, - MSR, - SPR, - DLL, - DLM, - DPR, /* parallel port regs */ - DSR, - DCR, - ECR, - SP1_REG, /* device control regs */ - SP2_REG, /* serial port 2 (7720 only) */ - PP_REG, - SP_CONTROL_REG, + MOS7720_THR, /* serial port regs */ + MOS7720_RHR, + MOS7720_IER, + MOS7720_FCR, + MOS7720_ISR, + MOS7720_LCR, + MOS7720_MCR, + MOS7720_LSR, + MOS7720_MSR, + MOS7720_SPR, + MOS7720_DLL, + MOS7720_DLM, + MOS7720_DPR, /* parallel port regs */ + MOS7720_DSR, + MOS7720_DCR, + MOS7720_ECR, + MOS7720_SP1_REG, /* device control regs */ + MOS7720_SP2_REG, /* serial port 2 (7720 only) */ + MOS7720_PP_REG, + MOS7720_SP_CONTROL_REG, }; /* @@ -150,26 +150,26 @@ enum mos_regs { static inline __u16 get_reg_index(enum mos_regs reg) { static const __u16 mos7715_index_lookup_table[] = { - 0x00, /* THR */ - 0x00, /* RHR */ - 0x01, /* IER */ - 0x02, /* FCR */ - 0x02, /* ISR */ - 0x03, /* LCR */ - 0x04, /* MCR */ - 0x05, /* LSR */ - 0x06, /* MSR */ - 0x07, /* SPR */ - 0x00, /* DLL */ - 0x01, /* DLM */ - 0x00, /* DPR */ - 0x01, /* DSR */ - 0x02, /* DCR */ - 0x0a, /* ECR */ - 0x01, /* SP1_REG */ - 0x02, /* 
SP2_REG (7720 only) */ - 0x04, /* PP_REG (7715 only) */ - 0x08, /* SP_CONTROL_REG */ + 0x00, /* MOS7720_THR */ + 0x00, /* MOS7720_RHR */ + 0x01, /* MOS7720_IER */ + 0x02, /* MOS7720_FCR */ + 0x02, /* MOS7720_ISR */ + 0x03, /* MOS7720_LCR */ + 0x04, /* MOS7720_MCR */ + 0x05, /* MOS7720_LSR */ + 0x06, /* MOS7720_MSR */ + 0x07, /* MOS7720_SPR */ + 0x00, /* MOS7720_DLL */ + 0x01, /* MOS7720_DLM */ + 0x00, /* MOS7720_DPR */ + 0x01, /* MOS7720_DSR */ + 0x02, /* MOS7720_DCR */ + 0x0a, /* MOS7720_ECR */ + 0x01, /* MOS7720_SP1_REG */ + 0x02, /* MOS7720_SP2_REG (7720 only) */ + 0x04, /* MOS7720_PP_REG (7715 only) */ + 0x08, /* MOS7720_SP_CONTROL_REG */ }; return mos7715_index_lookup_table[reg]; } @@ -181,10 +181,10 @@ static inline __u16 get_reg_index(enum mos_regs reg) static inline __u16 get_reg_value(enum mos_regs reg, unsigned int serial_portnum) { - if (reg >= SP1_REG) /* control reg */ + if (reg >= MOS7720_SP1_REG) /* control reg */ return 0x0000; - else if (reg >= DPR) /* parallel port reg (7715 only) */ + else if (reg >= MOS7720_DPR) /* parallel port reg (7715 only) */ return 0x0100; else /* serial port reg */ @@ -252,7 +252,8 @@ static inline int mos7715_change_mode(struct mos7715_parport *mos_parport, enum mos7715_pp_modes mode) { mos_parport->shadowECR = mode; - write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR); + write_mos_reg(mos_parport->serial, dummy, MOS7720_ECR, + mos_parport->shadowECR); return 0; } @@ -486,7 +487,7 @@ static void parport_mos7715_write_data(struct parport *pp, unsigned char d) if (parport_prologue(pp) < 0) return; mos7715_change_mode(mos_parport, SPP); - write_mos_reg(mos_parport->serial, dummy, DPR, (__u8)d); + write_mos_reg(mos_parport->serial, dummy, MOS7720_DPR, (__u8)d); parport_epilogue(pp); } @@ -497,7 +498,7 @@ static unsigned char parport_mos7715_read_data(struct parport *pp) if (parport_prologue(pp) < 0) return 0; - read_mos_reg(mos_parport->serial, dummy, DPR, &d); + read_mos_reg(mos_parport->serial, dummy, MOS7720_DPR, &d); parport_epilogue(pp); return d; } @@ -510,7 +511,7 @@ static void parport_mos7715_write_control(struct parport *pp, unsigned char d) if (parport_prologue(pp) < 0) return; data = ((__u8)d & 0x0f) | (mos_parport->shadowDCR & 0xf0); - write_mos_reg(mos_parport->serial, dummy, DCR, data); + write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, data); mos_parport->shadowDCR = data; parport_epilogue(pp); } @@ -543,7 +544,8 @@ static unsigned char parport_mos7715_frob_control(struct parport *pp, if (parport_prologue(pp) < 0) return 0; mos_parport->shadowDCR = (mos_parport->shadowDCR & (~mask)) ^ val; - write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); + write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, + mos_parport->shadowDCR); dcr = mos_parport->shadowDCR & 0x0f; parport_epilogue(pp); return dcr; @@ -581,7 +583,8 @@ static void parport_mos7715_data_forward(struct parport *pp) return; mos7715_change_mode(mos_parport, PS2); mos_parport->shadowDCR &= ~0x20; - write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); + write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, + mos_parport->shadowDCR); parport_epilogue(pp); } @@ -593,7 +596,8 @@ static void parport_mos7715_data_reverse(struct parport *pp) return; mos7715_change_mode(mos_parport, PS2); mos_parport->shadowDCR |= 0x20; - write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); + write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, + mos_parport->shadowDCR); parport_epilogue(pp); } @@ -633,8 +637,10 @@ 
static void parport_mos7715_restore_state(struct parport *pp, spin_unlock(&release_lock); return; } - write_parport_reg_nonblock(mos_parport, DCR, mos_parport->shadowDCR); - write_parport_reg_nonblock(mos_parport, ECR, mos_parport->shadowECR); + write_parport_reg_nonblock(mos_parport, MOS7720_DCR, + mos_parport->shadowDCR); + write_parport_reg_nonblock(mos_parport, MOS7720_ECR, + mos_parport->shadowECR); spin_unlock(&release_lock); } @@ -714,14 +720,16 @@ static int mos7715_parport_init(struct usb_serial *serial) init_completion(&mos_parport->syncmsg_compl); /* cycle parallel port reset bit */ - write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x80); - write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x00); + write_mos_reg(mos_parport->serial, dummy, MOS7720_PP_REG, (__u8)0x80); + write_mos_reg(mos_parport->serial, dummy, MOS7720_PP_REG, (__u8)0x00); /* initialize device registers */ mos_parport->shadowDCR = DCR_INIT_VAL; - write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); + write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, + mos_parport->shadowDCR); mos_parport->shadowECR = ECR_INIT_VAL; - write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR); + write_mos_reg(mos_parport->serial, dummy, MOS7720_ECR, + mos_parport->shadowECR); /* register with parport core */ mos_parport->pp = parport_register_port(0, PARPORT_IRQ_NONE, @@ -1033,45 +1041,49 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port) /* Initialize MCS7720 -- Write Init values to corresponding Registers * * Register Index - * 0 : THR/RHR - * 1 : IER - * 2 : FCR - * 3 : LCR - * 4 : MCR - * 5 : LSR - * 6 : MSR - * 7 : SPR + * 0 : MOS7720_THR/MOS7720_RHR + * 1 : MOS7720_IER + * 2 : MOS7720_FCR + * 3 : MOS7720_LCR + * 4 : MOS7720_MCR + * 5 : MOS7720_LSR + * 6 : MOS7720_MSR + * 7 : MOS7720_SPR * * 0x08 : SP1/2 Control Reg */ port_number = port->port_number; - read_mos_reg(serial, port_number, LSR, &data); + read_mos_reg(serial, port_number, MOS7720_LSR, &data); dev_dbg(&port->dev, "SS::%p LSR:%x\n", mos7720_port, data); - write_mos_reg(serial, dummy, SP1_REG, 0x02); - write_mos_reg(serial, dummy, SP2_REG, 0x02); + write_mos_reg(serial, dummy, MOS7720_SP1_REG, 0x02); + write_mos_reg(serial, dummy, MOS7720_SP2_REG, 0x02); - write_mos_reg(serial, port_number, IER, 0x00); - write_mos_reg(serial, port_number, FCR, 0x00); + write_mos_reg(serial, port_number, MOS7720_IER, 0x00); + write_mos_reg(serial, port_number, MOS7720_FCR, 0x00); - write_mos_reg(serial, port_number, FCR, 0xcf); + write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf); mos7720_port->shadowLCR = 0x03; - write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); + write_mos_reg(serial, port_number, MOS7720_LCR, + mos7720_port->shadowLCR); mos7720_port->shadowMCR = 0x0b; - write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); + write_mos_reg(serial, port_number, MOS7720_MCR, + mos7720_port->shadowMCR); - write_mos_reg(serial, port_number, SP_CONTROL_REG, 0x00); - read_mos_reg(serial, dummy, SP_CONTROL_REG, &data); + write_mos_reg(serial, port_number, MOS7720_SP_CONTROL_REG, 0x00); + read_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, &data); data = data | (port->port_number + 1); - write_mos_reg(serial, dummy, SP_CONTROL_REG, data); + write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, data); mos7720_port->shadowLCR = 0x83; - write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); - write_mos_reg(serial, port_number, THR, 0x0c); - write_mos_reg(serial, 
port_number, IER, 0x00); + write_mos_reg(serial, port_number, MOS7720_LCR, + mos7720_port->shadowLCR); + write_mos_reg(serial, port_number, MOS7720_THR, 0x0c); + write_mos_reg(serial, port_number, MOS7720_IER, 0x00); mos7720_port->shadowLCR = 0x03; - write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); - write_mos_reg(serial, port_number, IER, 0x0c); + write_mos_reg(serial, port_number, MOS7720_LCR, + mos7720_port->shadowLCR); + write_mos_reg(serial, port_number, MOS7720_IER, 0x0c); response = usb_submit_urb(port->read_urb, GFP_KERNEL); if (response) @@ -1144,8 +1156,8 @@ static void mos7720_close(struct usb_serial_port *port) usb_kill_urb(port->write_urb); usb_kill_urb(port->read_urb); - write_mos_reg(serial, port->port_number, MCR, 0x00); - write_mos_reg(serial, port->port_number, IER, 0x00); + write_mos_reg(serial, port->port_number, MOS7720_MCR, 0x00); + write_mos_reg(serial, port->port_number, MOS7720_IER, 0x00); mos7720_port->open = 0; } @@ -1169,7 +1181,8 @@ static void mos7720_break(struct tty_struct *tty, int break_state) data = mos7720_port->shadowLCR & ~UART_LCR_SBC; mos7720_port->shadowLCR = data; - write_mos_reg(serial, port->port_number, LCR, mos7720_port->shadowLCR); + write_mos_reg(serial, port->port_number, MOS7720_LCR, + mos7720_port->shadowLCR); } /* @@ -1297,7 +1310,7 @@ static void mos7720_throttle(struct tty_struct *tty) /* if we are implementing RTS/CTS, toggle that line */ if (tty->termios.c_cflag & CRTSCTS) { mos7720_port->shadowMCR &= ~UART_MCR_RTS; - write_mos_reg(port->serial, port->port_number, MCR, + write_mos_reg(port->serial, port->port_number, MOS7720_MCR, mos7720_port->shadowMCR); } } @@ -1327,7 +1340,7 @@ static void mos7720_unthrottle(struct tty_struct *tty) /* if we are implementing RTS/CTS, toggle that line */ if (tty->termios.c_cflag & CRTSCTS) { mos7720_port->shadowMCR |= UART_MCR_RTS; - write_mos_reg(port->serial, port->port_number, MCR, + write_mos_reg(port->serial, port->port_number, MOS7720_MCR, mos7720_port->shadowMCR); } } @@ -1352,35 +1365,39 @@ static int set_higher_rates(struct moschip_port *mos7720_port, dev_dbg(&port->dev, "Sending Setting Commands ..........\n"); port_number = port->port_number; - write_mos_reg(serial, port_number, IER, 0x00); - write_mos_reg(serial, port_number, FCR, 0x00); - write_mos_reg(serial, port_number, FCR, 0xcf); + write_mos_reg(serial, port_number, MOS7720_IER, 0x00); + write_mos_reg(serial, port_number, MOS7720_FCR, 0x00); + write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf); mos7720_port->shadowMCR = 0x0b; - write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); - write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x00); + write_mos_reg(serial, port_number, MOS7720_MCR, + mos7720_port->shadowMCR); + write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, 0x00); /*********************************************** * Set for higher rates * ***********************************************/ /* writing baud rate verbatum into uart clock field clearly not right */ if (port_number == 0) - sp_reg = SP1_REG; + sp_reg = MOS7720_SP1_REG; else - sp_reg = SP2_REG; + sp_reg = MOS7720_SP2_REG; write_mos_reg(serial, dummy, sp_reg, baud * 0x10); - write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x03); + write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, 0x03); mos7720_port->shadowMCR = 0x2b; - write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); + write_mos_reg(serial, port_number, MOS7720_MCR, + mos7720_port->shadowMCR); /*********************************************** * Set DLL/DLM 
***********************************************/ mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB; - write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); - write_mos_reg(serial, port_number, DLL, 0x01); - write_mos_reg(serial, port_number, DLM, 0x00); + write_mos_reg(serial, port_number, MOS7720_LCR, + mos7720_port->shadowLCR); + write_mos_reg(serial, port_number, MOS7720_DLL, 0x01); + write_mos_reg(serial, port_number, MOS7720_DLM, 0x00); mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB; - write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); + write_mos_reg(serial, port_number, MOS7720_LCR, + mos7720_port->shadowLCR); return 0; } @@ -1488,15 +1505,16 @@ static int send_cmd_write_baud_rate(struct moschip_port *mos7720_port, /* Enable access to divisor latch */ mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB; - write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR); + write_mos_reg(serial, number, MOS7720_LCR, mos7720_port->shadowLCR); /* Write the divisor */ - write_mos_reg(serial, number, DLL, (__u8)(divisor & 0xff)); - write_mos_reg(serial, number, DLM, (__u8)((divisor & 0xff00) >> 8)); + write_mos_reg(serial, number, MOS7720_DLL, (__u8)(divisor & 0xff)); + write_mos_reg(serial, number, MOS7720_DLM, + (__u8)((divisor & 0xff00) >> 8)); /* Disable access to divisor latch */ mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB; - write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR); + write_mos_reg(serial, number, MOS7720_LCR, mos7720_port->shadowLCR); return status; } @@ -1600,14 +1618,16 @@ static void change_port_settings(struct tty_struct *tty, /* Disable Interrupts */ - write_mos_reg(serial, port_number, IER, 0x00); - write_mos_reg(serial, port_number, FCR, 0x00); - write_mos_reg(serial, port_number, FCR, 0xcf); + write_mos_reg(serial, port_number, MOS7720_IER, 0x00); + write_mos_reg(serial, port_number, MOS7720_FCR, 0x00); + write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf); /* Send the updated LCR value to the mos7720 */ - write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); + write_mos_reg(serial, port_number, MOS7720_LCR, + mos7720_port->shadowLCR); mos7720_port->shadowMCR = 0x0b; - write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); + write_mos_reg(serial, port_number, MOS7720_MCR, + mos7720_port->shadowMCR); /* set up the MCR register and send it to the mos7720 */ mos7720_port->shadowMCR = UART_MCR_OUT2; @@ -1619,14 +1639,17 @@ static void change_port_settings(struct tty_struct *tty, /* To set hardware flow control to the specified * * serial port, in SP1/2_CONTROL_REG */ if (port_number) - write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01); + write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, + 0x01); else - write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02); + write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, + 0x02); } else mos7720_port->shadowMCR &= ~(UART_MCR_XONANY); - write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); + write_mos_reg(serial, port_number, MOS7720_MCR, + mos7720_port->shadowMCR); /* Determine divisor based on baud rate */ baud = tty_get_baud_rate(tty); @@ -1639,7 +1662,7 @@ static void change_port_settings(struct tty_struct *tty, if (baud >= 230400) { set_higher_rates(mos7720_port, baud); /* Enable Interrupts */ - write_mos_reg(serial, port_number, IER, 0x0c); + write_mos_reg(serial, port_number, MOS7720_IER, 0x0c); return; } @@ -1650,7 +1673,7 @@ static void change_port_settings(struct tty_struct *tty, 
if (cflag & CBAUD) tty_encode_baud_rate(tty, baud, baud); /* Enable Interrupts */ - write_mos_reg(serial, port_number, IER, 0x0c); + write_mos_reg(serial, port_number, MOS7720_IER, 0x0c); if (port->read_urb->status != -EINPROGRESS) { status = usb_submit_urb(port->read_urb, GFP_KERNEL); @@ -1725,7 +1748,7 @@ static int get_lsr_info(struct tty_struct *tty, count = mos7720_chars_in_buffer(tty); if (count == 0) { - read_mos_reg(port->serial, port_number, LSR, &data); + read_mos_reg(port->serial, port_number, MOS7720_LSR, &data); if ((data & (UART_LSR_TEMT | UART_LSR_THRE)) == (UART_LSR_TEMT | UART_LSR_THRE)) { dev_dbg(&port->dev, "%s -- Empty\n", __func__); @@ -1782,7 +1805,7 @@ static int mos7720_tiocmset(struct tty_struct *tty, mcr &= ~UART_MCR_LOOP; mos7720_port->shadowMCR = mcr; - write_mos_reg(port->serial, port->port_number, MCR, + write_mos_reg(port->serial, port->port_number, MOS7720_MCR, mos7720_port->shadowMCR); return 0; @@ -1827,7 +1850,7 @@ static int set_modem_info(struct moschip_port *mos7720_port, unsigned int cmd, } mos7720_port->shadowMCR = mcr; - write_mos_reg(port->serial, port->port_number, MCR, + write_mos_reg(port->serial, port->port_number, MOS7720_MCR, mos7720_port->shadowMCR); return 0; @@ -1942,7 +1965,7 @@ static int mos7720_startup(struct usb_serial *serial) } #endif /* LSR For Port 1 */ - read_mos_reg(serial, 0, LSR, &data); + read_mos_reg(serial, 0, MOS7720_LSR, &data); dev_dbg(&dev->dev, "LSR:%x\n", data); return 0; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index f0c0c53359ad..19b85ee98a72 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1765,6 +1765,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, { } /* Terminating entry */ diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 529066bbc7e8..46f1f13b41f1 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -1306,6 +1306,7 @@ static void __exit usb_serial_exit(void) tty_unregister_driver(usb_serial_tty_driver); put_tty_driver(usb_serial_tty_driver); bus_unregister(&usb_serial_bus_type); + idr_destroy(&serial_minors); } diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index caf188800c67..6b2479123de7 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2065,6 +2065,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_READ_DISC_INFO ), +/* Reported by Oliver Neukum <oneukum@suse.com> + * This device morphes spontaneously into another device if the access + * pattern of Windows isn't followed. Thus writable media would be dirty + * if the initial instance is used. So the device is limited to its + * virtual CD. 
+ * And yes, the concept that BCD goes up to 9 is not heeded */ +UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff, + "ZTE,Incorporated", + "ZTE WCDMA Technologies MSM", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_SINGLE_LUN ), + /* Reported by Sven Geggus <sven-usbst@geggus.net> * This encrypted pen drive returns bogus data for the initial READ(10). */ @@ -2074,6 +2086,17 @@ UNUSUAL_DEV( 0x1b1c, 0x1ab5, 0x0200, 0x0200, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_INITIAL_READ10 ), +/* Reported by Hans de Goede <hdegoede@redhat.com> + * These are mini projectors using USB for both power and video data transport + * The usb-storage interface is a virtual windows driver CD, which the gm12u320 + * driver automatically converts into framebuffer & kms dri device nodes. + */ +UNUSUAL_DEV( 0x1de1, 0xc102, 0x0000, 0xffff, + "Grain-media Technology Corp.", + "USB3.0 Device GM12U320", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_DEVICE ), + /* Patch by Richard Schütz <r.schtz@t-online.de> * This external hard drive enclosure uses a JMicron chip which * needs the US_FL_IGNORE_RESIDUE flag to work properly. */ diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 9e8e004bb1c3..a9fe859f43c8 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -22,14 +22,20 @@ #include <linux/file.h> #include <linux/highmem.h> #include <linux/slab.h> +#include <linux/vmalloc.h> #include <linux/kthread.h> #include <linux/cgroup.h> #include <linux/module.h> +#include <linux/sort.h> #include "vhost.h" +static ushort max_mem_regions = 64; +module_param(max_mem_regions, ushort, 0444); +MODULE_PARM_DESC(max_mem_regions, + "Maximum number of memory regions in memory map. (default: 64)"); + enum { - VHOST_MEMORY_MAX_NREGIONS = 64, VHOST_MEMORY_F_LOG = 0x1, }; @@ -543,7 +549,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked) fput(dev->log_file); dev->log_file = NULL; /* No one will access memory at this point */ - kfree(dev->memory); + kvfree(dev->memory); dev->memory = NULL; WARN_ON(!list_empty(&dev->work_list)); if (dev->worker) { @@ -663,6 +669,28 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq) } EXPORT_SYMBOL_GPL(vhost_vq_access_ok); +static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2) +{ + const struct vhost_memory_region *r1 = p1, *r2 = p2; + if (r1->guest_phys_addr < r2->guest_phys_addr) + return 1; + if (r1->guest_phys_addr > r2->guest_phys_addr) + return -1; + return 0; +} + +static void *vhost_kvzalloc(unsigned long size) +{ + void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); + + if (!n) { + n = vzalloc(size); + if (!n) + return ERR_PTR(-ENOMEM); + } + return n; +} + static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) { struct vhost_memory mem, *newmem, *oldmem; @@ -673,21 +701,23 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) return -EFAULT; if (mem.padding) return -EOPNOTSUPP; - if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS) + if (mem.nregions > max_mem_regions) return -E2BIG; - newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL); + newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions)); if (!newmem) return -ENOMEM; memcpy(newmem, &mem, size); if (copy_from_user(newmem->regions, m->regions, mem.nregions * sizeof *m->regions)) { - kfree(newmem); + kvfree(newmem); return -EFAULT; } + sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions), + vhost_memory_reg_sort_cmp, NULL); if (!memory_access_ok(d, newmem, 0)) { - kfree(newmem); + 
kvfree(newmem); return -EFAULT; } oldmem = d->memory; @@ -699,7 +729,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) d->vqs[i]->memory = newmem; mutex_unlock(&d->vqs[i]->mutex); } - kfree(oldmem); + kvfree(oldmem); return 0; } @@ -992,17 +1022,22 @@ EXPORT_SYMBOL_GPL(vhost_dev_ioctl); static const struct vhost_memory_region *find_region(struct vhost_memory *mem, __u64 addr, __u32 len) { - struct vhost_memory_region *reg; - int i; + const struct vhost_memory_region *reg; + int start = 0, end = mem->nregions; - /* linear search is not brilliant, but we really have on the order of 6 - * regions in practice */ - for (i = 0; i < mem->nregions; ++i) { - reg = mem->regions + i; - if (reg->guest_phys_addr <= addr && - reg->guest_phys_addr + reg->memory_size - 1 >= addr) - return reg; + while (start < end) { + int slot = start + (end - start) / 2; + reg = mem->regions + slot; + if (addr >= reg->guest_phys_addr) + end = slot; + else + start = slot + 1; } + + reg = mem->regions + start; + if (addr >= reg->guest_phys_addr && + reg->guest_phys_addr + reg->memory_size > addr) + return reg; return NULL; } diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c index c1b03f4235b9..4e7fec36f5c3 100644 --- a/drivers/watchdog/sp805_wdt.c +++ b/drivers/watchdog/sp805_wdt.c @@ -4,7 +4,7 @@ * Watchdog driver for ARM SP805 watchdog module * * Copyright (C) 2010 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2 or later. This program is licensed "as is" without any @@ -303,6 +303,6 @@ static struct amba_driver sp805_wdt_driver = { module_amba_driver(sp805_wdt_driver); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); MODULE_DESCRIPTION("ARM SP805 Watchdog Driver"); MODULE_LICENSE("GPL"); |
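
Note on the mos7720.c hunks above: the change is a mechanical rename in which every enumerator of enum mos_regs gains a driver-specific MOS7720_ prefix (plausibly to keep the names from clashing with generic UART register identifiers used elsewhere in the tree), and every write_mos_reg()/read_mos_reg() caller is updated to match. The stand-alone user-space sketch below only illustrates the renamed register map; the enum order is reconstructed from the lookup-table comments in the hunk, and get_reg_value() is omitted because its serial-port encoding is not visible in this diff. None of this is the driver's actual code.

/*
 * Illustration of the MOS7720_-prefixed register enum and the on-chip
 * index lookup shown in the hunk above.  Enum order is reconstructed
 * from the table comments; values are copied from the hunk.
 */
#include <stdio.h>

enum mos_regs {
	MOS7720_THR,
	MOS7720_RHR,
	MOS7720_IER,
	MOS7720_FCR,
	MOS7720_ISR,
	MOS7720_LCR,
	MOS7720_MCR,
	MOS7720_LSR,
	MOS7720_MSR,
	MOS7720_SPR,
	MOS7720_DLL,
	MOS7720_DLM,
	MOS7720_DPR,		/* parallel-port registers (7715 only) start here */
	MOS7720_DSR,
	MOS7720_DCR,
	MOS7720_ECR,
	MOS7720_SP1_REG,	/* control registers start here */
	MOS7720_SP2_REG,	/* 7720 only */
	MOS7720_PP_REG,		/* 7715 only */
	MOS7720_SP_CONTROL_REG,
};

/* On-chip register index for each enumerator (values taken from the hunk). */
static const unsigned char mos7715_index_lookup_table[] = {
	0x00, /* MOS7720_THR */
	0x00, /* MOS7720_RHR */
	0x01, /* MOS7720_IER */
	0x02, /* MOS7720_FCR */
	0x02, /* MOS7720_ISR */
	0x03, /* MOS7720_LCR */
	0x04, /* MOS7720_MCR */
	0x05, /* MOS7720_LSR */
	0x06, /* MOS7720_MSR */
	0x07, /* MOS7720_SPR */
	0x00, /* MOS7720_DLL */
	0x01, /* MOS7720_DLM */
	0x00, /* MOS7720_DPR */
	0x01, /* MOS7720_DSR */
	0x02, /* MOS7720_DCR */
	0x0a, /* MOS7720_ECR */
	0x01, /* MOS7720_SP1_REG */
	0x02, /* MOS7720_SP2_REG */
	0x04, /* MOS7720_PP_REG */
	0x08, /* MOS7720_SP_CONTROL_REG */
};

static unsigned short get_reg_index(enum mos_regs reg)
{
	return mos7715_index_lookup_table[reg];
}

int main(void)
{
	printf("MOS7720_LCR            -> index %#x\n",
	       (unsigned)get_reg_index(MOS7720_LCR));
	printf("MOS7720_SP_CONTROL_REG -> index %#x\n",
	       (unsigned)get_reg_index(MOS7720_SP_CONTROL_REG));
	return 0;
}

With the prefix in place, a call such as write_mos_reg(serial, port_number, MOS7720_LCR, ...) reads unambiguously even next to other drivers that define their own LCR/IER names.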
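
Note on the vhost.c hunks above: the patch drops the fixed VHOST_MEMORY_MAX_NREGIONS limit in favour of a max_mem_regions module parameter, allocates the memory map with a kzalloc()-then-vzalloc() fallback (freed with kvfree()), sorts the regions by descending guest_phys_addr, and replaces the linear scan in find_region() with a binary search. Below is a minimal user-space sketch of that lookup technique only; the struct, helper names and sample addresses are invented for illustration and this is not the kernel implementation.

/*
 * Sketch of the region lookup used by the vhost change above: sort the
 * regions by descending guest_phys_addr, then binary-search for the
 * region with the largest base address not above the target.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_region {
	uint64_t guest_phys_addr;
	uint64_t memory_size;
};

/* qsort comparator: larger guest_phys_addr sorts first (descending). */
static int region_cmp_desc(const void *p1, const void *p2)
{
	const struct demo_region *r1 = p1, *r2 = p2;

	if (r1->guest_phys_addr < r2->guest_phys_addr)
		return 1;
	if (r1->guest_phys_addr > r2->guest_phys_addr)
		return -1;
	return 0;
}

/* Binary search over the descending array; NULL if addr is unmapped. */
static const struct demo_region *find_region(const struct demo_region *regions,
					     int nregions, uint64_t addr)
{
	int start = 0, end = nregions;

	while (start < end) {
		int slot = start + (end - start) / 2;
		const struct demo_region *reg = regions + slot;

		if (addr >= reg->guest_phys_addr)
			end = slot;	/* a larger base to the left may still fit */
		else
			start = slot + 1;
	}
	if (start == nregions)
		return NULL;	/* addr lies below every region base */

	const struct demo_region *reg = regions + start;
	if (addr >= reg->guest_phys_addr &&
	    addr - reg->guest_phys_addr < reg->memory_size)
		return reg;
	return NULL;
}

int main(void)
{
	struct demo_region regions[] = {
		{ 0x00000000ULL,  0x000a0000ULL },
		{ 0x00100000ULL,  0x3ff00000ULL },
		{ 0x100000000ULL, 0x40000000ULL },
	};
	int n = sizeof(regions) / sizeof(regions[0]);

	qsort(regions, n, sizeof(regions[0]), region_cmp_desc);

	const struct demo_region *reg = find_region(regions, n, 0x00200000ULL);
	if (reg)
		printf("0x00200000 -> region base %#llx\n",
		       (unsigned long long)reg->guest_phys_addr);
	else
		printf("0x00200000 is unmapped\n");
	return 0;
}

Sorting in descending order means the search settles on the region with the largest base address not above the target, which, in a non-overlapping map, is the only region that can contain it.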