author    | Ingo Molnar <mingo@kernel.org> | 2015-08-18 10:39:47 +0300
committer | Ingo Molnar <mingo@kernel.org> | 2015-08-18 10:39:47 +0300
commit    | a5dd19249607474ed680f16b4f5e8477d2b68689 (patch)
tree      | 1aeeef053773c07bc02f3aa637febc80ceb96732 /drivers
parent    | 6b7e26547fad7ace3dcb27a5babd2317fb9d1e12 (diff)
parent    | 512255a2ad2c832ca7d4de9f31245f73781922d0 (diff)
download  | linux-a5dd19249607474ed680f16b4f5e8477d2b68689.tar.xz
Merge branch 'x86/urgent' into x86/asm to fix up conflicts and to pick up fixes
Conflicts:
arch/x86/entry/entry_64_compat.S
arch/x86/math-emu/get_address.c
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers')
267 files changed, 4388 insertions, 2405 deletions
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 717afcdb5f4a..88dbbb115285 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -231,7 +231,7 @@ int acpi_device_set_power(struct acpi_device *device, int state) dev_warn(&device->dev, "Failed to change power state to %s\n", acpi_power_state_string(state)); } else { - device->power.state = state; + device->power.state = target_state; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] transitioned to %s\n", device->pnp.bus_id, diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index 81751a49d8bf..56486d92c4e7 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, if (!blk) return -ENOMEM; - present = krealloc(rbnode->cache_present, - BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL); - if (!present) { - kfree(blk); - return -ENOMEM; + if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) { + present = krealloc(rbnode->cache_present, + BITS_TO_LONGS(blklen) * sizeof(*present), + GFP_KERNEL); + if (!present) { + kfree(blk); + return -ENOMEM; + } + + memset(present + BITS_TO_LONGS(rbnode->blklen), 0, + (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen)) + * sizeof(*present)); + } else { + present = rbnode->cache_present; } /* insert the register value in the correct place in the rbnode block */ diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index d94529d5c8e9..bc67a93aa4f4 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -523,6 +523,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...) # define rbd_assert(expr) ((void) 0) #endif /* !RBD_DEBUG */ +static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request); static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request); static void rbd_img_parent_read(struct rbd_obj_request *obj_request); static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); @@ -1818,6 +1819,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request) obj_request_done_set(obj_request); } +static void rbd_osd_call_callback(struct rbd_obj_request *obj_request) +{ + dout("%s: obj %p\n", __func__, obj_request); + + if (obj_request_img_data_test(obj_request)) + rbd_osd_copyup_callback(obj_request); + else + obj_request_done_set(obj_request); +} + static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, struct ceph_msg *msg) { @@ -1866,6 +1877,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, rbd_osd_discard_callback(obj_request); break; case CEPH_OSD_OP_CALL: + rbd_osd_call_callback(obj_request); + break; case CEPH_OSD_OP_NOTIFY_ACK: case CEPH_OSD_OP_WATCH: rbd_osd_trivial_callback(obj_request); @@ -2530,13 +2543,15 @@ out_unwind: } static void -rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) +rbd_osd_copyup_callback(struct rbd_obj_request *obj_request) { struct rbd_img_request *img_request; struct rbd_device *rbd_dev; struct page **pages; u32 page_count; + dout("%s: obj %p\n", __func__, obj_request); + rbd_assert(obj_request->type == OBJ_REQUEST_BIO || obj_request->type == OBJ_REQUEST_NODATA); rbd_assert(obj_request_img_data_test(obj_request)); @@ -2563,9 +2578,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) if (!obj_request->result) obj_request->xferred = obj_request->length; - /* Finish up with the normal image object callback */ - - rbd_img_obj_callback(obj_request); 
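
The regcache-rbtree hunk above only reallocates the cache_present bitmap when the block actually grows, and then zeroes just the newly added longs, because krealloc() leaves the extension uninitialized. A minimal standalone sketch of that grow-and-zero pattern (hypothetical helper, not part of the patch):

static unsigned long *grow_present_bitmap(unsigned long *old,
					  unsigned int old_bits,
					  unsigned int new_bits)
{
	unsigned int old_longs = BITS_TO_LONGS(old_bits);
	unsigned int new_longs = BITS_TO_LONGS(new_bits);
	unsigned long *map;

	if (new_longs <= old_longs)
		return old;		/* still fits, reuse as-is */

	map = krealloc(old, new_longs * sizeof(*map), GFP_KERNEL);
	if (!map)
		return NULL;		/* caller still owns 'old' */

	/* krealloc() preserves the old longs but not the new tail */
	memset(map + old_longs, 0, (new_longs - old_longs) * sizeof(*map));
	return map;
}
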
+ obj_request_done_set(obj_request); } static void @@ -2650,7 +2663,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) /* All set, send it off. */ - orig_request->callback = rbd_img_obj_copyup_callback; osdc = &rbd_dev->rbd_client->client->osdc; img_result = rbd_obj_request_submit(osdc, orig_request); if (!img_result) diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index ced96777b677..954c0029fb3b 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -369,8 +369,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif) return; } - if (work_pending(&blkif->persistent_purge_work)) { - pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n"); + if (work_busy(&blkif->persistent_purge_work)) { + pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n"); return; } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 6d89ed35d80c..7a8a73f1fc04 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -179,6 +179,7 @@ static DEFINE_SPINLOCK(minor_lock); ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) static int blkfront_setup_indirect(struct blkfront_info *info); +static int blkfront_gather_backend_features(struct blkfront_info *info); static int get_id_from_freelist(struct blkfront_info *info) { @@ -1128,8 +1129,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, * Add the used indirect page back to the list of * available pages for indirect grefs. */ - indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); - list_add(&indirect_page->lru, &info->indirect_pages); + if (!info->feature_persistent) { + indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); + list_add(&indirect_page->lru, &info->indirect_pages); + } s->indirect_grants[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->indirect_grants[i]->node, &info->grants); } @@ -1519,7 +1522,7 @@ static int blkif_recover(struct blkfront_info *info) info->shadow_free = info->ring.req_prod_pvt; info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff; - rc = blkfront_setup_indirect(info); + rc = blkfront_gather_backend_features(info); if (rc) { kfree(copy); return rc; @@ -1720,20 +1723,13 @@ static void blkfront_setup_discard(struct blkfront_info *info) static int blkfront_setup_indirect(struct blkfront_info *info) { - unsigned int indirect_segments, segs; + unsigned int segs; int err, i; - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-max-indirect-segments", "%u", &indirect_segments, - NULL); - if (err) { - info->max_indirect_segments = 0; + if (info->max_indirect_segments == 0) segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; - } else { - info->max_indirect_segments = min(indirect_segments, - xen_blkif_max_segments); + else segs = info->max_indirect_segments; - } err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info)); if (err) @@ -1797,6 +1793,68 @@ out_of_memory: } /* + * Gather all backend feature-* + */ +static int blkfront_gather_backend_features(struct blkfront_info *info) +{ + int err; + int barrier, flush, discard, persistent; + unsigned int indirect_segments; + + info->feature_flush = 0; + + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-barrier", "%d", &barrier, + NULL); + + /* + * If there's no "feature-barrier" defined, then it means + * we're dealing with a very old backend which writes + * 
synchronously; nothing to do. + * + * If there are barriers, then we use flush. + */ + if (!err && barrier) + info->feature_flush = REQ_FLUSH | REQ_FUA; + /* + * And if there is "feature-flush-cache" use that above + * barriers. + */ + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-flush-cache", "%d", &flush, + NULL); + + if (!err && flush) + info->feature_flush = REQ_FLUSH; + + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-discard", "%d", &discard, + NULL); + + if (!err && discard) + blkfront_setup_discard(info); + + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-persistent", "%u", &persistent, + NULL); + if (err) + info->feature_persistent = 0; + else + info->feature_persistent = persistent; + + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-max-indirect-segments", "%u", &indirect_segments, + NULL); + if (err) + info->max_indirect_segments = 0; + else + info->max_indirect_segments = min(indirect_segments, + xen_blkif_max_segments); + + return blkfront_setup_indirect(info); +} + +/* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). */ @@ -1807,7 +1865,6 @@ static void blkfront_connect(struct blkfront_info *info) unsigned int physical_sector_size; unsigned int binfo; int err; - int barrier, flush, discard, persistent; switch (info->connected) { case BLKIF_STATE_CONNECTED: @@ -1864,48 +1921,7 @@ static void blkfront_connect(struct blkfront_info *info) if (err != 1) physical_sector_size = sector_size; - info->feature_flush = 0; - - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-barrier", "%d", &barrier, - NULL); - - /* - * If there's no "feature-barrier" defined, then it means - * we're dealing with a very old backend which writes - * synchronously; nothing to do. - * - * If there are barriers, then we use flush. - */ - if (!err && barrier) - info->feature_flush = REQ_FLUSH | REQ_FUA; - /* - * And if there is "feature-flush-cache" use that above - * barriers. 
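
The blkfront_gather_backend_features() function added above collects every backend "feature-*" probe in one place, so blkif_recover() can re-run it after migration, when the new backend may advertise a different feature set. Each probe follows the same shape, sketched here with a hypothetical key name (an absent key is simply treated as "feature off"):

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-example", "%u", &val, NULL);
	if (err)
		val = 0;	/* key not present: feature not advertised */
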
- */ - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-flush-cache", "%d", &flush, - NULL); - - if (!err && flush) - info->feature_flush = REQ_FLUSH; - - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-discard", "%d", &discard, - NULL); - - if (!err && discard) - blkfront_setup_discard(info); - - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-persistent", "%u", &persistent, - NULL); - if (err) - info->feature_persistent = 0; - else - info->feature_persistent = persistent; - - err = blkfront_setup_indirect(info); + err = blkfront_gather_backend_features(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s", info->xbdev->otherend); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index fb655e8d1e3b..763301c7828c 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -496,10 +496,9 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize) kfree(meta); } -static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize) +static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize) { size_t num_pages; - char pool_name[8]; struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL); if (!meta) @@ -512,7 +511,6 @@ static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize) goto out_error; } - snprintf(pool_name, sizeof(pool_name), "zram%d", device_id); meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM); if (!meta->mem_pool) { pr_err("Error creating memory pool\n"); @@ -1031,7 +1029,7 @@ static ssize_t disksize_store(struct device *dev, return -EINVAL; disksize = PAGE_ALIGN(disksize); - meta = zram_meta_alloc(zram->disk->first_minor, disksize); + meta = zram_meta_alloc(zram->disk->disk_name, disksize); if (!meta) return -ENOMEM; diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index da8faf78536a..5643b65cee20 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused) static void start_khwrngd(void) { hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng"); - if (hwrng_fill == ERR_PTR(-ENOMEM)) { + if (IS_ERR(hwrng_fill)) { pr_err("hwrng_fill thread creation failed"); hwrng_fill = NULL; } diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c index 4b93a1efb36d..ac03ba49e9d1 100644 --- a/drivers/clk/pxa/clk-pxa3xx.c +++ b/drivers/clk/pxa/clk-pxa3xx.c @@ -126,7 +126,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" }; PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" }; PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" }; -#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB) +#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? 
&CKENB : &CKENA) #define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \ div_hp, bit, is_lp, flags) \ PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \ diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index b8ff3c64cc45..c96de14036a0 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -661,6 +661,9 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs) { struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); + if (!ch->cs_enabled) + return; + sh_cmt_stop(ch, FLAG_CLOCKSOURCE); pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev); } @@ -669,6 +672,9 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs) { struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); + if (!ch->cs_enabled) + return; + pm_genpd_syscore_poweron(&ch->cmt->pdev->dev); sh_cmt_start(ch, FLAG_CLOCKSOURCE); } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 26063afb3eba..7a3c30c4336f 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1002,7 +1002,7 @@ static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy) int ret = 0; /* Some related CPUs might not be present (physically hotplugged) */ - for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) { + for_each_cpu(j, policy->real_cpus) { if (j == policy->kobj_cpu) continue; @@ -1019,7 +1019,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy) unsigned int j; /* Some related CPUs might not be present (physically hotplugged) */ - for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) { + for_each_cpu(j, policy->real_cpus) { if (j == policy->kobj_cpu) continue; @@ -1163,11 +1163,14 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev) if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) goto err_free_cpumask; + if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL)) + goto err_free_rcpumask; + ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj, "cpufreq"); if (ret) { pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret); - goto err_free_rcpumask; + goto err_free_real_cpus; } INIT_LIST_HEAD(&policy->policy_list); @@ -1184,6 +1187,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev) return policy; +err_free_real_cpus: + free_cpumask_var(policy->real_cpus); err_free_rcpumask: free_cpumask_var(policy->related_cpus); err_free_cpumask: @@ -1234,6 +1239,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify) write_unlock_irqrestore(&cpufreq_driver_lock, flags); cpufreq_policy_put_kobj(policy, notify); + free_cpumask_var(policy->real_cpus); free_cpumask_var(policy->related_cpus); free_cpumask_var(policy->cpus); kfree(policy); @@ -1258,14 +1264,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) pr_debug("adding CPU %u\n", cpu); - /* - * Only possible if 'cpu' wasn't physically present earlier and we are - * here from subsys_interface add callback. A hotplug notifier will - * follow and we will handle it like logical CPU hotplug then. For now, - * just create the sysfs link. - */ - if (cpu_is_offline(cpu)) - return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu); + if (cpu_is_offline(cpu)) { + /* + * Only possible if we are here from the subsys_interface add + * callback. A hotplug notifier will follow and we will handle + * it as CPU online then. For now, just create the sysfs link, + * unless there is no policy or the link is already present. 
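
The cpufreq_policy_alloc() hunks above extend the error-unwind chain for the new real_cpus mask: every allocation gets a matching label, so a failure frees exactly what was already allocated, in reverse order. The generic shape of that pattern (sketch with made-up names, not the driver code):

struct thing {
	cpumask_var_t cpus;
	cpumask_var_t real_cpus;
};

static struct thing *thing_alloc(void)
{
	struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return NULL;
	if (!zalloc_cpumask_var(&t->cpus, GFP_KERNEL))
		goto err_free_thing;
	if (!zalloc_cpumask_var(&t->real_cpus, GFP_KERNEL))
		goto err_free_cpus;

	return t;

err_free_cpus:
	free_cpumask_var(t->cpus);	/* undo in reverse order */
err_free_thing:
	kfree(t);
	return NULL;
}
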
+ */ + policy = per_cpu(cpufreq_cpu_data, cpu); + return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus) + ? add_cpu_dev_symlink(policy, cpu) : 0; + } if (!down_read_trylock(&cpufreq_rwsem)) return 0; @@ -1307,6 +1316,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) /* related cpus should atleast have policy->cpus */ cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); + /* Remember which CPUs have been present at the policy creation time. */ + if (!recover_policy) + cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask); + /* * affected cpus must always be the one, which are online. We aren't * managing offline cpus here. @@ -1420,8 +1433,7 @@ nomem_out: return ret; } -static int __cpufreq_remove_dev_prepare(struct device *dev, - struct subsys_interface *sif) +static int __cpufreq_remove_dev_prepare(struct device *dev) { unsigned int cpu = dev->id; int ret = 0; @@ -1437,10 +1449,8 @@ static int __cpufreq_remove_dev_prepare(struct device *dev, if (has_target()) { ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); - if (ret) { + if (ret) pr_err("%s: Failed to stop governor\n", __func__); - return ret; - } } down_write(&policy->rwsem); @@ -1473,8 +1483,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev, return ret; } -static int __cpufreq_remove_dev_finish(struct device *dev, - struct subsys_interface *sif) +static int __cpufreq_remove_dev_finish(struct device *dev) { unsigned int cpu = dev->id; int ret; @@ -1492,10 +1501,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev, /* If cpu is last user of policy, free policy */ if (has_target()) { ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); - if (ret) { + if (ret) pr_err("%s: Failed to exit governor\n", __func__); - return ret; - } } /* @@ -1506,10 +1513,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev, if (cpufreq_driver->exit) cpufreq_driver->exit(policy); - /* Free the policy only if the driver is getting removed. */ - if (sif) - cpufreq_policy_free(policy, true); - return 0; } @@ -1521,42 +1524,41 @@ static int __cpufreq_remove_dev_finish(struct device *dev, static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) { unsigned int cpu = dev->id; - int ret; - - /* - * Only possible if 'cpu' is getting physically removed now. A hotplug - * notifier should have already been called and we just need to remove - * link or free policy here. - */ - if (cpu_is_offline(cpu)) { - struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); - struct cpumask mask; + struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); - if (!policy) - return 0; + if (!policy) + return 0; - cpumask_copy(&mask, policy->related_cpus); - cpumask_clear_cpu(cpu, &mask); + if (cpu_online(cpu)) { + __cpufreq_remove_dev_prepare(dev); + __cpufreq_remove_dev_finish(dev); + } - /* - * Free policy only if all policy->related_cpus are removed - * physically. - */ - if (cpumask_intersects(&mask, cpu_present_mask)) { - remove_cpu_dev_symlink(policy, cpu); - return 0; - } + cpumask_clear_cpu(cpu, policy->real_cpus); + if (cpumask_empty(policy->real_cpus)) { cpufreq_policy_free(policy, true); return 0; } - ret = __cpufreq_remove_dev_prepare(dev, sif); + if (cpu != policy->kobj_cpu) { + remove_cpu_dev_symlink(policy, cpu); + } else { + /* + * The CPU owning the policy object is going away. Move it to + * another suitable CPU. 
+ */ + unsigned int new_cpu = cpumask_first(policy->real_cpus); + struct device *new_dev = get_cpu_device(new_cpu); + + dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu); - if (!ret) - ret = __cpufreq_remove_dev_finish(dev, sif); + sysfs_remove_link(&new_dev->kobj, "cpufreq"); + policy->kobj_cpu = new_cpu; + WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj)); + } - return ret; + return 0; } static void handle_update(struct work_struct *work) @@ -2395,11 +2397,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb, break; case CPU_DOWN_PREPARE: - __cpufreq_remove_dev_prepare(dev, NULL); + __cpufreq_remove_dev_prepare(dev); break; case CPU_POST_DEAD: - __cpufreq_remove_dev_finish(dev, NULL); + __cpufreq_remove_dev_finish(dev); break; case CPU_DOWN_FAILED: diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 7c56d7eaa671..7898de054f4e 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -681,6 +681,7 @@ static struct cpu_defaults knl_params = { .get_max = core_get_max_pstate, .get_min = core_get_min_pstate, .get_turbo = knl_get_turbo_pstate, + .get_scaling = core_get_scaling, .set = core_set_pstate, }, }; diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index e362860c2b50..cd593c1f66dc 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -20,7 +20,7 @@ #include <asm/clock.h> #include <asm/idle.h> -#include <asm/mach-loongson/loongson.h> +#include <asm/mach-loongson64/loongson.h> static uint nowait; diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 7ba495f75370..402631a19a11 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -905,7 +905,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt) crypt->mode |= NPE_OP_NOT_IN_PLACE; /* This was never tested by Intel * for more than one dst buffer, I think. 
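
The BUG_ON() dropped just below checked only req->dst->length, i.e. the first scatterlist entry, but a scattered destination is legitimately shorter per entry than the total request; what matters is the combined capacity. A generic sketch of summing a chained scatterlist (illustration only, not the driver code):

static unsigned int sg_total_len(struct scatterlist *sg)
{
	unsigned int len = 0;

	for (; sg; sg = sg_next(sg))
		len += sg->length;

	return len;
}
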
*/ - BUG_ON(req->dst->length < nbytes); req_ctx->dst = NULL; if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook, flags, DMA_FROM_DEVICE)) diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 067402c7c2a9..df427c0e9e7b 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -73,7 +73,8 @@ ICP_QAT_HW_CIPHER_KEY_CONVERT, \ ICP_QAT_HW_CIPHER_DECRYPT) -static atomic_t active_dev; +static DEFINE_MUTEX(algs_lock); +static unsigned int active_devs; struct qat_alg_buf { uint32_t len; @@ -1280,7 +1281,10 @@ static struct crypto_alg qat_algs[] = { { int qat_algs_register(void) { - if (atomic_add_return(1, &active_dev) == 1) { + int ret = 0; + + mutex_lock(&algs_lock); + if (++active_devs == 1) { int i; for (i = 0; i < ARRAY_SIZE(qat_algs); i++) @@ -1289,21 +1293,25 @@ int qat_algs_register(void) CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC : CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; - return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); + ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); } - return 0; + mutex_unlock(&algs_lock); + return ret; } int qat_algs_unregister(void) { - if (atomic_sub_return(1, &active_dev) == 0) - return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); - return 0; + int ret = 0; + + mutex_lock(&algs_lock); + if (--active_devs == 0) + ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); + mutex_unlock(&algs_lock); + return ret; } int qat_algs_init(void) { - atomic_set(&active_dev, 0); crypto_get_default_rng(); return 0; } diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 59892126d175..d3629b7482dd 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -48,6 +48,8 @@ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) +#define ATC_MAX_DSCR_TRIALS 10 + /* * Initial number of descriptors to allocate for each channel. This could * be increased during dma usage. @@ -285,28 +287,19 @@ static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan, * * @current_len: the number of bytes left before reading CTRLA * @ctrla: the value of CTRLA - * @desc: the descriptor containing the transfer width */ -static inline int atc_calc_bytes_left(int current_len, u32 ctrla, - struct at_desc *desc) +static inline int atc_calc_bytes_left(int current_len, u32 ctrla) { - return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width); -} + u32 btsize = (ctrla & ATC_BTSIZE_MAX); + u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla); -/** - * atc_calc_bytes_left_from_reg - calculates the number of bytes left according - * to the current value of CTRLA. - * - * @current_len: the number of bytes left before reading CTRLA - * @atchan: the channel to read CTRLA for - * @desc: the descriptor containing the transfer width - */ -static inline int atc_calc_bytes_left_from_reg(int current_len, - struct at_dma_chan *atchan, struct at_desc *desc) -{ - u32 ctrla = channel_readl(atchan, CTRLA); - - return atc_calc_bytes_left(current_len, ctrla, desc); + /* + * According to the datasheet, when reading the Control A Register + * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the + * number of transfers completed on the Source Interface. + * So btsize is always a number of source width transfers. 
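
A worked instance of the formula this comment arrives at, with assumed register values: a source width of one word means src_width == 2 (each transfer moves 4 bytes), so a BTSIZE reading of 10 completed source transfers knocks 10 << 2 = 40 bytes off the remaining length:

	/* assumed: ATC_REG_TO_SRC_WIDTH(ctrla) == 2, btsize == 10 */
	return current_len - (10 << 2);	/* i.e. current_len - 40 */
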
+ */ + return current_len - (btsize << src_width); } /** @@ -320,7 +313,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) struct at_desc *desc_first = atc_first_active(atchan); struct at_desc *desc; int ret; - u32 ctrla, dscr; + u32 ctrla, dscr, trials; /* * If the cookie doesn't match to the currently running transfer then @@ -346,15 +339,82 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) * the channel's DSCR register and compare it against the value * of the hardware linked list structure of each child * descriptor. + * + * The CTRLA register provides us with the amount of data + * already read from the source for the current child + * descriptor. So we can compute a more accurate residue by also + * removing the number of bytes corresponding to this amount of + * data. + * + * However, the DSCR and CTRLA registers cannot be read both + * atomically. Hence a race condition may occur: the first read + * register may refer to one child descriptor whereas the second + * read may refer to a later child descriptor in the list + * because of the DMA transfer progression inbetween the two + * reads. + * + * One solution could have been to pause the DMA transfer, read + * the DSCR and CTRLA then resume the DMA transfer. Nonetheless, + * this approach presents some drawbacks: + * - If the DMA transfer is paused, RX overruns or TX underruns + * are more likey to occur depending on the system latency. + * Taking the USART driver as an example, it uses a cyclic DMA + * transfer to read data from the Receive Holding Register + * (RHR) to avoid RX overruns since the RHR is not protected + * by any FIFO on most Atmel SoCs. So pausing the DMA transfer + * to compute the residue would break the USART driver design. + * - The atc_pause() function masks interrupts but we'd rather + * avoid to do so for system latency purpose. + * + * Then we'd rather use another solution: the DSCR is read a + * first time, the CTRLA is read in turn, next the DSCR is read + * a second time. If the two consecutive read values of the DSCR + * are the same then we assume both refers to the very same + * child descriptor as well as the CTRLA value read inbetween + * does. For cyclic tranfers, the assumption is that a full loop + * is "not so fast". + * If the two DSCR values are different, we read again the CTRLA + * then the DSCR till two consecutive read values from DSCR are + * equal or till the maxium trials is reach. + * This algorithm is very unlikely not to find a stable value for + * DSCR. */ - ctrla = channel_readl(atchan, CTRLA); - rmb(); /* ensure CTRLA is read before DSCR */ dscr = channel_readl(atchan, DSCR); + rmb(); /* ensure DSCR is read before CTRLA */ + ctrla = channel_readl(atchan, CTRLA); + for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) { + u32 new_dscr; + + rmb(); /* ensure DSCR is read after CTRLA */ + new_dscr = channel_readl(atchan, DSCR); + + /* + * If the DSCR register value has not changed inside the + * DMA controller since the previous read, we assume + * that both the dscr and ctrla values refers to the + * very same descriptor. + */ + if (likely(new_dscr == dscr)) + break; + + /* + * DSCR has changed inside the DMA controller, so the + * previouly read value of CTRLA may refer to an already + * processed descriptor hence could be outdated. + * We need to update ctrla to match the current + * descriptor. 
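
The retry scheme described above is a stable-pair read: sample DSCR, sample CTRLA, re-read DSCR, and accept the CTRLA value only once two consecutive DSCR reads agree. Condensed sketch with hypothetical accessors (the real code bounds the loop with ATC_MAX_DSCR_TRIALS and returns -ETIMEDOUT on exhaustion):

	dscr = read_dscr();
	rmb();				/* DSCR before CTRLA */
	ctrla = read_ctrla();
	for (trials = 0; trials < MAX_TRIALS; ++trials) {
		rmb();			/* CTRLA before the DSCR re-read */
		new_dscr = read_dscr();
		if (new_dscr == dscr)
			break;		/* stable: ctrla belongs to dscr */
		dscr = new_dscr;
		rmb();
		ctrla = read_ctrla();
	}
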
+ */ + dscr = new_dscr; + rmb(); /* ensure DSCR is read before CTRLA */ + ctrla = channel_readl(atchan, CTRLA); + } + if (unlikely(trials >= ATC_MAX_DSCR_TRIALS)) + return -ETIMEDOUT; /* for the first descriptor we can be more accurate */ if (desc_first->lli.dscr == dscr) - return atc_calc_bytes_left(ret, ctrla, desc_first); + return atc_calc_bytes_left(ret, ctrla); ret -= desc_first->len; list_for_each_entry(desc, &desc_first->tx_list, desc_node) { @@ -365,16 +425,14 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) } /* - * For the last descriptor in the chain we can calculate + * For the current descriptor in the chain we can calculate * the remaining bytes using the channel's register. - * Note that the transfer width of the first and last - * descriptor may differ. */ - if (!desc->lli.dscr) - ret = atc_calc_bytes_left_from_reg(ret, atchan, desc); + ret = atc_calc_bytes_left(ret, ctrla); } else { /* single transfer */ - ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first); + ctrla = channel_readl(atchan, CTRLA); + ret = atc_calc_bytes_left(ret, ctrla); } return ret; @@ -726,7 +784,6 @@ atc_prep_dma_interleaved(struct dma_chan *chan, desc->txd.cookie = -EBUSY; desc->total_len = desc->len = len; - desc->tx_width = dwidth; /* set end-of-link to the last link descriptor of list*/ set_desc_eol(desc); @@ -804,10 +861,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, first->txd.cookie = -EBUSY; first->total_len = len; - /* set transfer width for the calculation of the residue */ - first->tx_width = src_width; - prev->tx_width = src_width; - /* set end-of-link to the last link descriptor of list*/ set_desc_eol(desc); @@ -956,10 +1009,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, first->txd.cookie = -EBUSY; first->total_len = total_len; - /* set transfer width for the calculation of the residue */ - first->tx_width = reg_width; - prev->tx_width = reg_width; - /* first link descriptor of list is responsible of flags */ first->txd.flags = flags; /* client is in control of this ack */ @@ -1077,12 +1126,6 @@ atc_prep_dma_sg(struct dma_chan *chan, desc->txd.cookie = 0; desc->len = len; - /* - * Although we only need the transfer width for the first and - * the last descriptor, its easier to set it to all descriptors. 
- */ - desc->tx_width = src_width; - atc_desc_chain(&first, &prev, desc); /* update the lengths and addresses for the next loop cycle */ @@ -1256,7 +1299,6 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, /* First descriptor of the chain embedds additional information */ first->txd.cookie = -EBUSY; first->total_len = buf_len; - first->tx_width = reg_width; return &first->txd; diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index bc8d5ebedd19..7f5a08230f76 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -112,6 +112,7 @@ #define ATC_SRC_WIDTH_BYTE (0x0 << 24) #define ATC_SRC_WIDTH_HALFWORD (0x1 << 24) #define ATC_SRC_WIDTH_WORD (0x2 << 24) +#define ATC_REG_TO_SRC_WIDTH(r) (((r) >> 24) & 0x3) #define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */ #define ATC_DST_WIDTH(x) ((x) << 28) #define ATC_DST_WIDTH_BYTE (0x0 << 28) @@ -182,7 +183,6 @@ struct at_lli { * @txd: support for the async_tx api * @desc_node: node on the channed descriptors list * @len: descriptor byte count - * @tx_width: transfer width * @total_len: total transaction byte count */ struct at_desc { @@ -194,7 +194,6 @@ struct at_desc { struct dma_async_tx_descriptor txd; struct list_head desc_node; size_t len; - u32 tx_width; size_t total_len; /* Interleaved data */ diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index cf1213de7865..40afa2a16cfc 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -359,18 +359,19 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, * descriptor view 2 since some fields of the configuration register * depend on transfer size and src/dest addresses. */ - if (at_xdmac_chan_is_cyclic(atchan)) { + if (at_xdmac_chan_is_cyclic(atchan)) reg = AT_XDMAC_CNDC_NDVIEW_NDV1; - at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); - } else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) { + else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) reg = AT_XDMAC_CNDC_NDVIEW_NDV3; - } else { - /* - * No need to write AT_XDMAC_CC reg, it will be done when the - * descriptor is fecthed. - */ + else reg = AT_XDMAC_CNDC_NDVIEW_NDV2; - } + /* + * Even if the register will be updated from the configuration in the + * descriptor when using view 2 or higher, the PROT bit won't be set + * properly. This bit can be modified only by using the channel + * configuration register. + */ + at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); reg |= AT_XDMAC_CNDC_NDDUP | AT_XDMAC_CNDC_NDSUP @@ -681,15 +682,16 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, desc->lld.mbr_sa = mem; desc->lld.mbr_da = atchan->sconfig.dst_addr; } - desc->lld.mbr_cfg = atchan->cfg; - dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); + dwidth = at_xdmac_get_dwidth(atchan->cfg); fixed_dwidth = IS_ALIGNED(len, 1 << dwidth) - ? at_xdmac_get_dwidth(desc->lld.mbr_cfg) + ? 
dwidth : AT_XDMAC_CC_DWIDTH_BYTE; desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */ | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */ | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */ | (len >> fixed_dwidth); /* microblock length */ + desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) | + AT_XDMAC_CC_DWIDTH(fixed_dwidth); dev_dbg(chan2dev(chan), "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index fbaf1ead2597..f1325f62563e 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -162,10 +162,11 @@ static void mv_chan_set_mode(struct mv_xor_chan *chan, config &= ~0x7; config |= op_mode; - if (IS_ENABLED(__BIG_ENDIAN)) - config |= XOR_DESCRIPTOR_SWAP; - else - config &= ~XOR_DESCRIPTOR_SWAP; +#if defined(__BIG_ENDIAN) + config |= XOR_DESCRIPTOR_SWAP; +#else + config &= ~XOR_DESCRIPTOR_SWAP; +#endif writel_relaxed(config, XOR_CONFIG(chan)); chan->current_type = type; diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index f513f77b1d85..ecab4ea059b4 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) desc->txd.callback = last->txd.callback; desc->txd.callback_param = last->txd.callback_param; } - last->last = false; + desc->last = false; dma_cookie_assign(&desc->txd); @@ -2623,6 +2623,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, desc->rqcfg.brst_len = 1; desc->rqcfg.brst_len = get_burst_len(desc, len); + desc->bytes_requested = len; desc->txd.flags = flags; diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index 7d2c17d8d30f..6f80432a3f0a 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c @@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) spin_lock_irqsave(&vc->lock, flags); cookie = dma_cookie_assign(tx); - list_move_tail(&vd->node, &vc->desc_submitted); + list_add_tail(&vd->node, &vc->desc_submitted); spin_unlock_irqrestore(&vc->lock, flags); dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", @@ -83,10 +83,8 @@ static void vchan_complete(unsigned long arg) cb_data = vd->tx.callback_param; list_del(&vd->node); - if (async_tx_test_ack(&vd->tx)) - list_add(&vd->node, &vc->desc_allocated); - else - vc->desc_free(vd); + + vc->desc_free(vd); if (cb) cb(cb_data); @@ -98,13 +96,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) while (!list_empty(head)) { struct virt_dma_desc *vd = list_first_entry(head, struct virt_dma_desc, node); - if (async_tx_test_ack(&vd->tx)) { - list_move_tail(&vd->node, &vc->desc_allocated); - } else { - dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); - list_del(&vd->node); - vc->desc_free(vd); - } + list_del(&vd->node); + dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); + vc->desc_free(vd); } } EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); @@ -114,7 +108,6 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) dma_cookie_init(&vc->chan); spin_lock_init(&vc->lock); - INIT_LIST_HEAD(&vc->desc_allocated); INIT_LIST_HEAD(&vc->desc_submitted); INIT_LIST_HEAD(&vc->desc_issued); INIT_LIST_HEAD(&vc->desc_completed); diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index 189e75dbcb15..181b95267866 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h @@ -29,7 +29,6 @@ struct virt_dma_chan { 
spinlock_t lock; /* protected by vc.lock */ - struct list_head desc_allocated; struct list_head desc_submitted; struct list_head desc_issued; struct list_head desc_completed; @@ -56,16 +55,11 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan struct virt_dma_desc *vd, unsigned long tx_flags) { extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); - unsigned long flags; dma_async_tx_descriptor_init(&vd->tx, &vc->chan); vd->tx.flags = tx_flags; vd->tx.tx_submit = vchan_tx_submit; - spin_lock_irqsave(&vc->lock, flags); - list_add_tail(&vd->node, &vc->desc_allocated); - spin_unlock_irqrestore(&vc->lock, flags); - return &vd->tx; } @@ -128,8 +122,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) } /** - * vchan_get_all_descriptors - obtain all allocated, submitted and issued - * descriptors + * vchan_get_all_descriptors - obtain all submitted and issued descriptors * vc: virtual channel to get descriptors from * head: list of descriptors found * @@ -141,7 +134,6 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, struct list_head *head) { - list_splice_tail_init(&vc->desc_allocated, head); list_splice_tail_init(&vc->desc_submitted, head); list_splice_tail_init(&vc->desc_issued, head); list_splice_tail_init(&vc->desc_completed, head); @@ -149,14 +141,11 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) { - struct virt_dma_desc *vd; unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&vc->lock, flags); vchan_get_all_descriptors(vc, &head); - list_for_each_entry(vd, &head, node) - async_tx_clear_ack(&vd->tx); spin_unlock_irqrestore(&vc->lock, flags); vchan_dma_desc_free_list(vc, &head); diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index 620fd55ec766..dff22ab01851 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -111,6 +111,7 @@ #define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070 #define XGENE_DMA_BLK_MEM_RDY 0xD074 #define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF +#define XGENE_DMA_RING_CMD_SM_OFFSET 0x8000 /* X-Gene SoC EFUSE csr register and bit defination */ #define XGENE_SOC_JTAG1_SHADOW 0x18 @@ -1887,6 +1888,8 @@ static int xgene_dma_get_resources(struct platform_device *pdev, return -ENOMEM; } + pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET; + /* Get efuse csr region */ res = platform_get_resource(pdev, IORESOURCE_MEM, 3); if (!res) { diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c index 3515b381c131..711d8ad74f11 100644 --- a/drivers/edac/ppc4xx_edac.c +++ b/drivers/edac/ppc4xx_edac.c @@ -920,7 +920,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1) */ for (row = 0; row < mci->nr_csrows; row++) { - struct csrow_info *csi = &mci->csrows[row]; + struct csrow_info *csi = mci->csrows[row]; /* * Get the configuration settings for this diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c index 080d5cc27055..eebdf2a33bfe 100644 --- a/drivers/extcon/extcon-palmas.c +++ b/drivers/extcon/extcon-palmas.c @@ -200,7 +200,6 @@ static int palmas_usb_probe(struct platform_device *pdev) status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev); if (status) { dev_err(&pdev->dev, "failed to register extcon device\n"); - kfree(palmas_usb->edev->name); return status; } @@ -214,7 +213,6 @@ static int palmas_usb_probe(struct 
platform_device *pdev) if (status < 0) { dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", palmas_usb->id_irq, status); - kfree(palmas_usb->edev->name); return status; } } @@ -229,7 +227,6 @@ static int palmas_usb_probe(struct platform_device *pdev) if (status < 0) { dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", palmas_usb->vbus_irq, status); - kfree(palmas_usb->edev->name); return status; } } @@ -239,15 +236,6 @@ static int palmas_usb_probe(struct platform_device *pdev) return 0; } -static int palmas_usb_remove(struct platform_device *pdev) -{ - struct palmas_usb *palmas_usb = platform_get_drvdata(pdev); - - kfree(palmas_usb->edev->name); - - return 0; -} - #ifdef CONFIG_PM_SLEEP static int palmas_usb_suspend(struct device *dev) { @@ -288,7 +276,6 @@ static const struct of_device_id of_palmas_match_tbl[] = { static struct platform_driver palmas_usb_driver = { .probe = palmas_usb_probe, - .remove = palmas_usb_remove, .driver = { .name = "palmas-usb", .of_match_table = of_palmas_match_tbl, diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index 76157ab9faf3..43b57b02d050 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -124,25 +124,35 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id return -EINVAL; } -static int find_cable_index_by_name(struct extcon_dev *edev, const char *name) +static int find_cable_id_by_name(struct extcon_dev *edev, const char *name) { - unsigned int id = EXTCON_NONE; + unsigned int id = -EINVAL; int i = 0; - if (edev->max_supported == 0) - return -EINVAL; - - /* Find the the number of extcon cable */ + /* Find the id of extcon cable */ while (extcon_name[i]) { if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) { id = i; break; } + i++; } - if (id == EXTCON_NONE) + return id; +} + +static int find_cable_index_by_name(struct extcon_dev *edev, const char *name) +{ + unsigned int id; + + if (edev->max_supported == 0) return -EINVAL; + /* Find the the number of extcon cable */ + id = find_cable_id_by_name(edev, name); + if (id < 0) + return id; + return find_cable_index_by_id(edev, id); } @@ -228,9 +238,11 @@ static ssize_t cable_state_show(struct device *dev, struct extcon_cable *cable = container_of(attr, struct extcon_cable, attr_state); + int i = cable->cable_index; + return sprintf(buf, "%d\n", extcon_get_cable_state_(cable->edev, - cable->cable_index)); + cable->edev->supported_cable[i])); } /** @@ -263,20 +275,25 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state) spin_lock_irqsave(&edev->lock, flags); if (edev->state != ((edev->state & ~mask) | (state & mask))) { + u32 old_state; + if (check_mutually_exclusive(edev, (edev->state & ~mask) | (state & mask))) { spin_unlock_irqrestore(&edev->lock, flags); return -EPERM; } - for (index = 0; index < edev->max_supported; index++) { - if (is_extcon_changed(edev->state, state, index, &attached)) - raw_notifier_call_chain(&edev->nh[index], attached, edev); - } - + old_state = edev->state; edev->state &= ~mask; edev->state |= state & mask; + for (index = 0; index < edev->max_supported; index++) { + if (is_extcon_changed(old_state, edev->state, index, + &attached)) + raw_notifier_call_chain(&edev->nh[index], + attached, edev); + } + /* This could be in interrupt handler */ prop_buf = (char *)get_zeroed_page(GFP_ATOMIC); if (prop_buf) { @@ -361,8 +378,13 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state_); */ int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name) { - return extcon_get_cable_state_(edev, 
find_cable_index_by_name - (edev, cable_name)); + unsigned int id; + + id = find_cable_id_by_name(edev, cable_name); + if (id < 0) + return id; + + return extcon_get_cable_state_(edev, id); } EXPORT_SYMBOL_GPL(extcon_get_cable_state); @@ -404,8 +426,13 @@ EXPORT_SYMBOL_GPL(extcon_set_cable_state_); int extcon_set_cable_state(struct extcon_dev *edev, const char *cable_name, bool cable_state) { - return extcon_set_cable_state_(edev, find_cable_index_by_name - (edev, cable_name), cable_state); + unsigned int id; + + id = find_cable_id_by_name(edev, cable_name); + if (id < 0) + return id; + + return extcon_set_cable_state_(edev, id, cable_state); } EXPORT_SYMBOL_GPL(extcon_set_cable_state); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index e9fde72cf038..f7b49d5ce4b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1130,6 +1130,9 @@ struct amdgpu_gfx { uint32_t me_feature_version; uint32_t ce_feature_version; uint32_t pfp_feature_version; + uint32_t rlc_feature_version; + uint32_t mec_feature_version; + uint32_t mec2_feature_version; struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; unsigned num_gfx_rings; struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; @@ -1639,6 +1642,7 @@ struct amdgpu_sdma { /* SDMA firmware */ const struct firmware *fw; uint32_t fw_version; + uint32_t feature_version; struct amdgpu_ring ring; }; @@ -1866,6 +1870,12 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t); typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); +struct amdgpu_ip_block_status { + bool valid; + bool sw; + bool hw; +}; + struct amdgpu_device { struct device *dev; struct drm_device *ddev; @@ -2008,7 +2018,7 @@ struct amdgpu_device { const struct amdgpu_ip_block_version *ip_blocks; int num_ip_blocks; - bool *ip_block_enabled; + struct amdgpu_ip_block_status *ip_block_status; struct mutex mn_lock; DECLARE_HASHTABLE(mn_hash, 7); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index d79009b65867..99f158e1baff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1191,8 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev) return -EINVAL; } - adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL); - if (adev->ip_block_enabled == NULL) + adev->ip_block_status = kcalloc(adev->num_ip_blocks, + sizeof(struct amdgpu_ip_block_status), GFP_KERNEL); + if (adev->ip_block_status == NULL) return -ENOMEM; if (adev->ip_blocks == NULL) { @@ -1203,18 +1204,18 @@ static int amdgpu_early_init(struct amdgpu_device *adev) for (i = 0; i < adev->num_ip_blocks; i++) { if ((amdgpu_ip_block_mask & (1 << i)) == 0) { DRM_ERROR("disabled ip block: %d\n", i); - adev->ip_block_enabled[i] = false; + adev->ip_block_status[i].valid = false; } else { if (adev->ip_blocks[i].funcs->early_init) { r = adev->ip_blocks[i].funcs->early_init((void *)adev); if (r == -ENOENT) - adev->ip_block_enabled[i] = false; + adev->ip_block_status[i].valid = false; else if (r) return r; else - adev->ip_block_enabled[i] = true; + adev->ip_block_status[i].valid = true; } else { - adev->ip_block_enabled[i] = true; + adev->ip_block_status[i].valid = true; } } } @@ -1227,11 +1228,12 @@ static int amdgpu_init(struct amdgpu_device *adev) int i, r; for (i = 0; i < 
adev->num_ip_blocks; i++) { - if (!adev->ip_block_enabled[i]) + if (!adev->ip_block_status[i].valid) continue; r = adev->ip_blocks[i].funcs->sw_init((void *)adev); if (r) return r; + adev->ip_block_status[i].sw = true; /* need to do gmc hw init early so we can allocate gpu mem */ if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { r = amdgpu_vram_scratch_init(adev); @@ -1243,11 +1245,12 @@ static int amdgpu_init(struct amdgpu_device *adev) r = amdgpu_wb_init(adev); if (r) return r; + adev->ip_block_status[i].hw = true; } } for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_block_enabled[i]) + if (!adev->ip_block_status[i].sw) continue; /* gmc hw init is done early */ if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) @@ -1255,6 +1258,7 @@ static int amdgpu_init(struct amdgpu_device *adev) r = adev->ip_blocks[i].funcs->hw_init((void *)adev); if (r) return r; + adev->ip_block_status[i].hw = true; } return 0; @@ -1265,7 +1269,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev) int i = 0, r; for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_block_enabled[i]) + if (!adev->ip_block_status[i].valid) continue; /* enable clockgating to save power */ r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, @@ -1287,7 +1291,7 @@ static int amdgpu_fini(struct amdgpu_device *adev) int i, r; for (i = adev->num_ip_blocks - 1; i >= 0; i--) { - if (!adev->ip_block_enabled[i]) + if (!adev->ip_block_status[i].hw) continue; if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { amdgpu_wb_fini(adev); @@ -1300,14 +1304,16 @@ static int amdgpu_fini(struct amdgpu_device *adev) return r; r = adev->ip_blocks[i].funcs->hw_fini((void *)adev); /* XXX handle errors */ + adev->ip_block_status[i].hw = false; } for (i = adev->num_ip_blocks - 1; i >= 0; i--) { - if (!adev->ip_block_enabled[i]) + if (!adev->ip_block_status[i].sw) continue; r = adev->ip_blocks[i].funcs->sw_fini((void *)adev); /* XXX handle errors */ - adev->ip_block_enabled[i] = false; + adev->ip_block_status[i].sw = false; + adev->ip_block_status[i].valid = false; } return 0; @@ -1318,7 +1324,7 @@ static int amdgpu_suspend(struct amdgpu_device *adev) int i, r; for (i = adev->num_ip_blocks - 1; i >= 0; i--) { - if (!adev->ip_block_enabled[i]) + if (!adev->ip_block_status[i].valid) continue; /* ungate blocks so that suspend can properly shut them down */ r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, @@ -1336,7 +1342,7 @@ static int amdgpu_resume(struct amdgpu_device *adev) int i, r; for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_block_enabled[i]) + if (!adev->ip_block_status[i].valid) continue; r = adev->ip_blocks[i].funcs->resume(adev); if (r) @@ -1582,8 +1588,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev) amdgpu_fence_driver_fini(adev); amdgpu_fbdev_fini(adev); r = amdgpu_fini(adev); - kfree(adev->ip_block_enabled); - adev->ip_block_enabled = NULL; + kfree(adev->ip_block_status); + adev->ip_block_status = NULL; adev->accel_working = false; /* free i2c buses */ amdgpu_i2c_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index ae43b58c9733..4afc507820c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -449,7 +449,7 @@ out: * vital here, so they are not reported back to userspace. 
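
The amdgpu_device.c hunks above replace the single ip_block_enabled flag with a per-stage record, so teardown undoes only the stages a block actually completed. The shape of the pattern (condensed sketch, not the driver code):

struct ip_block_status {
	bool valid;	/* early_init passed, or none was needed */
	bool sw;	/* sw_init completed */
	bool hw;	/* hw_init completed */
};

/* walk backwards and undo exactly what was done */
for (i = num_blocks - 1; i >= 0; i--) {
	if (status[i].hw) {
		blocks[i]->funcs->hw_fini(adev);
		status[i].hw = false;
	}
}
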
*/ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, - struct amdgpu_bo_va *bo_va) + struct amdgpu_bo_va *bo_va, uint32_t operation) { struct ttm_validate_buffer tv, *entry; struct amdgpu_bo_list_entry *vm_bos; @@ -485,7 +485,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, if (r) goto error_unlock; - r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem); + + if (operation == AMDGPU_VA_OP_MAP) + r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem); error_unlock: mutex_unlock(&bo_va->vm->mutex); @@ -580,7 +582,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, } if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) - amdgpu_gem_va_update_vm(adev, bo_va); + amdgpu_gem_va_update_vm(adev, bo_va, args->operation); drm_gem_object_unreference_unlocked(gobj); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 52dff75aac6f..bc0fac618a3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -180,16 +180,16 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, if (vm) { /* do context switch */ amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update); - } - if (vm && ring->funcs->emit_gds_switch) - amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id, - ib->gds_base, ib->gds_size, - ib->gws_base, ib->gws_size, - ib->oa_base, ib->oa_size); + if (ring->funcs->emit_gds_switch) + amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id, + ib->gds_base, ib->gds_size, + ib->gws_base, ib->gws_size, + ib->oa_base, ib->oa_size); - if (ring->funcs->emit_hdp_flush) - amdgpu_ring_emit_hdp_flush(ring); + if (ring->funcs->emit_hdp_flush) + amdgpu_ring_emit_hdp_flush(ring); + } old_ctx = ring->current_ctx; for (i = 0; i < num_ibs; ++i) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 31ad444c6386..3bfe67de8349 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -235,7 +235,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file for (i = 0; i < adev->num_ip_blocks; i++) { if (adev->ip_blocks[i].type == type && - adev->ip_block_enabled[i]) { + adev->ip_block_status[i].valid) { ip.hw_ip_version_major = adev->ip_blocks[i].major; ip.hw_ip_version_minor = adev->ip_blocks[i].minor; ip.capabilities_flags = 0; @@ -274,7 +274,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file for (i = 0; i < adev->num_ip_blocks; i++) if (adev->ip_blocks[i].type == type && - adev->ip_block_enabled[i] && + adev->ip_block_status[i].valid && count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT) count++; @@ -317,16 +317,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file break; case AMDGPU_INFO_FW_GFX_RLC: fw_info.ver = adev->gfx.rlc_fw_version; - fw_info.feature = 0; + fw_info.feature = adev->gfx.rlc_feature_version; break; case AMDGPU_INFO_FW_GFX_MEC: - if (info->query_fw.index == 0) + if (info->query_fw.index == 0) { fw_info.ver = adev->gfx.mec_fw_version; - else if (info->query_fw.index == 1) + fw_info.feature = adev->gfx.mec_feature_version; + } else if (info->query_fw.index == 1) { fw_info.ver = adev->gfx.mec2_fw_version; - else + fw_info.feature = adev->gfx.mec2_feature_version; + } else return -EINVAL; - fw_info.feature = 0; break; case AMDGPU_INFO_FW_SMC: fw_info.ver = adev->pm.fw_version; @@ -336,7 +337,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, 
struct drm_file if (info->query_fw.index >= 2) return -EINVAL; fw_info.ver = adev->sdma[info->query_fw.index].fw_version; - fw_info.feature = 0; + fw_info.feature = adev->sdma[info->query_fw.index].feature_version; break; default: return -EINVAL; @@ -416,7 +417,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file return n ? -EFAULT : 0; } case AMDGPU_INFO_DEV_INFO: { - struct drm_amdgpu_info_device dev_info; + struct drm_amdgpu_info_device dev_info = {}; struct amdgpu_cu_info cu_info; dev_info.device_id = dev->pdev->device; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 2f7a5efa21c2..f5c22556ec2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -374,7 +374,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) unsigned height_in_mb = ALIGN(height / 16, 2); unsigned fs_in_mb = width_in_mb * height_in_mb; - unsigned image_size, tmp, min_dpb_size, num_dpb_buffer; + unsigned image_size, tmp, min_dpb_size, num_dpb_buffer, min_ctx_size; image_size = width * height; image_size += image_size / 2; @@ -466,6 +466,8 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2; min_dpb_size = image_size * num_dpb_buffer; + min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16) + * 16 * num_dpb_buffer + 52 * 1024; break; default: @@ -486,6 +488,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) buf_sizes[0x1] = dpb_size; buf_sizes[0x2] = image_size; + buf_sizes[0x4] = min_ctx_size; return 0; } @@ -628,6 +631,13 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) return -EINVAL; } + } else if (cmd == 0x206) { + if ((end - start) < ctx->buf_sizes[4]) { + DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, + (unsigned)(end - start), + ctx->buf_sizes[4]); + return -EINVAL; + } } else if ((cmd != 0x100) && (cmd != 0x204)) { DRM_ERROR("invalid UVD command %X!\n", cmd); return -EINVAL; @@ -755,9 +765,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) struct amdgpu_uvd_cs_ctx ctx = {}; unsigned buf_sizes[] = { [0x00000000] = 2048, - [0x00000001] = 32 * 1024 * 1024, - [0x00000002] = 2048 * 1152 * 3, + [0x00000001] = 0xFFFFFFFF, + [0x00000002] = 0xFFFFFFFF, [0x00000003] = 2048, + [0x00000004] = 0xFFFFFFFF, }; struct amdgpu_ib *ib = &parser->ibs[ib_idx]; int r; diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index ab83cc1ca4cc..15df46c93f0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -500,6 +500,7 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev) amdgpu_ucode_print_sdma_hdr(&hdr->header); fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); fw_data = (const __le32 *) (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 2c188fb9fd22..0d8bf2cb1956 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2561,7 +2561,7 @@ static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring, * sheduling on the ring. 
This function schedules the IB * on the gfx ring for execution by the GPU. */ -static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, +static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { bool need_ctx_switch = ring->current_ctx != ib->ctx; @@ -2569,15 +2569,10 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, u32 next_rptr = ring->wptr + 5; /* drop the CE preamble IB for the same context */ - if ((ring->type == AMDGPU_RING_TYPE_GFX) && - (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && - !need_ctx_switch) + if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch) return; - if (ring->type == AMDGPU_RING_TYPE_COMPUTE) - control |= INDIRECT_BUFFER_VALID; - - if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) + if (need_ctx_switch) next_rptr += 2; next_rptr += 4; @@ -2588,7 +2583,7 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, next_rptr); /* insert SWITCH_BUFFER packet before first IB in the ring frame */ - if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { + if (need_ctx_switch) { amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); amdgpu_ring_write(ring, 0); } @@ -2611,6 +2606,35 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, control); } +static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, + struct amdgpu_ib *ib) +{ + u32 header, control = 0; + u32 next_rptr = ring->wptr + 5; + + control |= INDIRECT_BUFFER_VALID; + next_rptr += 4; + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); + amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); + amdgpu_ring_write(ring, next_rptr); + + header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); + + control |= ib->length_dw | + (ib->vm ? 
(ib->vm->ids[ring->idx].id << 24) : 0); + + amdgpu_ring_write(ring, header); + amdgpu_ring_write(ring, +#ifdef __BIG_ENDIAN + (2 << 0) | +#endif + (ib->gpu_addr & 0xFFFFFFFC)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); + amdgpu_ring_write(ring, control); +} + /** * gfx_v7_0_ring_test_ib - basic ring IB test * @@ -3056,6 +3080,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev) mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version); + adev->gfx.mec_feature_version = le32_to_cpu( + mec_hdr->ucode_feature_version); gfx_v7_0_cp_compute_enable(adev, false); @@ -3078,6 +3104,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev) mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version); + adev->gfx.mec2_feature_version = le32_to_cpu( + mec2_hdr->ucode_feature_version); /* MEC2 */ fw_data = (const __le32 *) @@ -4042,6 +4070,8 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data; amdgpu_ucode_print_rlc_hdr(&hdr->header); adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->gfx.rlc_feature_version = le32_to_cpu( + hdr->ucode_feature_version); gfx_v7_0_rlc_stop(adev); @@ -5098,7 +5128,7 @@ static void gfx_v7_0_print_status(void *handle) dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n", RREG32(mmCP_HPD_EOP_CONTROL)); - for (queue = 0; queue < 8; i++) { + for (queue = 0; queue < 8; queue++) { cik_srbm_select(adev, me, pipe, queue, 0); dev_info(adev->dev, " queue: %d\n", queue); dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n", @@ -5555,7 +5585,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { .get_wptr = gfx_v7_0_ring_get_wptr_gfx, .set_wptr = gfx_v7_0_ring_set_wptr_gfx, .parse_cs = NULL, - .emit_ib = gfx_v7_0_ring_emit_ib, + .emit_ib = gfx_v7_0_ring_emit_ib_gfx, .emit_fence = gfx_v7_0_ring_emit_fence_gfx, .emit_semaphore = gfx_v7_0_ring_emit_semaphore, .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, @@ -5571,7 +5601,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { .get_wptr = gfx_v7_0_ring_get_wptr_compute, .set_wptr = gfx_v7_0_ring_set_wptr_compute, .parse_cs = NULL, - .emit_ib = gfx_v7_0_ring_emit_ib, + .emit_ib = gfx_v7_0_ring_emit_ib_compute, .emit_fence = gfx_v7_0_ring_emit_fence_compute, .emit_semaphore = gfx_v7_0_ring_emit_semaphore, .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 1c7c992dea37..20e2cfd521d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -587,6 +587,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) int err; struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; + const struct gfx_firmware_header_v1_0 *cp_hdr; DRM_DEBUG("\n"); @@ -611,6 +612,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->gfx.pfp_fw); if (err) goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; + adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.pfp_feature_version = 
le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); @@ -619,6 +623,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->gfx.me_fw); if (err) goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; + adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); @@ -627,12 +634,18 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->gfx.ce_fw); if (err) goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; + adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); if (err) goto out; err = amdgpu_ucode_validate(adev->gfx.rlc_fw); + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data; + adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); @@ -641,6 +654,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->gfx.mec_fw); if (err) goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; + adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); @@ -648,6 +664,12 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->gfx.mec2_fw); if (err) goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *) + adev->gfx.mec2_fw->data; + adev->gfx.mec2_fw_version = le32_to_cpu( + cp_hdr->header.ucode_version); + adev->gfx.mec2_feature_version = le32_to_cpu( + cp_hdr->ucode_feature_version); } else { err = 0; adev->gfx.mec2_fw = NULL; @@ -1983,6 +2005,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) adev->gfx.config.max_shader_engines = 1; adev->gfx.config.max_tile_pipes = 2; adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 2; switch (adev->pdev->revision) { case 0xc4: @@ -1991,7 +2014,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) case 0xcc: /* B10 */ adev->gfx.config.max_cu_per_sh = 8; - adev->gfx.config.max_backends_per_se = 2; break; case 0xc5: case 0x81: @@ -2000,14 +2022,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) case 0xcd: /* B8 */ adev->gfx.config.max_cu_per_sh = 6; - adev->gfx.config.max_backends_per_se = 2; break; case 0xc6: case 0xca: case 0xce: /* B6 */ adev->gfx.config.max_cu_per_sh = 6; - adev->gfx.config.max_backends_per_se = 2; break; case 0xc7: case 0x87: @@ -2015,7 +2035,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) default: /* B4 */ adev->gfx.config.max_cu_per_sh = 4; - adev->gfx.config.max_backends_per_se = 1; 
break; } @@ -2275,7 +2294,6 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev) hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; amdgpu_ucode_print_rlc_hdr(&hdr->header); - adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version); fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); @@ -2361,12 +2379,6 @@ static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev) amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); amdgpu_ucode_print_gfx_hdr(&me_hdr->header); - adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version); - adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version); - adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version); - adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version); - adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version); - adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version); gfx_v8_0_cp_gfx_enable(adev, false); @@ -2622,7 +2634,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev) mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); - adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version); fw_data = (const __le32 *) (adev->gfx.mec_fw->data + @@ -2641,7 +2652,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev) mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); - adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version); fw_data = (const __le32 *) (adev->gfx.mec2_fw->data + @@ -3125,7 +3135,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2); WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, - 0x7FFFF << 2); + AMDGPU_DOORBELL_MEC_RING7 << 2); } tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, @@ -3753,7 +3763,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) amdgpu_ring_write(ring, 0x20); /* poll interval */ } -static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, +static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { bool need_ctx_switch = ring->current_ctx != ib->ctx; @@ -3761,15 +3771,10 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, u32 next_rptr = ring->wptr + 5; /* drop the CE preamble IB for the same context */ - if ((ring->type == AMDGPU_RING_TYPE_GFX) && - (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && - !need_ctx_switch) + if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch) return; - if (ring->type == AMDGPU_RING_TYPE_COMPUTE) - control |= INDIRECT_BUFFER_VALID; - - if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) + if (need_ctx_switch) next_rptr += 2; next_rptr += 4; @@ -3780,7 +3785,7 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, next_rptr); /* insert SWITCH_BUFFER packet before first IB in the ring frame */ - if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { + if (need_ctx_switch) { amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); amdgpu_ring_write(ring, 0); } @@ -3803,6 +3808,36 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, control); } +static void 
gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, + struct amdgpu_ib *ib) +{ + u32 header, control = 0; + u32 next_rptr = ring->wptr + 5; + + control |= INDIRECT_BUFFER_VALID; + + next_rptr += 4; + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); + amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); + amdgpu_ring_write(ring, next_rptr); + + header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); + + control |= ib->length_dw | + (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0); + + amdgpu_ring_write(ring, header); + amdgpu_ring_write(ring, +#ifdef __BIG_ENDIAN + (2 << 0) | +#endif + (ib->gpu_addr & 0xFFFFFFFC)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); + amdgpu_ring_write(ring, control); +} + static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags) { @@ -4224,7 +4259,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { .get_wptr = gfx_v8_0_ring_get_wptr_gfx, .set_wptr = gfx_v8_0_ring_set_wptr_gfx, .parse_cs = NULL, - .emit_ib = gfx_v8_0_ring_emit_ib, + .emit_ib = gfx_v8_0_ring_emit_ib_gfx, .emit_fence = gfx_v8_0_ring_emit_fence_gfx, .emit_semaphore = gfx_v8_0_ring_emit_semaphore, .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, @@ -4240,7 +4275,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { .get_wptr = gfx_v8_0_ring_get_wptr_compute, .set_wptr = gfx_v8_0_ring_set_wptr_compute, .parse_cs = NULL, - .emit_ib = gfx_v8_0_ring_emit_ib, + .emit_ib = gfx_v8_0_ring_emit_ib_compute, .emit_fence = gfx_v8_0_ring_emit_fence_compute, .emit_semaphore = gfx_v8_0_ring_emit_semaphore, .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index d7895885fe0c..a988dfb1d394 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -121,6 +121,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev) int err, i; struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; + const struct sdma_firmware_header_v1_0 *hdr; DRM_DEBUG("\n"); @@ -142,6 +143,9 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->sdma[i].fw); if (err) goto out; + hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; + adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); if (adev->firmware.smu_load) { info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; @@ -541,8 +545,6 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev) hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; amdgpu_ucode_print_sdma_hdr(&hdr->header); fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; - adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); - fw_data = (const __le32 *) (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 7bb37b93993f..2b86569b18d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -159,6 +159,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) int err, i; struct amdgpu_firmware_info *info = NULL; const struct 
common_firmware_header *header = NULL; + const struct sdma_firmware_header_v1_0 *hdr; DRM_DEBUG("\n"); @@ -183,6 +184,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->sdma[i].fw); if (err) goto out; + hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; + adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); if (adev->firmware.smu_load) { info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; @@ -630,8 +634,6 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev) hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; amdgpu_ucode_print_sdma_hdr(&hdr->header); fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; - adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); - fw_data = (const __le32 *) (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 5b59d5ad7d1c..9dcc7280e572 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -196,7 +196,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) } funcs = connector->helper_private; - new_encoder = funcs->best_encoder(connector); + + if (funcs->atomic_best_encoder) + new_encoder = funcs->atomic_best_encoder(connector, + connector_state); + else + new_encoder = funcs->best_encoder(connector); if (!new_encoder) { DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", @@ -229,6 +234,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) } } + if (WARN_ON(!connector_state->crtc)) + return -EINVAL; + connector_state->best_encoder = new_encoder; idx = drm_crtc_index(connector_state->crtc); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 778bbb6425b8..b0487c9f018c 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1294,7 +1294,6 @@ retry: goto retry; } DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret); - WARN(1, "fail\n"); return -EIO; } diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index f9cc68fbd2a3..b50fa0afd907 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -75,7 +75,7 @@ module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600) module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); static void store_vblank(struct drm_device *dev, int crtc, - unsigned vblank_count_inc, + u32 vblank_count_inc, struct timeval *t_vblank) { struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 842d6b8dc3c4..2a652359af64 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -1745,7 +1745,6 @@ static int fimc_probe(struct platform_device *pdev) spin_lock_init(&ctx->lock); platform_set_drvdata(pdev, ctx); - pm_runtime_set_active(dev); pm_runtime_enable(dev); ret = exynos_drm_ippdrv_register(ippdrv); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 8040ed2a831f..f1c6b76c127f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -593,8 +593,7 @@ static int gsc_src_set_transf(struct device *dev, gsc_write(cfg, 
GSC_IN_CON); - ctx->rotation = cfg & - (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0; + ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0; *swap = ctx->rotation; return 0; @@ -857,8 +856,7 @@ static int gsc_dst_set_transf(struct device *dev, gsc_write(cfg, GSC_IN_CON); - ctx->rotation = cfg & - (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0; + ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0; *swap = ctx->rotation; return 0; diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 99e286489031..4a00990e4ae4 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1064,6 +1064,7 @@ static int hdmi_get_modes(struct drm_connector *connector) { struct hdmi_context *hdata = ctx_from_connector(connector); struct edid *edid; + int ret; if (!hdata->ddc_adpt) return -ENODEV; @@ -1079,7 +1080,11 @@ static int hdmi_get_modes(struct drm_connector *connector) drm_mode_connector_update_edid_property(connector, edid); - return drm_add_edid_modes(connector, edid); + ret = drm_add_edid_modes(connector, edid); + + kfree(edid); + + return ret; } static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock) diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index cae98db33062..4706b56902b4 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -718,6 +718,10 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) /* handling VSYNC */ if (val & MXR_INT_STATUS_VSYNC) { + /* the vsync interrupt uses different bits for read and clear */ + val |= MXR_INT_CLEAR_VSYNC; + val &= ~MXR_INT_STATUS_VSYNC; + /* interlaced scan needs to check the shadow register */ if (ctx->interlace) { base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0)); @@ -743,11 +747,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) out: /* clear interrupts */ - if (~val & MXR_INT_EN_VSYNC) { - /* vsync interrupt use different bit for read and clear */ - val &= ~MXR_INT_EN_VSYNC; - val |= MXR_INT_CLEAR_VSYNC; - } mixer_reg_write(res, MXR_INT_STATUS, val); spin_unlock(&res->reg_slock); @@ -907,8 +906,8 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc) } /* enable vsync interrupt */ - mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC, - MXR_INT_EN_VSYNC); + mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC); + mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC); return 0; } @@ -918,7 +917,13 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc) { struct mixer_context *mixer_ctx = crtc->ctx; struct mixer_resources *res = &mixer_ctx->mixer_res; + if (!mixer_ctx->powered) { + mixer_ctx->int_en &= MXR_INT_EN_VSYNC; + return; + } + /* disable vsync interrupt */ + mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC); mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC); } @@ -1047,6 +1052,8 @@ static void mixer_enable(struct exynos_drm_crtc *crtc) mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET); + if (ctx->int_en & MXR_INT_EN_VSYNC) + mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC); mixer_reg_write(res, MXR_INT_EN, ctx->int_en); mixer_win_reset(ctx); } diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index fe1599d75f14..424228be79ae 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -606,8 +606,6 @@ static void tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr, uint8_t *buf, size_t size) { - buf[PB(0)] = 
tda998x_cksum(buf, size); - reg_clear(priv, REG_DIP_IF_FLAGS, bit); reg_write_range(priv, addr, buf, size); reg_set(priv, REG_DIP_IF_FLAGS, bit); @@ -627,6 +625,8 @@ tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p) buf[PB(4)] = p->audio_frame[4]; buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */ + buf[PB(0)] = tda998x_cksum(buf, sizeof(buf)); + tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf, sizeof(buf)); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5f27290201e0..fd1de451c8c6 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3303,15 +3303,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) #define I915_READ64_2x32(lower_reg, upper_reg) ({ \ - u32 upper = I915_READ(upper_reg); \ - u32 lower = I915_READ(lower_reg); \ - u32 tmp = I915_READ(upper_reg); \ - if (upper != tmp) { \ - upper = tmp; \ - lower = I915_READ(lower_reg); \ - WARN_ON(I915_READ(upper_reg) != upper); \ - } \ - (u64)upper << 32 | lower; }) + u32 upper, lower, tmp; \ + tmp = I915_READ(upper_reg); \ + do { \ + upper = tmp; \ + lower = I915_READ(lower_reg); \ + tmp = I915_READ(upper_reg); \ + } while (upper != tmp); \ + (u64)upper << 32 | lower; }) #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 56b52a4767d4..31e8269e6e3d 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1923,6 +1923,17 @@ static int ggtt_bind_vma(struct i915_vma *vma, vma->vm->insert_entries(vma->vm, pages, vma->node.start, cache_level, pte_flags); + + /* Note the inconsistency here is due to absence of the + * aliasing ppgtt on gen4 and earlier. Though we always + * request PIN_USER for execbuffer (translated to LOCAL_BIND), + * without the aliasing ppgtt, we cannot honour that request and so + * must substitute it with a global binding. Since we do this + * behind the upper layers' backs, we need to explicitly set + * the bound flag ourselves. 
+ */ + vma->bound |= GLOBAL_BIND; + } if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) { diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 633bd1fcab69..d19c9db5e18c 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -464,7 +464,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, } /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ - args->phys_swizzle_mode = args->swizzle_mode; + if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) + args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN; + else + args->phys_swizzle_mode = args->swizzle_mode; if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) args->swizzle_mode = I915_BIT_6_SWIZZLE_9; if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 198fc3c3291b..3dcd59e694db 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1075,15 +1075,34 @@ parse_device_mapping(struct drm_i915_private *dev_priv, const union child_device_config *p_child; union child_device_config *child_dev_ptr; int i, child_device_num, count; - u16 block_size; + u8 expected_size; + u16 block_size; p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (!p_defs) { DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n"); return; } - if (p_defs->child_dev_size < sizeof(*p_child)) { - DRM_ERROR("General definiton block child device size is too small.\n"); + if (bdb->version < 195) { + expected_size = 33; + } else if (bdb->version == 195) { + expected_size = 37; + } else if (bdb->version <= 197) { + expected_size = 38; + } else { + expected_size = 38; + DRM_DEBUG_DRIVER("Expected child_device_config size for BDB version %u not known; assuming %u\n", + bdb->version, expected_size); + } + + if (expected_size > sizeof(*p_child)) { + DRM_ERROR("child_device_config cannot fit in p_child\n"); + return; + } + + if (p_defs->child_dev_size != expected_size) { + DRM_ERROR("Size mismatch; child_device_config size=%u (expected %u); bdb->version: %u\n", + p_defs->child_dev_size, expected_size, bdb->version); return; } /* get the block size of general definitions */ @@ -1130,7 +1149,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv, child_dev_ptr = dev_priv->vbt.child_dev + count; count++; - memcpy(child_dev_ptr, p_child, sizeof(*p_child)); + memcpy(child_dev_ptr, p_child, p_defs->child_dev_size); } return; } diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 6e4cc5334f47..600afdbef8c9 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -357,6 +357,16 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, return MODE_OK; } +static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector, + struct drm_connector_state *state) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + struct intel_crtc *crtc = to_intel_crtc(state->crtc); + + return &intel_dp->mst_encoders[crtc->pipe]->base.base; +} + static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); @@ -367,6 +377,7 @@ static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connecto static const struct drm_connector_helper_funcs 
intel_dp_mst_connector_helper_funcs = { .get_modes = intel_dp_mst_get_modes, .mode_valid = intel_dp_mst_mode_valid, + .atomic_best_encoder = intel_mst_atomic_best_encoder, .best_encoder = intel_mst_best_encoder, }; diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index 0d1dbb737933..247a424445f7 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c @@ -220,13 +220,15 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, uint32_t op_mode = 0; uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT; - enum mdp4_frame_format frame_type = mdp4_get_frame_format(fb); + enum mdp4_frame_format frame_type; if (!(crtc && fb)) { DBG("%s: disabled!", mdp4_plane->name); return 0; } + frame_type = mdp4_get_frame_format(fb); + /* src values are in Q16 fixed point, convert to integer: */ src_x = src_x >> 16; src_y = src_y >> 16; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 206f758f7d64..e253db5de5aa 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -76,7 +76,20 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) { + int i; struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + int nplanes = mdp5_kms->dev->mode_config.num_total_plane; + + for (i = 0; i < nplanes; i++) { + struct drm_plane *plane = state->planes[i]; + struct drm_plane_state *plane_state = state->plane_states[i]; + + if (!plane) + continue; + + mdp5_plane_complete_commit(plane, plane_state); + } + mdp5_disable(mdp5_kms); } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index e0eb24587c84..e79ac09b7216 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -227,6 +227,8 @@ void mdp5_plane_install_properties(struct drm_plane *plane, struct drm_mode_object *obj); uint32_t mdp5_plane_get_flush(struct drm_plane *plane); void mdp5_plane_complete_flip(struct drm_plane *plane); +void mdp5_plane_complete_commit(struct drm_plane *plane, + struct drm_plane_state *state); enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); struct drm_plane *mdp5_plane_init(struct drm_device *dev, enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 57b8f56ae9d0..22275568ab8b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -31,8 +31,6 @@ struct mdp5_plane { uint32_t nformats; uint32_t formats[32]; - - bool enabled; }; #define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base) @@ -56,22 +54,6 @@ static bool plane_enabled(struct drm_plane_state *state) return state->fb && state->crtc; } -static int mdp5_plane_disable(struct drm_plane *plane) -{ - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); - struct mdp5_kms *mdp5_kms = get_kms(plane); - enum mdp5_pipe pipe = mdp5_plane->pipe; - - DBG("%s: disable", mdp5_plane->name); - - if (mdp5_kms) { - /* Release the memory we requested earlier from the SMP: */ - mdp5_smp_release(mdp5_kms->smp, pipe); - } - - return 0; -} - static void mdp5_plane_destroy(struct drm_plane *plane) { struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); @@ -224,7 +206,6 @@ static void 
mdp5_plane_atomic_update(struct drm_plane *plane, if (!plane_enabled(state)) { to_mdp5_plane_state(state)->pending = true; - mdp5_plane_disable(plane); } else if (to_mdp5_plane_state(state)->mode_changed) { int ret; to_mdp5_plane_state(state)->pending = true; @@ -602,6 +583,20 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane) return mdp5_plane->flush_mask; } +/* called after vsync in thread context */ +void mdp5_plane_complete_commit(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct mdp5_kms *mdp5_kms = get_kms(plane); + struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); + enum mdp5_pipe pipe = mdp5_plane->pipe; + + if (!plane_enabled(plane->state)) { + DBG("%s: free SMP", mdp5_plane->name); + mdp5_smp_release(mdp5_kms->smp, pipe); + } +} + /* initialize plane */ struct drm_plane *mdp5_plane_init(struct drm_device *dev, enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c index 16702aecf0df..64a27d86f2f5 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c @@ -34,22 +34,44 @@ * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0). * * For each block that can be dynamically allocated, it can be either - * free, or pending/in-use by a client. The updates happen in three steps: + * free: + * The block is free. + * + * pending: + * The block is allocated to some client and not free. + * + * configured: + * The block is allocated to some client, and assigned to that + * client in MDP5_MDP_SMP_ALLOC registers. + * + * inuse: + * The block is being actively used by a client. + * + * The updates happen in the following steps: * * 1) mdp5_smp_request(): * When plane scanout is setup, calculate required number of - * blocks needed per client, and request. Blocks not inuse or - * pending by any other client are added to client's pending - * set. + * blocks needed per client, and request. Blocks neither inuse nor + * configured nor pending by any other client are added to client's + * pending set. + * For shrinking, blocks in pending but not in configured can be freed + * directly, but those already in configured will be freed later by + * mdp5_smp_commit. * * 2) mdp5_smp_configure(): * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers * are configured for the union(pending, inuse) + * Current pending is copied to configured. + * It is assumed that mdp5_smp_request and mdp5_smp_configure do not + * run concurrently for the same pipe. * * 3) mdp5_smp_commit(): - * After next vblank, copy pending -> inuse. Optionally update + * After next vblank, copy configured -> inuse. Optionally update * MDP5_SMP_ALLOC registers if there are newly unused blocks * + * 4) mdp5_smp_release(): + * Must be called after the pipe is disabled and no longer uses any SMB + * * On the next vblank after changes have been committed to hw, the * client's pending blocks become its in-use blocks (and no-longer * in-use blocks become available to other clients). 
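
The comment block above walks through a four-stage block lifecycle (free -> pending -> configured -> inuse), where each stage is promoted to the next only on a hardware event: _configure() latches before FLUSH, _commit() after the following vblank. The sketch below models just that bookkeeping in isolation. It is a minimal userspace approximation, not the driver code: the per-client bitmaps are collapsed into single words, locking and the real MDP5_MDP_SMP_ALLOC register writes are omitted, only the growing case is modelled, and smp_client, smp_request, smp_configure and smp_commit are invented names for this example.

#include <stdio.h>
#include <string.h>

struct smp_client {
	unsigned long pending;    /* requested from the pool by _request() */
	unsigned long configured; /* latched for hw by _configure(), pre-FLUSH */
	unsigned long inuse;      /* owned by hw after the vblank, by _commit() */
};

/* step 1: move free blocks from the global pool into the pending set */
static void smp_request(struct smp_client *c, unsigned long *pool,
			unsigned long want)
{
	unsigned long got = want & *pool;

	*pool &= ~got;
	c->pending |= got;
}

/* step 2: before FLUSH, latch pending into configured */
static void smp_configure(struct smp_client *c)
{
	c->configured = c->pending;
}

/* step 3: after vblank, configured becomes inuse; blocks that dropped
 * out (inuse & ~configured) are returned to the global pool */
static void smp_commit(struct smp_client *c, unsigned long *pool)
{
	*pool |= c->inuse & ~c->configured;
	c->inuse = c->configured;
}

int main(void)
{
	unsigned long pool = ~0UL;
	struct smp_client c;

	memset(&c, 0, sizeof(c));
	smp_request(&c, &pool, 0xfUL);	/* ask for blocks 0-3 */
	smp_configure(&c);		/* pretend FLUSH happens here */
	smp_commit(&c, &pool);		/* pretend the vblank happened */
	printf("inuse=%#lx pool=%#lx\n", c.inuse, pool);
	return 0;
}

In these terms the new release step simply hands pending, configured and inuse back to the pool and zeroes all three under the state lock, which is what mdp5_smp_release() does in the hunk below.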
@@ -77,6 +99,9 @@ struct mdp5_smp { struct mdp5_client_smp_state client_state[MAX_CLIENTS]; }; +static void update_smp_state(struct mdp5_smp *smp, + u32 cid, mdp5_smp_state_t *assigned); + static inline struct mdp5_kms *get_kms(struct mdp5_smp *smp) { @@ -149,7 +174,12 @@ static int smp_request_block(struct mdp5_smp *smp, for (i = cur_nblks; i > nblks; i--) { int blk = find_first_bit(ps->pending, cnt); clear_bit(blk, ps->pending); - /* don't clear in global smp_state until _commit() */ + + /* clear in global smp_state if not in configured; + * otherwise it is freed later by _commit() + */ + if (!test_bit(blk, ps->configured)) + clear_bit(blk, smp->state); } } @@ -223,10 +253,33 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid /* Release SMP blocks for all clients of the pipe */ void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe) { - int i, nblks; + int i; + unsigned long flags; + int cnt = smp->blk_cnt; + + for (i = 0; i < pipe2nclients(pipe); i++) { + mdp5_smp_state_t assigned; + u32 cid = pipe2client(pipe, i); + struct mdp5_client_smp_state *ps = &smp->client_state[cid]; + + spin_lock_irqsave(&smp->state_lock, flags); + + /* clear hw assignment */ + bitmap_or(assigned, ps->inuse, ps->configured, cnt); + update_smp_state(smp, CID_UNUSED, &assigned); + + /* free to global pool */ + bitmap_andnot(smp->state, smp->state, ps->pending, cnt); + bitmap_andnot(smp->state, smp->state, assigned, cnt); + + /* clear client's info */ + bitmap_zero(ps->pending, cnt); + bitmap_zero(ps->configured, cnt); + bitmap_zero(ps->inuse, cnt); + + spin_unlock_irqrestore(&smp->state_lock, flags); + } - for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++) - smp_request_block(smp, pipe2client(pipe, i), 0); set_fifo_thresholds(smp, pipe, 0); } @@ -274,12 +327,20 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe) u32 cid = pipe2client(pipe, i); struct mdp5_client_smp_state *ps = &smp->client_state[cid]; - bitmap_or(assigned, ps->inuse, ps->pending, cnt); + /* + * if vblank has not happened since the last smp_configure, + * skip the configure for now + */ + if (!bitmap_equal(ps->inuse, ps->configured, cnt)) + continue; + + bitmap_copy(ps->configured, ps->pending, cnt); + bitmap_or(assigned, ps->inuse, ps->configured, cnt); update_smp_state(smp, cid, &assigned); } } -/* step #3: after vblank, copy pending -> inuse: */ +/* step #3: after vblank, copy configured -> inuse: */ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) { int cnt = smp->blk_cnt; @@ -295,7 +356,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) * using, which can be released and made available to other * clients: */ - if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) { + if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) { unsigned long flags; spin_lock_irqsave(&smp->state_lock, flags); @@ -306,7 +367,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) update_smp_state(smp, CID_UNUSED, &released); } - bitmap_copy(ps->inuse, ps->pending, cnt); + bitmap_copy(ps->inuse, ps->configured, cnt); } } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h index e47179f63585..5b6c2363f592 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h @@ -23,6 +23,7 @@ struct mdp5_client_smp_state { mdp5_smp_state_t inuse; + mdp5_smp_state_t configured; mdp5_smp_state_t pending; }; diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 
1b22d8bfe142..1ceb4f22dd89 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -283,12 +283,8 @@ int msm_atomic_commit(struct drm_device *dev, timeout = ktime_add_ms(ktime_get(), 1000); - ret = msm_wait_fence_interruptable(dev, c->fence, &timeout); - if (ret) { - WARN_ON(ret); // TODO unswap state back? or?? - commit_destroy(c); - return ret; - } + /* uninterruptible wait */ + msm_wait_fence(dev, c->fence, &timeout, false); complete_commit(c); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index b7ef56ed8d1c..d3467b115e04 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -637,8 +637,8 @@ static void msm_debugfs_cleanup(struct drm_minor *minor) * Fences: */ -int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, - ktime_t *timeout) +int msm_wait_fence(struct drm_device *dev, uint32_t fence, + ktime_t *timeout , bool interruptible) { struct msm_drm_private *priv = dev->dev_private; int ret; @@ -667,7 +667,12 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, remaining_jiffies = timespec_to_jiffies(&ts); } - ret = wait_event_interruptible_timeout(priv->fence_event, + if (interruptible) + ret = wait_event_interruptible_timeout(priv->fence_event, + fence_completed(dev, fence), + remaining_jiffies); + else + ret = wait_event_timeout(priv->fence_event, fence_completed(dev, fence), remaining_jiffies); @@ -853,7 +858,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, return -EINVAL; } - return msm_wait_fence_interruptable(dev, args->fence, &timeout); + return msm_wait_fence(dev, args->fence, &timeout, true); } static const struct drm_ioctl_desc msm_ioctls[] = { diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index e7c5ea125d45..4ff0ec9c994b 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -164,8 +164,8 @@ int msm_atomic_commit(struct drm_device *dev, int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); -int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, - ktime_t *timeout); +int msm_wait_fence(struct drm_device *dev, uint32_t fence, + ktime_t *timeout, bool interruptible); int msm_queue_fence_cb(struct drm_device *dev, struct msm_fence_cb *cb, uint32_t fence); void msm_update_fence(struct drm_device *dev, uint32_t fence); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index f211b80e3a1e..c76cc853b08a 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -460,7 +460,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) if (op & MSM_PREP_NOSYNC) timeout = NULL; - ret = msm_wait_fence_interruptable(dev, fence, timeout); + ret = msm_wait_fence(dev, fence, timeout, true); } /* TODO cache maintenance */ diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index dd7a7ab603e2..831461bc98a5 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -23,8 +23,12 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); - BUG_ON(!msm_obj->sgt); /* should have already pinned! */ - return msm_obj->sgt; + int npages = obj->size >> PAGE_SHIFT; + + if (WARN_ON(!msm_obj->pages)) /* should have already pinned! 
*/ + return NULL; + + return drm_prime_pages_to_sg(msm_obj->pages, npages); } void *msm_gem_prime_vmap(struct drm_gem_object *obj) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 649024d4daf1..477cbb12809b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -128,6 +128,7 @@ nouveau_cli_destroy(struct nouveau_cli *cli) nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL); nvif_client_fini(&cli->base); usif_client_fini(cli); + kfree(cli); } static void @@ -865,8 +866,10 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv) pm_runtime_get_sync(dev->dev); + mutex_lock(&cli->mutex); if (cli->abi16) nouveau_abi16_fini(cli->abi16); + mutex_unlock(&cli->mutex); mutex_lock(&drm->client.mutex); list_del(&cli->head); diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c index 775277f1edb0..dcfbbfaf1739 100644 --- a/drivers/gpu/drm/nouveau/nouveau_platform.c +++ b/drivers/gpu/drm/nouveau/nouveau_platform.c @@ -92,6 +92,8 @@ static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu) return 0; } +#if IS_ENABLED(CONFIG_IOMMU_API) + static void nouveau_platform_probe_iommu(struct device *dev, struct nouveau_platform_gpu *gpu) { @@ -158,6 +160,20 @@ static void nouveau_platform_remove_iommu(struct device *dev, } } +#else + +static void nouveau_platform_probe_iommu(struct device *dev, + struct nouveau_platform_gpu *gpu) +{ +} + +static void nouveau_platform_remove_iommu(struct device *dev, + struct nouveau_platform_gpu *gpu) +{ +} + +#endif + static int nouveau_platform_probe(struct platform_device *pdev) { struct nouveau_platform_gpu *gpu; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 18f449715788..7464aef34674 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -175,15 +175,24 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, node->page_shift = 12; switch (drm->device.info.family) { + case NV_DEVICE_INFO_V0_TNT: + case NV_DEVICE_INFO_V0_CELSIUS: + case NV_DEVICE_INFO_V0_KELVIN: + case NV_DEVICE_INFO_V0_RANKINE: + case NV_DEVICE_INFO_V0_CURIE: + break; case NV_DEVICE_INFO_V0_TESLA: if (drm->device.info.chipset != 0x50) node->memtype = (nvbo->tile_flags & 0x7f00) >> 8; break; case NV_DEVICE_INFO_V0_FERMI: case NV_DEVICE_INFO_V0_KEPLER: + case NV_DEVICE_INFO_V0_MAXWELL: node->memtype = (nvbo->tile_flags & 0xff00) >> 8; break; default: + NV_WARN(drm, "%s: unhandled family type %x\n", __func__, + drm->device.info.family); break; } diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 4ef602c5469d..495c57644ced 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -203,7 +203,7 @@ nv04_fbcon_accel_init(struct fb_info *info) if (ret) return ret; - if (RING_SPACE(chan, 49)) { + if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 
4 : 0))) { nouveau_fbcon_gpu_lockup(info); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 7da7958556a3..981342d142ff 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -979,7 +979,7 @@ nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update) { struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); - if (show && nv_crtc->cursor.nvbo) + if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled) nv50_crtc_cursor_show(nv_crtc); else nv50_crtc_cursor_hide(nv_crtc); diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 394c89abcc97..901130b06072 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -188,7 +188,7 @@ nv50_fbcon_accel_init(struct fb_info *info) if (ret) return ret; - ret = RING_SPACE(chan, 59); + ret = RING_SPACE(chan, 58); if (ret) { nouveau_fbcon_gpu_lockup(info); return ret; @@ -252,6 +252,7 @@ nv50_fbcon_accel_init(struct fb_info *info) OUT_RING(chan, info->var.yres_virtual); OUT_RING(chan, upper_32_bits(fb->vma.offset)); OUT_RING(chan, lower_32_bits(fb->vma.offset)); + FIRE_RING(chan); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c index 61246677e8dc..fcd2e5f27bb9 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c @@ -188,7 +188,7 @@ nvc0_fbcon_accel_init(struct fb_info *info) return -EINVAL; } - ret = RING_SPACE(chan, 60); + ret = RING_SPACE(chan, 58); if (ret) { WARN_ON(1); nouveau_fbcon_gpu_lockup(info); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c index 9ef6728c528d..7f2f05f78cc8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c @@ -809,7 +809,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl, case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break; default: nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl); - return 0x0000; + return NULL; } } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index 5606c25e5d02..ca11ddb6ed46 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -663,6 +663,37 @@ gf100_gr_zbc_init(struct gf100_gr_priv *priv) gf100_gr_zbc_clear_depth(priv, index); } +/** + * Wait until GR goes idle. GR is considered idle if it is disabled by the + * MC (0x200) register, or GR is not busy and a context switch is not in + * progress. 
+ */ +int +gf100_gr_wait_idle(struct gf100_gr_priv *priv) +{ + unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000); + bool gr_enabled, ctxsw_active, gr_busy; + + do { + /* + * required to make sure FIFO_ENGINE_STATUS (0x2640) is + * up-to-date + */ + nv_rd32(priv, 0x400700); + + gr_enabled = nv_rd32(priv, 0x200) & 0x1000; + ctxsw_active = nv_rd32(priv, 0x2640) & 0x8000; + gr_busy = nv_rd32(priv, 0x40060c) & 0x1; + + if (!gr_enabled || (!gr_busy && !ctxsw_active)) + return 0; + } while (time_before(jiffies, end_jiffies)); + + nv_error(priv, "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n", + gr_enabled, ctxsw_active, gr_busy); + return -EAGAIN; +} + void gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p) { @@ -699,7 +730,13 @@ gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p) while (addr < next) { nv_wr32(priv, 0x400200, addr); - nv_wait(priv, 0x400700, 0x00000002, 0x00000000); + /** + * Wait for GR to go idle after submitting a + * GO_IDLE bundle + */ + if ((addr & 0xffff) == 0xe100) + gf100_gr_wait_idle(priv); + nv_wait(priv, 0x400700, 0x00000004, 0x00000000); addr += init->pitch; } } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h index 8af1a89eda84..c9533fdac4fc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h @@ -181,6 +181,7 @@ struct gf100_gr_oclass { int ppc_nr; }; +int gf100_gr_wait_idle(struct gf100_gr_priv *); void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *); void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *); void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c index 2006c445938d..4cf36a3aa814 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c @@ -332,9 +332,12 @@ static void nvkm_perfctx_dtor(struct nvkm_object *object) { struct nvkm_pm *ppm = (void *)object->engine; + struct nvkm_perfctx *ctx = (void *)object; + mutex_lock(&nv_subdev(ppm)->mutex); - nvkm_engctx_destroy(&ppm->context->base); - ppm->context = NULL; + nvkm_engctx_destroy(&ctx->base); + if (ppm->context == ctx) + ppm->context = NULL; mutex_unlock(&nv_subdev(ppm)->mutex); } @@ -355,12 +358,11 @@ nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine, mutex_lock(&nv_subdev(ppm)->mutex); if (ppm->context == NULL) ppm->context = ctx; - mutex_unlock(&nv_subdev(ppm)->mutex); - if (ctx != ppm->context) - return -EBUSY; + ret = -EBUSY; + mutex_unlock(&nv_subdev(ppm)->mutex); - return 0; + return ret; } struct nvkm_oclass diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c index f67cdae1e90a..f4611e3f0971 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c @@ -1285,6 +1285,44 @@ init_zm_reg_sequence(struct nvbios_init *init) } /** + * INIT_PLL_INDIRECT - opcode 0x59 + * + */ +static void +init_pll_indirect(struct nvbios_init *init) +{ + struct nvkm_bios *bios = init->bios; + u32 reg = nv_ro32(bios, init->offset + 1); + u16 addr = nv_ro16(bios, init->offset + 5); + u32 freq = (u32)nv_ro16(bios, addr) * 1000; + + trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n", + reg, addr, freq); + init->offset += 7; + + init_prog_pll(init, reg, freq); +} + +/** + * 
INIT_ZM_REG_INDIRECT - opcode 0x5a + * + */ +static void +init_zm_reg_indirect(struct nvbios_init *init) +{ + struct nvkm_bios *bios = init->bios; + u32 reg = nv_ro32(bios, init->offset + 1); + u16 addr = nv_ro16(bios, init->offset + 5); + u32 data = nv_ro32(bios, addr); + + trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n", + reg, addr, data); + init->offset += 7; + + init_wr32(init, reg, data); +} + +/** * INIT_SUB_DIRECT - opcode 0x5b * */ @@ -2145,6 +2183,8 @@ static struct nvbios_init_opcode { [0x56] = { init_condition_time }, [0x57] = { init_ltime }, [0x58] = { init_zm_reg_sequence }, + [0x59] = { init_pll_indirect }, + [0x5a] = { init_zm_reg_indirect }, [0x5b] = { init_sub_direct }, [0x5c] = { init_jump }, [0x5e] = { init_i2c_if }, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c index 822d32a28d6e..065e9f5c8db9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c @@ -180,7 +180,8 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz, struct gt215_clk_info *info) { struct gt215_clk_priv *priv = (void *)clock; - u32 oclk, sclk, sdiv, diff; + u32 oclk, sclk, sdiv; + s32 diff; info->clk = 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c index c0fdb89e74ac..24dcdfb58a8d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c @@ -38,6 +38,14 @@ gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv) nv_wr32(priv, 0x12004c, 0x4); nv_wr32(priv, 0x122204, 0x2); nv_rd32(priv, 0x122204); + + /* + * Bug: increase clock timeout to avoid operation failure at high + * gpcclk rate. + */ + nv_wr32(priv, 0x122354, 0x800); + nv_wr32(priv, 0x128328, 0x800); + nv_wr32(priv, 0x124320, 0x800); } static void diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c index 80614f1b2074..282143f49d72 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c @@ -50,7 +50,12 @@ nv04_instobj_dtor(struct nvkm_object *object) { struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object); struct nv04_instobj_priv *node = (void *)object; + struct nvkm_subdev *subdev = (void *)priv; + + mutex_lock(&subdev->mutex); nvkm_mm_free(&priv->heap, &node->mem); + mutex_unlock(&subdev->mutex); + nvkm_instobj_destroy(&node->base); } @@ -62,6 +67,7 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent); struct nv04_instobj_priv *node; struct nvkm_instobj_args *args = data; + struct nvkm_subdev *subdev = (void *)priv; int ret; if (!args->align) @@ -72,8 +78,10 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine, if (ret) return ret; + mutex_lock(&subdev->mutex); ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size, args->align, &node->mem); + mutex_unlock(&subdev->mutex); if (ret) return ret; diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index dd39f434b4a7..c3872598b85a 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -2299,8 +2299,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, encoder_mode = atombios_get_encoder_mode(encoder); if (connector && (radeon_audio != 0) && ((encoder_mode == 
ATOM_ENCODER_MODE_HDMI) || - (ENCODER_MODE_IS_DP(encoder_mode) && - drm_detect_monitor_audio(radeon_connector_edid(connector))))) + ENCODER_MODE_IS_DP(encoder_mode))) radeon_audio_mode_set(encoder, adjusted_mode); } diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 68fd9fc677e3..44480c1b9738 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c @@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder) struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - u32 offset; - if (!dig || !dig->afmt || !dig->afmt->pin) + if (!dig || !dig->afmt || !dig->pin) return; - offset = dig->afmt->offset; - - WREG32(AFMT_AUDIO_SRC_CONTROL + offset, - AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id)); + WREG32(AFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, + AFMT_AUDIO_SRC_SELECT(dig->pin->id)); } void dce6_afmt_write_latency_fields(struct drm_encoder *encoder, - struct drm_connector *connector, struct drm_display_mode *mode) + struct drm_connector *connector, + struct drm_display_mode *mode) { struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - u32 tmp = 0, offset; + u32 tmp = 0; - if (!dig || !dig->afmt || !dig->afmt->pin) + if (!dig || !dig->afmt || !dig->pin) return; - offset = dig->afmt->pin->offset; - if (mode->flags & DRM_MODE_FLAG_INTERLACE) { if (connector->latency_present[1]) tmp = VIDEO_LIPSYNC(connector->video_latency[1]) | @@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder, else tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0); } - WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); + WREG32_ENDPOINT(dig->pin->offset, + AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); } void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder, - u8 *sadb, int sad_count) + u8 *sadb, int sad_count) { struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - u32 offset, tmp; + u32 tmp; - if (!dig || !dig->afmt || !dig->afmt->pin) + if (!dig || !dig->afmt || !dig->pin) return; - offset = dig->afmt->pin->offset; - /* program the speaker allocation */ - tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); + tmp = RREG32_ENDPOINT(dig->pin->offset, + AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK); /* set HDMI mode */ tmp |= HDMI_CONNECTION; @@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder, tmp |= SPEAKER_ALLOCATION(sadb[0]); else tmp |= SPEAKER_ALLOCATION(5); /* stereo */ - WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); + WREG32_ENDPOINT(dig->pin->offset, + AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); } void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder, - u8 *sadb, int sad_count) + u8 *sadb, int sad_count) { struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - u32 offset, tmp; + u32 tmp; - if (!dig || !dig->afmt || !dig->afmt->pin) + if (!dig || !dig->afmt || !dig->pin) return; - offset = 
dig->afmt->pin->offset; - /* program the speaker allocation */ - tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); + tmp = RREG32_ENDPOINT(dig->pin->offset, + AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK); /* set DP mode */ tmp |= DP_CONNECTION; @@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder, tmp |= SPEAKER_ALLOCATION(sadb[0]); else tmp |= SPEAKER_ALLOCATION(5); /* stereo */ - WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); + WREG32_ENDPOINT(dig->pin->offset, + AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); } void dce6_afmt_write_sad_regs(struct drm_encoder *encoder, - struct cea_sad *sads, int sad_count) + struct cea_sad *sads, int sad_count) { - u32 offset; int i; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; @@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder, { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, }; - if (!dig || !dig->afmt || !dig->afmt->pin) + if (!dig || !dig->afmt || !dig->pin) return; - offset = dig->afmt->pin->offset; - for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { u32 value = 0; u8 stereo_freqs = 0; @@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder, value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs); - WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value); + WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value); } } @@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev, } void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, - struct radeon_crtc *crtc, unsigned int clock) + struct radeon_crtc *crtc, unsigned int clock) { /* Two dtos; generally use dto0 for HDMI */ u32 value = 0; @@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, } void dce6_dp_audio_set_dto(struct radeon_device *rdev, - struct radeon_crtc *crtc, unsigned int clock) + struct radeon_crtc *crtc, unsigned int clock) { /* Two dtos; generally use dto1 for DP */ u32 value = 0; diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index fa719c53449b..fbc8d88d6e5d 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c @@ -245,6 +245,28 @@ static struct radeon_audio_funcs dce6_dp_funcs = { static void radeon_audio_enable(struct radeon_device *rdev, struct r600_audio_pin *pin, u8 enable_mask) { + struct drm_encoder *encoder; + struct radeon_encoder *radeon_encoder; + struct radeon_encoder_atom_dig *dig; + int pin_count = 0; + + if (!pin) + return; + + if (rdev->mode_info.mode_config_initialized) { + list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) { + if (radeon_encoder_is_digital(encoder)) { + radeon_encoder = to_radeon_encoder(encoder); + dig = radeon_encoder->enc_priv; + if (dig->pin == pin) + pin_count++; + } + } + + if ((pin_count > 1) && (enable_mask == 0)) + return; + } + if (rdev->audio.funcs->enable) rdev->audio.funcs->enable(rdev, pin, enable_mask); } @@ -336,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset, static void radeon_audio_write_sad_regs(struct drm_encoder *encoder) { - struct radeon_encoder *radeon_encoder; - struct drm_connector *connector; - struct radeon_connector *radeon_connector = NULL; + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + struct 
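The new guard in radeon_audio_enable() deals with pin sharing: several digital encoders can now point at the same r600_audio_pin (the pin pointer moves from radeon_afmt into radeon_encoder_atom_dig, see the radeon_mode.h hunk further down), so a disable request (enable_mask == 0) is dropped while more than one encoder still references the pin. The same rule, restated as a hypothetical standalone predicate rather than the in-line count the patch uses:

    /* Hypothetical helper, not in the patch: true if another encoder
     * besides the caller's still routes audio through @pin. */
    static bool radeon_audio_pin_shared(struct radeon_device *rdev,
                                        struct r600_audio_pin *pin)
    {
        struct drm_encoder *encoder;
        int users = 0;

        list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list,
                            head) {
            struct radeon_encoder_atom_dig *dig;

            if (!radeon_encoder_is_digital(encoder))
                continue;
            dig = to_radeon_encoder(encoder)->enc_priv;
            if (dig && dig->pin == pin)
                users++;
        }
        return users > 1;
    }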
radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct cea_sad *sads; int sad_count; - list_for_each_entry(connector, - &encoder->dev->mode_config.connector_list, head) { - if (connector->encoder == encoder) { - radeon_connector = to_radeon_connector(connector); - break; - } - } - - if (!radeon_connector) { - DRM_ERROR("Couldn't find encoder's connector\n"); + if (!connector) return; - } sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads); if (sad_count <= 0) { @@ -362,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder) } BUG_ON(!sads); - radeon_encoder = to_radeon_encoder(encoder); - if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs) radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count); @@ -372,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder) static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder) { + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_connector *connector; - struct radeon_connector *radeon_connector = NULL; u8 *sadb = NULL; int sad_count; - list_for_each_entry(connector, - &encoder->dev->mode_config.connector_list, head) { - if (connector->encoder == encoder) { - radeon_connector = to_radeon_connector(connector); - break; - } - } - - if (!radeon_connector) { - DRM_ERROR("Couldn't find encoder's connector\n"); + if (!connector) return; - } - sad_count = drm_edid_to_speaker_allocation( - radeon_connector_edid(connector), &sadb); + sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), + &sadb); if (sad_count < 0) { DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); @@ -406,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder) } static void radeon_audio_write_latency_fields(struct drm_encoder *encoder, - struct drm_display_mode *mode) + struct drm_display_mode *mode) { - struct radeon_encoder *radeon_encoder; - struct drm_connector *connector; - struct radeon_connector *radeon_connector = 0; - - list_for_each_entry(connector, - &encoder->dev->mode_config.connector_list, head) { - if (connector->encoder == encoder) { - radeon_connector = to_radeon_connector(connector); - break; - } - } + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - if (!radeon_connector) { - DRM_ERROR("Couldn't find encoder's connector\n"); + if (!connector) return; - } - - radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields) radeon_encoder->audio->write_latency_fields(encoder, connector, mode); @@ -451,29 +436,23 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder) } void radeon_audio_detect(struct drm_connector *connector, + struct drm_encoder *encoder, enum drm_connector_status status) { - struct radeon_device *rdev; - struct radeon_encoder *radeon_encoder; + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig; - if (!connector || !connector->encoder) + if (!radeon_audio_chipset_supported(rdev)) return; - rdev = connector->encoder->dev->dev_private; - - if (!radeon_audio_chipset_supported(rdev)) + if (!radeon_encoder_is_digital(encoder)) return; - 
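radeon_audio_write_sad_regs(), radeon_audio_write_speaker_allocation() and radeon_audio_write_latency_fields() each open-coded the same walk over mode_config.connector_list to find the connector the encoder drives; they now share radeon_get_connector_for_encoder() and treat a missing connector as a quiet no-op rather than a DRM_ERROR. Each helper's preamble reduces to the same shape:

    struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);

    if (!connector)
        return;  /* encoder drives no connector right now; nothing to program */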
radeon_encoder = to_radeon_encoder(connector->encoder); dig = radeon_encoder->enc_priv; if (status == connector_status_connected) { - if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) { - radeon_encoder->audio = NULL; - return; - } - if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); @@ -486,11 +465,17 @@ void radeon_audio_detect(struct drm_connector *connector, radeon_encoder->audio = rdev->audio.hdmi_funcs; } - dig->afmt->pin = radeon_audio_get_pin(connector->encoder); - radeon_audio_enable(rdev, dig->afmt->pin, 0xf); + if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { + if (!dig->pin) + dig->pin = radeon_audio_get_pin(encoder); + radeon_audio_enable(rdev, dig->pin, 0xf); + } else { + radeon_audio_enable(rdev, dig->pin, 0); + dig->pin = NULL; + } } else { - radeon_audio_enable(rdev, dig->afmt->pin, 0); - dig->afmt->pin = NULL; + radeon_audio_enable(rdev, dig->pin, 0); + dig->pin = NULL; } } @@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock } static int radeon_audio_set_avi_packet(struct drm_encoder *encoder, - struct drm_display_mode *mode) + struct drm_display_mode *mode) { struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - struct drm_connector *connector; - struct radeon_connector *radeon_connector = NULL; + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; struct hdmi_avi_infoframe frame; int err; - list_for_each_entry(connector, - &encoder->dev->mode_config.connector_list, head) { - if (connector->encoder == encoder) { - radeon_connector = to_radeon_connector(connector); - break; - } - } - - if (!radeon_connector) { - DRM_ERROR("Couldn't find encoder's connector\n"); - return -ENOENT; - } + if (!connector) + return -EINVAL; err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); if (err < 0) { @@ -563,8 +537,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder, return err; } - if (dig && dig->afmt && - radeon_encoder->audio && radeon_encoder->audio->set_avi_packet) + if (dig && dig->afmt && radeon_encoder->audio && + radeon_encoder->audio->set_avi_packet) radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset, buffer, sizeof(buffer)); @@ -722,30 +696,41 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder, { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); if (!dig || !dig->afmt) return; - radeon_audio_set_mute(encoder, true); + if (!connector) + return; - radeon_audio_write_speaker_allocation(encoder); - radeon_audio_write_sad_regs(encoder); - radeon_audio_write_latency_fields(encoder, mode); - radeon_audio_set_dto(encoder, mode->clock); - radeon_audio_set_vbi_packet(encoder); - radeon_hdmi_set_color_depth(encoder); - radeon_audio_update_acr(encoder, mode->clock); - radeon_audio_set_audio_packet(encoder); - radeon_audio_select_pin(encoder); + if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { + radeon_audio_set_mute(encoder, true); - if (radeon_audio_set_avi_packet(encoder, mode) < 0) - return; + radeon_audio_write_speaker_allocation(encoder); + 
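radeon_audio_detect() now takes the encoder explicitly and centralizes pin lifecycle handling: a connected monitor whose EDID advertises audio lazily acquires a pin and enables all channels, while a monitor without audio, or a disconnect, disables the pin and drops the reference. A condensed restatement of the branch structure above, leaving out the DP/HDMI funcs selection (note the no-audio and disconnected arms are identical):

    if (status == connector_status_connected &&
        drm_detect_monitor_audio(radeon_connector_edid(connector))) {
        if (!dig->pin)
            dig->pin = radeon_audio_get_pin(encoder);
        radeon_audio_enable(rdev, dig->pin, 0xf); /* all channels on */
    } else {
        radeon_audio_enable(rdev, dig->pin, 0);   /* mask 0 = off */
        dig->pin = NULL;
    }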
radeon_audio_write_sad_regs(encoder); + radeon_audio_write_latency_fields(encoder, mode); + radeon_audio_set_dto(encoder, mode->clock); + radeon_audio_set_vbi_packet(encoder); + radeon_hdmi_set_color_depth(encoder); + radeon_audio_update_acr(encoder, mode->clock); + radeon_audio_set_audio_packet(encoder); + radeon_audio_select_pin(encoder); + + if (radeon_audio_set_avi_packet(encoder, mode) < 0) + return; - radeon_audio_set_mute(encoder, false); + radeon_audio_set_mute(encoder, false); + } else { + radeon_hdmi_set_color_depth(encoder); + + if (radeon_audio_set_avi_packet(encoder, mode) < 0) + return; + } } static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode) + struct drm_display_mode *mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; @@ -759,22 +744,27 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, if (!dig || !dig->afmt) return; - radeon_audio_write_speaker_allocation(encoder); - radeon_audio_write_sad_regs(encoder); - radeon_audio_write_latency_fields(encoder, mode); - if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev)) - radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10); - else - radeon_audio_set_dto(encoder, dig_connector->dp_clock); - radeon_audio_set_audio_packet(encoder); - radeon_audio_select_pin(encoder); - - if (radeon_audio_set_avi_packet(encoder, mode) < 0) + if (!connector) return; + + if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { + radeon_audio_write_speaker_allocation(encoder); + radeon_audio_write_sad_regs(encoder); + radeon_audio_write_latency_fields(encoder, mode); + if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev)) + radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10); + else + radeon_audio_set_dto(encoder, dig_connector->dp_clock); + radeon_audio_set_audio_packet(encoder); + radeon_audio_select_pin(encoder); + + if (radeon_audio_set_avi_packet(encoder, mode) < 0) + return; + } } void radeon_audio_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode) + struct drm_display_mode *mode) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h index 8438304f7139..059cc3012062 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.h +++ b/drivers/gpu/drm/radeon/radeon_audio.h @@ -68,7 +68,8 @@ struct radeon_audio_funcs int radeon_audio_init(struct radeon_device *rdev); void radeon_audio_detect(struct drm_connector *connector, - enum drm_connector_status status); + struct drm_encoder *encoder, + enum drm_connector_status status); u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev, u32 offset, u32 reg); void radeon_audio_endpoint_wreg(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 3e5f6b71f3ad..c097d3a82bda 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) && (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) { + u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8; + + if (hss > lvds->native_mode.hdisplay) + hss = (10 - 1) * 8; + lvds->native_mode.htotal = lvds->native_mode.hdisplay + (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8; lvds->native_mode.hsync_start = lvds->native_mode.hdisplay + - (RBIOS16(tmp + 21) - RBIOS16(tmp + 
19) - 1) * 8; + hss; lvds->native_mode.hsync_end = lvds->native_mode.hsync_start + (RBIOS8(tmp + 23) * 8); diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index cebb65e07e1d..94b21ae70ef7 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1379,8 +1379,16 @@ out: /* updated in get modes as well since we need to know if it's analog or digital */ radeon_connector_update_scratch_regs(connector, ret); - if (radeon_audio != 0) - radeon_audio_detect(connector, ret); + if ((radeon_audio != 0) && radeon_connector->use_digital) { + const struct drm_connector_helper_funcs *connector_funcs = + connector->helper_private; + + encoder = connector_funcs->best_encoder(connector); + if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) { + radeon_connector_get_edid(connector); + radeon_audio_detect(connector, encoder, ret); + } + } exit: pm_runtime_mark_last_busy(connector->dev->dev); @@ -1717,8 +1725,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force) radeon_connector_update_scratch_regs(connector, ret); - if (radeon_audio != 0) - radeon_audio_detect(connector, ret); + if ((radeon_audio != 0) && encoder) { + radeon_connector_get_edid(connector); + radeon_audio_detect(connector, encoder, ret); + } out: pm_runtime_mark_last_busy(connector->dev->dev); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 07909d817381..aecc3e3dec0c 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -237,7 +237,6 @@ struct radeon_afmt { int offset; bool last_buffer_filled_status; int id; - struct r600_audio_pin *pin; }; struct radeon_mode_info { @@ -439,6 +438,7 @@ struct radeon_encoder_atom_dig { uint8_t backlight_level; int panel_mode; struct radeon_afmt *afmt; + struct r600_audio_pin *pin; int active_mst_links; }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 654c8daeb5ab..97ad3bcb99a7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -2492,7 +2492,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true, NULL); if (unlikely(ret != 0)) - goto out_err; + goto out_err_nores; ret = vmw_validate_buffers(dev_priv, sw_context); if (unlikely(ret != 0)) @@ -2536,6 +2536,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, vmw_resource_relocations_free(&sw_context->res_relocations); vmw_fifo_commit(dev_priv, command_size); + mutex_unlock(&dev_priv->binding_mutex); vmw_query_bo_switch_commit(dev_priv, sw_context); ret = vmw_execbuf_fence_commands(file_priv, dev_priv, @@ -2551,7 +2552,6 @@ int vmw_execbuf_process(struct drm_file *file_priv, DRM_ERROR("Fence submission error. 
Syncing.\n"); vmw_resource_list_unreserve(&sw_context->resource_list, false); - mutex_unlock(&dev_priv->binding_mutex); ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, (void *) fence); diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index f822fd2a1ada..884d82f9190e 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -546,6 +546,12 @@ static const struct hid_device_id apple_devices[] = { .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS), .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI), + .driver_data = APPLE_HAS_FN }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO), + .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS), + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO), diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 157c62775053..e6fce23b121a 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1782,6 +1782,9 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, @@ -2463,6 +2466,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, { } diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index b04b0820d816..b3b225b75d0a 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -142,6 +142,9 @@ #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 +#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272 +#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273 +#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274 #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b #define 
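The WELLSPRING9 enablement above shows the full set of tables a new Apple USB keyboard ID has to touch; missing any one of them leaves the device mishandled. The checklist, with one representative entry from the hunks:

    /* 1. hid-ids.h: define the ANSI/ISO/JIS product IDs for the revision.
     * 2. hid-apple.c apple_devices[]: bind with the right quirks
     *    (APPLE_HAS_FN, APPLE_ISO_KEYBOARD, APPLE_RDESC_JIS, ...).
     * 3. hid-core.c hid_have_special_driver[]: keep hid-generic off it.
     * 4. hid-core.c hid_mouse_ignore_list[]: the touchpad interface is
     *    handled elsewhere (by the bcm5974 driver on these machines).
     */
    { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
        .driver_data = APPLE_HAS_FN },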
USB_DEVICE_ID_APPLE_IRCONTROL 0x8240 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 3511bbaba505..e3c63640df73 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -462,12 +462,15 @@ out: static void hidinput_cleanup_battery(struct hid_device *dev) { + const struct power_supply_desc *psy_desc; + if (!dev->battery) return; + psy_desc = dev->battery->desc; power_supply_unregister(dev->battery); - kfree(dev->battery->desc->name); - kfree(dev->battery->desc); + kfree(psy_desc->name); + kfree(psy_desc); dev->battery = NULL; } #else /* !CONFIG_HID_BATTERY_STRENGTH */ diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c index 94167310e15a..b905d501e752 100644 --- a/drivers/hid/hid-uclogic.c +++ b/drivers/hid/hid-uclogic.c @@ -858,7 +858,7 @@ static int uclogic_tablet_enable(struct hid_device *hdev) for (p = drvdata->rdesc; p <= drvdata->rdesc + drvdata->rsize - 4;) { if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D && - p[3] < sizeof(params)) { + p[3] < ARRAY_SIZE(params)) { v = params[p[3]]; put_unaligned(cpu_to_le32(v), (s32 *)p); p += 4; diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 44958d79d598..01b937e63cf3 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -1284,6 +1284,39 @@ fail_register_pen_input: return error; } +/* + * Not all devices report physical dimensions from HID. + * Compute the default from hardcoded logical dimension + * and resolution before driver overwrites them. + */ +static void wacom_set_default_phy(struct wacom_features *features) +{ + if (features->x_resolution) { + features->x_phy = (features->x_max * 100) / + features->x_resolution; + features->y_phy = (features->y_max * 100) / + features->y_resolution; + } +} + +static void wacom_calculate_res(struct wacom_features *features) +{ + /* set unit to "100th of a mm" for devices not reported by HID */ + if (!features->unit) { + features->unit = 0x11; + features->unitExpo = -3; + } + + features->x_resolution = wacom_calc_hid_res(features->x_max, + features->x_phy, + features->unit, + features->unitExpo); + features->y_resolution = wacom_calc_hid_res(features->y_max, + features->y_phy, + features->unit, + features->unitExpo); +} + static void wacom_wireless_work(struct work_struct *work) { struct wacom *wacom = container_of(work, struct wacom, work); @@ -1341,6 +1374,8 @@ static void wacom_wireless_work(struct work_struct *work) if (wacom_wac1->features.type != INTUOSHT && wacom_wac1->features.type != BAMBOO_PT) wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD; + wacom_set_default_phy(&wacom_wac1->features); + wacom_calculate_res(&wacom_wac1->features); snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen", wacom_wac1->features.name); snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad", @@ -1359,7 +1394,9 @@ static void wacom_wireless_work(struct work_struct *work) wacom_wac2->features = *((struct wacom_features *)id->driver_data); wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3; + wacom_set_default_phy(&wacom_wac2->features); wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096; + wacom_calculate_res(&wacom_wac2->features); snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX, "%s (WL) Finger",wacom_wac2->features.name); snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX, @@ -1407,39 +1444,6 @@ void wacom_battery_work(struct work_struct *work) } } -/* - * Not all devices report physical dimensions from HID. 
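hidinput_cleanup_battery() fixes an order-of-operations use-after-free: power_supply_unregister() drops the last reference to the power_supply and frees it through the device model, so the old code's kfree(dev->battery->desc->name) dereferenced freed memory; the fix reads the desc pointer out first. (In the same range, hid-uclogic's p[3] < sizeof(params) compared an index against the array's byte size, four times its s32 element count, letting out-of-bounds indices through; ARRAY_SIZE(params) compares element counts.) A self-contained userspace analogue of the ordering bug and its fix:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct psy_desc { char *name; };
    struct power_supply { struct psy_desc *desc; };

    /* Stand-in for power_supply_unregister(): releases the supply object. */
    static void fake_unregister(struct power_supply *psy) { free(psy); }

    int main(void)
    {
        struct power_supply *battery = malloc(sizeof(*battery));
        struct psy_desc *desc = malloc(sizeof(*desc));

        desc->name = strdup("hid-battery");
        battery->desc = desc;

        /* Correct order, mirroring the fix: save the pointer, then unregister. */
        struct psy_desc *saved = battery->desc;
        fake_unregister(battery);   /* battery is gone after this call   */
        free(saved->name);          /* still valid: we saved the pointer */
        free(saved);
        /* The old code did the equivalent of free(battery->desc->name)
         * here instead, dereferencing battery after it had been freed. */
        return 0;
    }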
- * Compute the default from hardcoded logical dimension - * and resolution before driver overwrites them. - */ -static void wacom_set_default_phy(struct wacom_features *features) -{ - if (features->x_resolution) { - features->x_phy = (features->x_max * 100) / - features->x_resolution; - features->y_phy = (features->y_max * 100) / - features->y_resolution; - } -} - -static void wacom_calculate_res(struct wacom_features *features) -{ - /* set unit to "100th of a mm" for devices not reported by HID */ - if (!features->unit) { - features->unit = 0x11; - features->unitExpo = -3; - } - - features->x_resolution = wacom_calc_hid_res(features->x_max, - features->x_phy, - features->unit, - features->unitExpo); - features->y_resolution = wacom_calc_hid_res(features->y_max, - features->y_phy, - features->unit, - features->unitExpo); -} - static size_t wacom_compute_pktlen(struct hid_device *hdev) { struct hid_report_enum *report_enum; diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index 37c16afe007a..c8487894b312 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -929,6 +929,21 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = { MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); +static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = { + { + /* + * CPU fan speed going up and down on Dell Studio XPS 8100 + * for unknown reasons. + */ + .ident = "Dell Studio XPS 8100", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"), + }, + }, + { } +}; + /* * Probe for the presence of a supported laptop. */ @@ -940,7 +955,8 @@ static int __init i8k_probe(void) /* * Get DMI information */ - if (!dmi_check_system(i8k_dmi_table)) { + if (!dmi_check_system(i8k_dmi_table) || + dmi_check_system(i8k_blacklist_dmi_table)) { if (!ignore_dmi && !force) return -ENODEV; diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c index 9b55e673b67c..85d106fe3ce8 100644 --- a/drivers/hwmon/g762.c +++ b/drivers/hwmon/g762.c @@ -582,6 +582,7 @@ static const struct of_device_id g762_dt_match[] = { { .compatible = "gmt,g763" }, { }, }; +MODULE_DEVICE_TABLE(of, g762_dt_match); /* * Grab clock (a required property), enable it, get (fixed) clock frequency diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c index 28fcb2e246d5..fbfc02bb2cfa 100644 --- a/drivers/hwmon/nct7802.c +++ b/drivers/hwmon/nct7802.c @@ -195,7 +195,7 @@ abort: } static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index, - unsigned int voltage) + unsigned long voltage) { int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr]; int err; diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c index b77b82f24480..08ff89d222e5 100644 --- a/drivers/hwmon/nct7904.c +++ b/drivers/hwmon/nct7904.c @@ -412,8 +412,9 @@ static ssize_t show_pwm(struct device *dev, return sprintf(buf, "%d\n", val); } -static ssize_t store_mode(struct device *dev, struct device_attribute *devattr, - const char *buf, size_t count) +static ssize_t store_enable(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) { int index = to_sensor_dev_attr(devattr)->index; struct nct7904_data *data = dev_get_drvdata(dev); @@ -422,18 +423,18 @@ static ssize_t store_mode(struct device *dev, struct device_attribute *devattr, if (kstrtoul(buf, 10, &val) < 0) return -EINVAL; - if (val > 1 || (val && !data->fan_mode[index])) + if (val < 1 || val > 2 || (val == 2 && !data->fan_mode[index])) return -EINVAL; 
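Renaming fanX_pwm/fanX_mode to pwmX/pwmX_enable aligns nct7904 with the standard hwmon sysfs ABI, where pwm[N] holds the duty cycle and pwm[N]_enable selects the control mode: 1 means manual control and 2 or higher means an automatic mode (SmartFan here, hence the stricter validation above: only 1 or 2 are accepted, and 2 only when a saved fan_mode register value exists to restore). From userspace the contract looks like this (illustrative path; the actual hwmonN index is system-dependent):

    #include <stdio.h>

    int main(void)
    {
        /* Path is illustrative; resolve the right hwmonN via its name file. */
        FILE *f = fopen("/sys/class/hwmon/hwmon0/pwm1_enable", "w");

        if (!f)
            return 1;
        fputs("2", f);  /* 2 = automatic (SmartFan); 1 = manual pwm1 control */
        return fclose(f) ? 1 : 0;
    }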
ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index, - val ? data->fan_mode[index] : 0); + val == 2 ? data->fan_mode[index] : 0); return ret ? ret : count; } -/* Return 0 for manual mode or 1 for SmartFan mode */ -static ssize_t show_mode(struct device *dev, - struct device_attribute *devattr, char *buf) +/* Return 1 for manual mode or 2 for SmartFan mode */ +static ssize_t show_enable(struct device *dev, + struct device_attribute *devattr, char *buf) { int index = to_sensor_dev_attr(devattr)->index; struct nct7904_data *data = dev_get_drvdata(dev); @@ -443,36 +444,36 @@ static ssize_t show_mode(struct device *dev, if (val < 0) return val; - return sprintf(buf, "%d\n", val ? 1 : 0); + return sprintf(buf, "%d\n", val ? 2 : 1); } /* 2 attributes per channel: pwm and mode */ -static SENSOR_DEVICE_ATTR(fan1_pwm, S_IRUGO | S_IWUSR, +static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, store_pwm, 0); -static SENSOR_DEVICE_ATTR(fan1_mode, S_IRUGO | S_IWUSR, - show_mode, store_mode, 0); -static SENSOR_DEVICE_ATTR(fan2_pwm, S_IRUGO | S_IWUSR, +static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, + show_enable, store_enable, 0); +static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, show_pwm, store_pwm, 1); -static SENSOR_DEVICE_ATTR(fan2_mode, S_IRUGO | S_IWUSR, - show_mode, store_mode, 1); -static SENSOR_DEVICE_ATTR(fan3_pwm, S_IRUGO | S_IWUSR, +static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR, + show_enable, store_enable, 1); +static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, show_pwm, store_pwm, 2); -static SENSOR_DEVICE_ATTR(fan3_mode, S_IRUGO | S_IWUSR, - show_mode, store_mode, 2); -static SENSOR_DEVICE_ATTR(fan4_pwm, S_IRUGO | S_IWUSR, +static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR, + show_enable, store_enable, 2); +static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR, show_pwm, store_pwm, 3); -static SENSOR_DEVICE_ATTR(fan4_mode, S_IRUGO | S_IWUSR, - show_mode, store_mode, 3); +static SENSOR_DEVICE_ATTR(pwm4_enable, S_IRUGO | S_IWUSR, + show_enable, store_enable, 3); static struct attribute *nct7904_fanctl_attrs[] = { - &sensor_dev_attr_fan1_pwm.dev_attr.attr, - &sensor_dev_attr_fan1_mode.dev_attr.attr, - &sensor_dev_attr_fan2_pwm.dev_attr.attr, - &sensor_dev_attr_fan2_mode.dev_attr.attr, - &sensor_dev_attr_fan3_pwm.dev_attr.attr, - &sensor_dev_attr_fan3_mode.dev_attr.attr, - &sensor_dev_attr_fan4_pwm.dev_attr.attr, - &sensor_dev_attr_fan4_mode.dev_attr.attr, + &sensor_dev_attr_pwm1.dev_attr.attr, + &sensor_dev_attr_pwm1_enable.dev_attr.attr, + &sensor_dev_attr_pwm2.dev_attr.attr, + &sensor_dev_attr_pwm2_enable.dev_attr.attr, + &sensor_dev_attr_pwm3.dev_attr.attr, + &sensor_dev_attr_pwm3_enable.dev_attr.attr, + &sensor_dev_attr_pwm4.dev_attr.attr, + &sensor_dev_attr_pwm4_enable.dev_attr.attr, NULL }; @@ -574,6 +575,7 @@ static const struct i2c_device_id nct7904_id[] = { {"nct7904", 0}, {} }; +MODULE_DEVICE_TABLE(i2c, nct7904_id); static struct i2c_driver nct7904_driver = { .class = I2C_CLASS_HWMON, diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c index af162b4c7a6d..025686d41640 100644 --- a/drivers/i2c/busses/i2c-bfin-twi.c +++ b/drivers/i2c/busses/i2c-bfin-twi.c @@ -692,7 +692,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, iface); - dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Contoller, " + dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Controller, " "regs_base@%p\n", iface->regs_base); return 0; @@ -735,6 +735,6 @@ subsys_initcall(i2c_bfin_twi_init); 
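The MODULE_DEVICE_TABLE() additions in the g762 and nct7904 hunks are one-liners with user-visible effect: the macro exports the ID table as module alias information, which depmod records so that udev/modprobe can autoload the module when a matching OF node or I2C device is registered; without it the drivers bind only if loaded by hand. The pattern as used above:

    static const struct i2c_device_id nct7904_id[] = {
        { "nct7904", 0 },
        { }                          /* sentinel terminates the table */
    };
    /* Emits alias info (e.g. "i2c:nct7904") into the module image. */
    MODULE_DEVICE_TABLE(i2c, nct7904_id);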
module_exit(i2c_bfin_twi_exit); MODULE_AUTHOR("Bryan Wu, Sonic Zhang"); -MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Contoller Driver"); +MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Controller Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:i2c-bfin-twi"); diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index d1c22e3fdd14..fc9bf7f30e35 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1247,7 +1247,14 @@ static void omap_i2c_prepare_recovery(struct i2c_adapter *adap) u32 reg; reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); + /* enable test mode */ reg |= OMAP_I2C_SYSTEST_ST_EN; + /* select SDA/SCL IO mode */ + reg |= 3 << OMAP_I2C_SYSTEST_TMODE_SHIFT; + /* set SCL to high-impedance state (reset value is 0) */ + reg |= OMAP_I2C_SYSTEST_SCL_O; + /* set SDA to high-impedance state (reset value is 0) */ + reg |= OMAP_I2C_SYSTEST_SDA_O; omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg); } @@ -1257,7 +1264,11 @@ static void omap_i2c_unprepare_recovery(struct i2c_adapter *adap) u32 reg; reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); + /* restore reset values */ reg &= ~OMAP_I2C_SYSTEST_ST_EN; + reg &= ~OMAP_I2C_SYSTEST_TMODE_MASK; + reg &= ~OMAP_I2C_SYSTEST_SCL_O; + reg &= ~OMAP_I2C_SYSTEST_SDA_O; omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg); } diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index e6d4935161e4..c83e4d13cfc5 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -567,6 +567,9 @@ static int i2c_generic_recovery(struct i2c_adapter *adap) if (bri->prepare_recovery) bri->prepare_recovery(adap); + bri->set_scl(adap, val); + ndelay(RECOVERY_NDELAY); + /* * By this time SCL is high, as we need to give 9 falling-rising edges */ @@ -597,7 +600,6 @@ static int i2c_generic_recovery(struct i2c_adapter *adap) int i2c_generic_scl_recovery(struct i2c_adapter *adap) { - adap->bus_recovery_info->set_scl(adap, 1); return i2c_generic_recovery(adap); } EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery); @@ -1338,13 +1340,17 @@ static int of_dev_node_match(struct device *dev, void *data) struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) { struct device *dev; + struct i2c_client *client; - dev = bus_find_device(&i2c_bus_type, NULL, node, - of_dev_node_match); + dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match); if (!dev) return NULL; - return i2c_verify_client(dev); + client = i2c_verify_client(dev); + if (!client) + put_device(dev); + + return client; } EXPORT_SYMBOL(of_find_i2c_device_by_node); @@ -1352,13 +1358,17 @@ EXPORT_SYMBOL(of_find_i2c_device_by_node); struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node) { struct device *dev; + struct i2c_adapter *adapter; - dev = bus_find_device(&i2c_bus_type, NULL, node, - of_dev_node_match); + dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match); if (!dev) return NULL; - return i2c_verify_adapter(dev); + adapter = i2c_verify_adapter(dev); + if (!adapter) + put_device(dev); + + return adapter; } EXPORT_SYMBOL(of_find_i2c_adapter_by_node); #else diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c index 822374654609..1da449614779 100644 --- a/drivers/i2c/i2c-slave-eeprom.c +++ b/drivers/i2c/i2c-slave-eeprom.c @@ -80,9 +80,6 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj struct eeprom_data *eeprom; unsigned long flags; - if (off + count > attr->size) - return -EFBIG; - eeprom = 
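Two i2c-core fixes above are easy to miss. Moving the initial SCL assertion out of i2c_generic_scl_recovery() and into i2c_generic_recovery(), after the prepare_recovery() hook runs, means bus drivers get to configure the pins first; the omap SYSTEST changes above rely on exactly that, so SCL is only driven once the controller is in its test mode. Separately, of_find_i2c_device_by_node() and of_find_i2c_adapter_by_node() leaked a device reference: bus_find_device() returns its match with an elevated refcount, and when the verify step rejected the device nothing dropped it. The shape of the fix:

    dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
    if (!dev)
        return NULL;

    client = i2c_verify_client(dev);  /* NULL if dev is not an i2c client */
    if (!client)
        put_device(dev);  /* balance the reference bus_find_device() took */

    return client;        /* on success the caller keeps the reference */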
dev_get_drvdata(container_of(kobj, struct device, kobj)); spin_lock_irqsave(&eeprom->buffer_lock, flags); @@ -98,9 +95,6 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob struct eeprom_data *eeprom; unsigned long flags; - if (off + count > attr->size) - return -EFBIG; - eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); spin_lock_irqsave(&eeprom->buffer_lock, flags); diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index b1b73232f217..bbbe0184e592 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -736,6 +736,10 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc) /* * T3 only supports 32 bits of size. */ + if (sizeof(phys_addr_t) > 4) { + pr_warn_once(MOD "Cannot support dma_mrs on this platform.\n"); + return ERR_PTR(-ENOTSUPP); + } bl.size = 0xffffffff; bl.addr = 0; kva = 0; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h index b396344fae16..6a36338593cd 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
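The iwch_get_dma_mr() change above is a build-width guard rather than a runtime probe: sizeof(phys_addr_t) is a compile-time constant, so on 32-bit-physical configurations the branch is folded away entirely, while on larger platforms the driver now refuses to create an all-of-memory DMA MR that T3's 32-bit size and address fields cannot describe. Condensed:

    /* T3 describes DMA MRs with 32-bit fields; refuse early when physical
     * addresses may not fit (constant-folded out on 32-bit builds). */
    if (sizeof(phys_addr_t) > 4) {
        pr_warn_once(MOD "Cannot support dma_mrs on this platform.\n");
        return ERR_PTR(-ENOTSUPP);
    }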
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #ifndef __OCRDMA_H__ #define __OCRDMA_H__ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h index 1554cca5712a..430b1350fe96 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #ifndef __OCRDMA_ABI_H__ #define __OCRDMA_ABI_H__ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index 29b27675dd70..44766fee1f4e 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #include <net/neighbour.h> #include <net/netevent.h> diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h index cf366fe03cb8..04a30ae67473 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #ifndef __OCRDMA_AH_H__ #define __OCRDMA_AH_H__ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 47615ff33bc6..aab391a15db4 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) CNA Adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #include <linux/sched.h> #include <linux/interrupt.h> diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h index e905972fceb7..7ed885c1851e 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) CNA Adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #ifndef __OCRDMA_HW_H__ #define __OCRDMA_HW_H__ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index d98a707a5eb9..b119a3413a15 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #include <linux/module.h> #include <linux/idr.h> @@ -46,7 +61,7 @@ MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION); MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION); MODULE_AUTHOR("Emulex Corporation"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("Dual BSD/GPL"); static LIST_HEAD(ocrdma_dev_list); static DEFINE_SPINLOCK(ocrdma_devlist_lock); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h index 02ad0aee99af..80006b24aa11 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #ifndef __OCRDMA_SLI_H__ #define __OCRDMA_SLI_H__ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c index 48d7ef51aa0c..69334e214571 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2014 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #include <rdma/ib_addr.h> #include <rdma/ib_pma.h> diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h index 091edd68a8a3..c9e58d04c7b8 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2014 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #ifndef __OCRDMA_STATS_H__ #define __OCRDMA_STATS_H__ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 5bb61eb58f2c..bc84cd462ecf 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #include <linux/dma-mapping.h> #include <rdma/ib_verbs.h> diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index b15c608efa7b..eaccb2d3cb9f 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #ifndef __OCRDMA_VERBS_H__ #define __OCRDMA_VERBS_H__ diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index 9e6ee82a8fd7..851c8219d501 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c @@ -177,7 +177,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) else size += ipoib_recvq_size * ipoib_max_conn_qp; } else - goto out_free_wq; + if (ret != -ENOSYS) + goto out_free_wq; cq_attr.cqe = size; priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 771700963127..d851e1828d6f 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -775,6 +775,17 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) ret = isert_rdma_post_recvl(isert_conn); if (ret) goto out_conn_dev; + /* + * Obtain the second reference now before isert_rdma_accept() to + * ensure that any initiator generated REJECT CM event that occurs + * asynchronously won't drop the last reference until the error path + * in iscsi_target_login_sess_out() does it's ->iscsit_free_conn() -> + * isert_free_conn() -> isert_put_conn() -> kref_put(). 
+ */ + if (!kref_get_unless_zero(&isert_conn->kref)) { + isert_warn("conn %p connect_release is running\n", isert_conn); + goto out_conn_dev; + } ret = isert_rdma_accept(isert_conn); if (ret) @@ -836,11 +847,6 @@ isert_connected_handler(struct rdma_cm_id *cma_id) isert_info("conn %p\n", isert_conn); - if (!kref_get_unless_zero(&isert_conn->kref)) { - isert_warn("conn %p connect_release is running\n", isert_conn); - return; - } - mutex_lock(&isert_conn->mutex); if (isert_conn->state != ISER_CONN_FULL_FEATURE) isert_conn->state = ISER_CONN_UP; diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c index 27b6a3ce18ca..891797ad76bc 100644 --- a/drivers/input/joystick/turbografx.c +++ b/drivers/input/joystick/turbografx.c @@ -196,7 +196,7 @@ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs) if (n_buttons[i] < 1) continue; - if (n_buttons[i] > 6) { + if (n_buttons[i] > ARRAY_SIZE(tgfx_buttons)) { printk(KERN_ERR "turbografx.c: Invalid number of buttons %d\n", n_buttons[i]); err = -EINVAL; goto err_unreg_devs; diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c index 10e140af5aac..1ac898db303a 100644 --- a/drivers/input/misc/axp20x-pek.c +++ b/drivers/input/misc/axp20x-pek.c @@ -292,3 +292,4 @@ module_platform_driver(axp20x_pek_driver); MODULE_DESCRIPTION("axp20x Power Button"); MODULE_AUTHOR("Carlo Caione <carlo@caione.org>"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:axp20x-pek"); diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c index fc17b9592f54..10c4e3d462f1 100644 --- a/drivers/input/misc/twl4030-vibra.c +++ b/drivers/input/misc/twl4030-vibra.c @@ -183,7 +183,8 @@ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata, if (pdata && pdata->coexist) return true; - if (of_find_node_by_name(node, "codec")) { + node = of_find_node_by_name(node, "codec"); + if (node) { of_node_put(node); return true; } diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 113d6f1516a5..4d246861d692 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -20,6 +20,7 @@ #include <linux/input/mt.h> #include <linux/serio.h> #include <linux/libps2.h> +#include <linux/dmi.h> #include "psmouse.h" #include "alps.h" @@ -99,6 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = { #define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */ #define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with 6-byte ALPS packet */ +#define ALPS_DELL 0x100 /* device is a Dell laptop */ #define ALPS_BUTTONPAD 0x200 /* device is a clickpad */ static const struct alps_model_info alps_model_data[] = { @@ -251,9 +253,9 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse) return; } - /* Non interleaved V2 dualpoint has separate stick button bits */ + /* Dell non interleaved V2 dualpoint has separate stick button bits */ if (priv->proto_version == ALPS_PROTO_V2 && - priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) { + priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) { left |= packet[0] & 1; right |= packet[0] & 2; middle |= packet[0] & 4; @@ -2550,6 +2552,8 @@ static int alps_set_protocol(struct psmouse *psmouse, priv->byte0 = protocol->byte0; priv->mask0 = protocol->mask0; priv->flags = protocol->flags; + if (dmi_name_in_vendors("Dell")) + priv->flags |= ALPS_DELL; priv->x_max = 2000; priv->y_max = 1400; diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 
b10709f04615..30e3442518f8 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -2,6 +2,7 @@ * Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver * * Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se) + * Copyright (C) 2015 John Horan (knasher@gmail.com) * * The USB initialization and package decoding was made by * Scott Shawcroft as part of the touchd user-space driver project: @@ -91,6 +92,10 @@ #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 +/* MacbookPro12,1 (2015) */ +#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272 +#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273 +#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274 #define BCM5974_DEVICE(prod) { \ .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ @@ -152,6 +157,10 @@ static const struct usb_device_id bcm5974_table[] = { BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_JIS), + /* MacbookPro12,1 */ + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ISO), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_JIS), /* Terminating entry */ {} }; @@ -180,21 +189,47 @@ struct bt_data { enum tp_type { TYPE1, /* plain trackpad */ TYPE2, /* button integrated in trackpad */ - TYPE3 /* additional header fields since June 2013 */ + TYPE3, /* additional header fields since June 2013 */ + TYPE4 /* additional header field for pressure data */ }; /* trackpad finger data offsets, le16-aligned */ -#define FINGER_TYPE1 (13 * sizeof(__le16)) -#define FINGER_TYPE2 (15 * sizeof(__le16)) -#define FINGER_TYPE3 (19 * sizeof(__le16)) +#define HEADER_TYPE1 (13 * sizeof(__le16)) +#define HEADER_TYPE2 (15 * sizeof(__le16)) +#define HEADER_TYPE3 (19 * sizeof(__le16)) +#define HEADER_TYPE4 (23 * sizeof(__le16)) /* trackpad button data offsets */ +#define BUTTON_TYPE1 0 #define BUTTON_TYPE2 15 #define BUTTON_TYPE3 23 +#define BUTTON_TYPE4 31 /* list of device capability bits */ #define HAS_INTEGRATED_BUTTON 1 +/* trackpad finger data block size */ +#define FSIZE_TYPE1 (14 * sizeof(__le16)) +#define FSIZE_TYPE2 (14 * sizeof(__le16)) +#define FSIZE_TYPE3 (14 * sizeof(__le16)) +#define FSIZE_TYPE4 (15 * sizeof(__le16)) + +/* offset from header to finger struct */ +#define DELTA_TYPE1 (0 * sizeof(__le16)) +#define DELTA_TYPE2 (0 * sizeof(__le16)) +#define DELTA_TYPE3 (0 * sizeof(__le16)) +#define DELTA_TYPE4 (1 * sizeof(__le16)) + +/* usb control message mode switch data */ +#define USBMSG_TYPE1 8, 0x300, 0, 0, 0x1, 0x8 +#define USBMSG_TYPE2 8, 0x300, 0, 0, 0x1, 0x8 +#define USBMSG_TYPE3 8, 0x300, 0, 0, 0x1, 0x8 +#define USBMSG_TYPE4 2, 0x302, 2, 1, 0x1, 0x0 + +/* Wellspring initialization constants */ +#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1 +#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9 + /* trackpad finger structure, le16-aligned */ struct tp_finger { __le16 origin; /* zero when switching track finger */ @@ -207,14 +242,13 @@ struct tp_finger { __le16 orientation; /* 16384 when point, else 15 bit angle */ __le16 touch_major; /* touch area, major axis */ __le16 touch_minor; /* touch area, minor axis */ - __le16 unused[3]; /* zeros */ + __le16 unused[2]; /* zeros */ + __le16 pressure; /* pressure on forcetouch touchpad */ __le16 multi; /* one finger: varies, more fingers: constant */ } __attribute__((packed,aligned(2))); /* trackpad finger 
data size, empirically at least ten fingers */ #define MAX_FINGERS 16 -#define SIZEOF_FINGER sizeof(struct tp_finger) -#define SIZEOF_ALL_FINGERS (MAX_FINGERS * SIZEOF_FINGER) #define MAX_FINGER_ORIENTATION 16384 /* device-specific parameters */ @@ -232,8 +266,17 @@ struct bcm5974_config { int bt_datalen; /* data length of the button interface */ int tp_ep; /* the endpoint of the trackpad interface */ enum tp_type tp_type; /* type of trackpad interface */ - int tp_offset; /* offset to trackpad finger data */ + int tp_header; /* bytes in header block */ int tp_datalen; /* data length of the trackpad interface */ + int tp_button; /* offset to button data */ + int tp_fsize; /* bytes in single finger block */ + int tp_delta; /* offset from header to finger struct */ + int um_size; /* usb control message length */ + int um_req_val; /* usb control message value */ + int um_req_idx; /* usb control message index */ + int um_switch_idx; /* usb control message mode switch index */ + int um_switch_on; /* usb control message mode switch on */ + int um_switch_off; /* usb control message mode switch off */ struct bcm5974_param p; /* finger pressure limits */ struct bcm5974_param w; /* finger width limits */ struct bcm5974_param x; /* horizontal limits */ @@ -259,6 +302,24 @@ struct bcm5974 { int slots[MAX_FINGERS]; /* slot assignments */ }; +/* trackpad finger block data, le16-aligned */ +static const struct tp_finger *get_tp_finger(const struct bcm5974 *dev, int i) +{ + const struct bcm5974_config *c = &dev->cfg; + u8 *f_base = dev->tp_data + c->tp_header + c->tp_delta; + + return (const struct tp_finger *)(f_base + i * c->tp_fsize); +} + +#define DATAFORMAT(type) \ + type, \ + HEADER_##type, \ + HEADER_##type + (MAX_FINGERS) * (FSIZE_##type), \ + BUTTON_##type, \ + FSIZE_##type, \ + DELTA_##type, \ + USBMSG_##type + /* logical signal quality */ #define SN_PRESSURE 45 /* pressure signal-to-noise ratio */ #define SN_WIDTH 25 /* width signal-to-noise ratio */ @@ -273,7 +334,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING_JIS, 0, 0x84, sizeof(struct bt_data), - 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE1), { SN_PRESSURE, 0, 256 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4824, 5342 }, @@ -286,7 +347,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, 0, 0x84, sizeof(struct bt_data), - 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE1), { SN_PRESSURE, 0, 256 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4824, 4824 }, @@ -299,7 +360,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING3_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), - 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4460, 5166 }, @@ -312,7 +373,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING4_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), - 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4620, 5140 }, @@ -325,7 +386,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), - 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE2), 
{ SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4616, 5112 }, @@ -338,7 +399,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING5_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), - 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4415, 5050 }, @@ -351,7 +412,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING6_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), - 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4620, 5140 }, @@ -364,7 +425,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), - 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4750, 5280 }, @@ -377,7 +438,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), - 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4620, 5140 }, @@ -390,7 +451,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING7_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), - 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4750, 5280 }, @@ -403,7 +464,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), - 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4750, 5280 }, @@ -416,13 +477,26 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING8_JIS, HAS_INTEGRATED_BUTTON, 0, sizeof(struct bt_data), - 0x83, TYPE3, FINGER_TYPE3, FINGER_TYPE3 + SIZEOF_ALL_FINGERS, + 0x83, DATAFORMAT(TYPE3), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4620, 5140 }, { SN_COORD, -150, 6600 }, { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, + { + USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI, + USB_DEVICE_ID_APPLE_WELLSPRING9_ISO, + USB_DEVICE_ID_APPLE_WELLSPRING9_JIS, + HAS_INTEGRATED_BUTTON, + 0, sizeof(struct bt_data), + 0x83, DATAFORMAT(TYPE4), + { SN_PRESSURE, 0, 300 }, + { SN_WIDTH, 0, 2048 }, + { SN_COORD, -4828, 5345 }, + { SN_COORD, -203, 6803 }, + { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } + }, {} }; @@ -549,19 +623,18 @@ static int report_tp_state(struct bcm5974 *dev, int size) struct input_dev *input = dev->input; int raw_n, i, n = 0; - if (size < c->tp_offset || (size - c->tp_offset) % SIZEOF_FINGER != 0) + if (size < c->tp_header || (size - c->tp_header) % c->tp_fsize != 0) return -EIO; - /* finger data, le16-aligned */ - f = (const struct tp_finger *)(dev->tp_data + c->tp_offset); - raw_n = (size - c->tp_offset) / SIZEOF_FINGER; + raw_n = (size - c->tp_header) / c->tp_fsize; for (i = 0; i < raw_n; i++) { - if (raw2int(f[i].touch_major) == 0) + f = get_tp_finger(dev, i); + if (raw2int(f->touch_major) == 0) continue; 
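The bcm5974 hunks here replace the fixed FINGER_TYPE* offsets and flat f[i] indexing with per-type header/stride/delta fields and a get_tp_finger() accessor, so TYPE4 reports (which carry an extra pressure word per finger) parse with the unchanged loop. A minimal standalone sketch of that pattern follows; the names (report_layout, finger_at, report_len_ok) are illustrative, not the driver's:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical per-device report layout; all sizes in bytes. */
struct report_layout {
        size_t header;  /* bytes of per-report header */
        size_t delta;   /* gap between header and first finger record */
        size_t fsize;   /* stride of one finger record */
};

/* Pointer to finger i in a raw report: base + header + delta + i * stride,
 * mirroring the stride-based lookup the driver switches to. */
static inline const void *finger_at(const struct report_layout *l,
                                    const uint8_t *report, int i)
{
        return report + l->header + l->delta + (size_t)i * l->fsize;
}

/* Sanity check in the same spirit as report_tp_state(): the payload
 * after the header must be a whole number of finger records. */
static int report_len_ok(const struct report_layout *l, size_t size)
{
        return size >= l->header && (size - l->header) % l->fsize == 0;
}

Because the stride comes from the config table rather than sizeof(struct tp_finger), a new trailing field only changes one table entry, not the parsing loop.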
- dev->pos[n].x = raw2int(f[i].abs_x); - dev->pos[n].y = c->y.min + c->y.max - raw2int(f[i].abs_y); - dev->index[n++] = &f[i]; + dev->pos[n].x = raw2int(f->abs_x); + dev->pos[n].y = c->y.min + c->y.max - raw2int(f->abs_y); + dev->index[n++] = f; } input_mt_assign_slots(input, dev->slots, dev->pos, n, 0); @@ -572,32 +645,22 @@ static int report_tp_state(struct bcm5974 *dev, int size) input_mt_sync_frame(input); - report_synaptics_data(input, c, f, raw_n); + report_synaptics_data(input, c, get_tp_finger(dev, 0), raw_n); - /* type 2 reports button events via ibt only */ - if (c->tp_type == TYPE2) { - int ibt = raw2int(dev->tp_data[BUTTON_TYPE2]); + /* later types report button events via integrated button only */ + if (c->caps & HAS_INTEGRATED_BUTTON) { + int ibt = raw2int(dev->tp_data[c->tp_button]); input_report_key(input, BTN_LEFT, ibt); } - if (c->tp_type == TYPE3) - input_report_key(input, BTN_LEFT, dev->tp_data[BUTTON_TYPE3]); - input_sync(input); return 0; } -/* Wellspring initialization constants */ -#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1 -#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9 -#define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300 -#define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0 -#define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01 -#define BCM5974_WELLSPRING_MODE_NORMAL_VALUE 0x08 - static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on) { + const struct bcm5974_config *c = &dev->cfg; int retval = 0, size; char *data; @@ -605,7 +668,7 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on) if (dev->cfg.tp_type == TYPE3) return 0; - data = kmalloc(8, GFP_KERNEL); + data = kmalloc(c->um_size, GFP_KERNEL); if (!data) { dev_err(&dev->intf->dev, "out of memory\n"); retval = -ENOMEM; @@ -616,28 +679,24 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on) size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), BCM5974_WELLSPRING_MODE_READ_REQUEST_ID, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - BCM5974_WELLSPRING_MODE_REQUEST_VALUE, - BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000); + c->um_req_val, c->um_req_idx, data, c->um_size, 5000); - if (size != 8) { + if (size != c->um_size) { dev_err(&dev->intf->dev, "could not read from device\n"); retval = -EIO; goto out; } /* apply the mode switch */ - data[0] = on ? - BCM5974_WELLSPRING_MODE_VENDOR_VALUE : - BCM5974_WELLSPRING_MODE_NORMAL_VALUE; + data[c->um_switch_idx] = on ? c->um_switch_on : c->um_switch_off; /* write configuration */ size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - BCM5974_WELLSPRING_MODE_REQUEST_VALUE, - BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000); + c->um_req_val, c->um_req_idx, data, c->um_size, 5000); - if (size != 8) { + if (size != c->um_size) { dev_err(&dev->intf->dev, "could not write to device\n"); retval = -EIO; goto out; diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 22b9ca901f4e..2955f1d0ca6c 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -783,19 +783,26 @@ static int elantech_packet_check_v4(struct psmouse *psmouse) struct elantech_data *etd = psmouse->private; unsigned char *packet = psmouse->packet; unsigned char packet_type = packet[3] & 0x03; + unsigned int ic_version; bool sanity_check; if (etd->tp_dev && (packet[3] & 0x0f) == 0x06) return PACKET_TRACKPOINT; + /* This represents the version of IC body. 
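The elantech change just below derives the IC body version from the firmware-version triple and uses it, together with a sample byte, to pick the right constant-bit sanity mask. A hedged sketch of that decode and selection, with the masks copied from the hunk (the function names are illustrative):

#include <stdint.h>

/* fw_version packs three query bytes as byte0<<16 | byte1<<8 | byte2;
 * the low nibble of byte0 identifies the IC body revision. */
static unsigned int elantech_ic_version(uint32_t fw_version)
{
        return (fw_version & 0x0f0000) >> 16;
}

/* Constant-bit check, as in the hunk below: v7 ICs with sample
 * byte 0x2A use a looser mask than the generic non-CRC case. */
static int packet_sane(const uint8_t *p, int crc_enabled,
                       unsigned int ic_version, uint8_t sample1)
{
        if (crc_enabled)
                return (p[3] & 0x08) == 0x00;
        if (ic_version == 7 && sample1 == 0x2A)
                return (p[3] & 0x1c) == 0x10;
        return (p[0] & 0x0c) == 0x04 && (p[3] & 0x1c) == 0x10;
}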
*/ + ic_version = (etd->fw_version & 0x0f0000) >> 16; + /* * Sanity check based on the constant bits of a packet. * The constant bits change depending on the value of - * the hardware flag 'crc_enabled' but are the same for - * every packet, regardless of the type. + * the hardware flag 'crc_enabled' and the version of + * the IC body, but are the same for every packet, + * regardless of the type. */ if (etd->crc_enabled) sanity_check = ((packet[3] & 0x08) == 0x00); + else if (ic_version == 7 && etd->samples[1] == 0x2A) + sanity_check = ((packet[3] & 0x1c) == 0x10); else sanity_check = ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0x1c) == 0x10); @@ -1116,6 +1123,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, * Avatar AVIU-145A2 0x361f00 ? clickpad * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons + * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*) @@ -1651,6 +1659,16 @@ int elantech_init(struct psmouse *psmouse) etd->capabilities[0], etd->capabilities[1], etd->capabilities[2]); + if (etd->hw_version != 1) { + if (etd->send_cmd(psmouse, ETP_SAMPLE_QUERY, etd->samples)) { + psmouse_err(psmouse, "failed to query sample data\n"); + goto init_fail; + } + psmouse_info(psmouse, + "Elan sample query result %02x, %02x, %02x\n", + etd->samples[0], etd->samples[1], etd->samples[2]); + } + if (elantech_set_absolute_mode(psmouse)) { psmouse_err(psmouse, "failed to put touchpad into absolute mode.\n"); diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h index f965d1569cc3..e1cbf409d9c8 100644 --- a/drivers/input/mouse/elantech.h +++ b/drivers/input/mouse/elantech.h @@ -129,6 +129,7 @@ struct elantech_data { unsigned char reg_26; unsigned char debug; unsigned char capabilities[3]; + unsigned char samples[3]; bool paritycheck; bool jumpy_cursor; bool reports_pressure; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 3a32caf06bf1..6025eb430c0a 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -1484,12 +1484,12 @@ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode) priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? 
SYN_NEWABS : SYN_OLDABS; psmouse_info(psmouse, - "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n", + "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n", SYN_ID_MODEL(priv->identity), SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity), priv->model_id, priv->capabilities, priv->ext_cap, priv->ext_cap_0c, - priv->board_id, priv->firmware_id); + priv->ext_cap_10, priv->board_id, priv->firmware_id); set_input_params(psmouse, priv); diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c index 80285c71786e..f58a196521a9 100644 --- a/drivers/input/touchscreen/zforce_ts.c +++ b/drivers/input/touchscreen/zforce_ts.c @@ -429,7 +429,7 @@ static int zforce_read_packet(struct zforce_ts *ts, u8 *buf) goto unlock; } - if (buf[PAYLOAD_LENGTH] == 0 || buf[PAYLOAD_LENGTH] > FRAME_MAXSIZE) { + if (buf[PAYLOAD_LENGTH] == 0) { dev_err(&client->dev, "invalid payload length: %d\n", buf[PAYLOAD_LENGTH]); ret = -EIO; diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index a57e9b749895..658ee39e6569 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -76,8 +76,6 @@ LIST_HEAD(hpet_map); * Domain for untranslated devices - only allocated * if iommu=pt passed on kernel cmd line. */ -static struct protection_domain *pt_domain; - static const struct iommu_ops amd_iommu_ops; static ATOMIC_NOTIFIER_HEAD(ppr_notifier); @@ -96,7 +94,7 @@ struct iommu_dev_data { struct protection_domain *domain; /* Domain the device is bound to */ u16 devid; /* PCI Device ID */ bool iommu_v2; /* Device can make use of IOMMUv2 */ - bool passthrough; /* Default for device is pt_domain */ + bool passthrough; /* Device is identity mapped */ struct { bool enabled; int qdep; @@ -116,7 +114,6 @@ struct iommu_cmd { struct kmem_cache *amd_iommu_irq_cache; static void update_domain(struct protection_domain *domain); -static int alloc_passthrough_domain(void); static int protection_domain_init(struct protection_domain *domain); /**************************************************************************** @@ -2167,15 +2164,17 @@ static int attach_device(struct device *dev, dev_data = get_dev_data(dev); if (domain->flags & PD_IOMMUV2_MASK) { - if (!dev_data->iommu_v2 || !dev_data->passthrough) + if (!dev_data->passthrough) return -EINVAL; - if (pdev_iommuv2_enable(pdev) != 0) - return -EINVAL; + if (dev_data->iommu_v2) { + if (pdev_iommuv2_enable(pdev) != 0) + return -EINVAL; - dev_data->ats.enabled = true; - dev_data->ats.qdep = pci_ats_queue_depth(pdev); - dev_data->pri_tlp = pci_pri_tlp_required(pdev); + dev_data->ats.enabled = true; + dev_data->ats.qdep = pci_ats_queue_depth(pdev); + dev_data->pri_tlp = pci_pri_tlp_required(pdev); + } } else if (amd_iommu_iotlb_sup && pci_enable_ats(pdev, PAGE_SHIFT) == 0) { dev_data->ats.enabled = true; @@ -2221,15 +2220,6 @@ static void __detach_device(struct iommu_dev_data *dev_data) do_detach(head); spin_unlock_irqrestore(&domain->lock, flags); - - /* - * If we run in passthrough mode the device must be assigned to the - * passthrough domain if it is detached from any other domain. - * Make sure we can deassign from the pt_domain itself. 
- */ - if (dev_data->passthrough && - (dev_data->domain == NULL && domain != pt_domain)) - __attach_device(dev_data, pt_domain); } /* @@ -2249,7 +2239,7 @@ static void detach_device(struct device *dev) __detach_device(dev_data); write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); - if (domain->flags & PD_IOMMUV2_MASK) + if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2) pdev_iommuv2_disable(to_pci_dev(dev)); else if (dev_data->ats.enabled) pci_disable_ats(to_pci_dev(dev)); @@ -2287,17 +2277,15 @@ static int amd_iommu_add_device(struct device *dev) BUG_ON(!dev_data); - if (dev_data->iommu_v2) + if (iommu_pass_through || dev_data->iommu_v2) iommu_request_dm_for_dev(dev); /* Domains are initialized for this device - have a look what we ended up with */ domain = iommu_get_domain_for_dev(dev); - if (domain->type == IOMMU_DOMAIN_IDENTITY) { + if (domain->type == IOMMU_DOMAIN_IDENTITY) dev_data->passthrough = true; - dev->archdata.dma_ops = &nommu_dma_ops; - } else { + else dev->archdata.dma_ops = &amd_iommu_dma_ops; - } out: iommu_completion_wait(iommu); @@ -2862,8 +2850,17 @@ int __init amd_iommu_init_api(void) int __init amd_iommu_init_dma_ops(void) { + swiotlb = iommu_pass_through ? 1 : 0; iommu_detected = 1; - swiotlb = 0; + + /* + * In case we don't initialize SWIOTLB (actually the common case + * when AMD IOMMU is enabled), make sure there are global + * dma_ops set as a fall-back for devices not handled by this + * driver (for example non-PCI devices). + */ + if (!swiotlb) + dma_ops = &nommu_dma_ops; amd_iommu_stats_init(); @@ -2947,21 +2944,6 @@ out_err: return NULL; } -static int alloc_passthrough_domain(void) -{ - if (pt_domain != NULL) - return 0; - - /* allocate passthrough domain */ - pt_domain = protection_domain_alloc(); - if (!pt_domain) - return -ENOMEM; - - pt_domain->mode = PAGE_MODE_NONE; - - return 0; -} - static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) { struct protection_domain *pdomain; @@ -3222,33 +3204,6 @@ static const struct iommu_ops amd_iommu_ops = { * *****************************************************************************/ -int __init amd_iommu_init_passthrough(void) -{ - struct iommu_dev_data *dev_data; - struct pci_dev *dev = NULL; - int ret; - - ret = alloc_passthrough_domain(); - if (ret) - return ret; - - for_each_pci_dev(dev) { - if (!check_device(&dev->dev)) - continue; - - dev_data = get_dev_data(&dev->dev); - dev_data->passthrough = true; - - attach_device(&dev->dev, pt_domain); - } - - amd_iommu_stats_init(); - - pr_info("AMD-Vi: Initialized for Passthrough Mode\n"); - - return 0; -} - /* IOMMUv2 specific functions */ int amd_iommu_register_ppr_notifier(struct notifier_block *nb) { @@ -3363,7 +3318,12 @@ static int __flush_pasid(struct protection_domain *domain, int pasid, struct amd_iommu *iommu; int qdep; - BUG_ON(!dev_data->ats.enabled); + /* + There might be non-IOMMUv2 capable devices in an IOMMUv2 + * domain. 
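The __flush_pasid() change above replaces a BUG_ON with a continue, because an IOMMUv2 domain may now legitimately contain devices that never enabled ATS. A self-contained sketch of the pattern (skip rather than assert when walking a mixed-capability device list; the types here are toy stand-ins, not the driver's):

#include <stdbool.h>

struct dev_entry {
        bool ats_enabled;       /* device can accept ATS invalidations */
        struct dev_entry *next;
};

/* Issue an invalidation to every device that can take one.  Devices
 * without ATS are legal list members now, so they are skipped
 * instead of tripping an assertion. */
static void flush_all(struct dev_entry *head,
                      void (*invalidate)(struct dev_entry *))
{
        for (struct dev_entry *d = head; d; d = d->next) {
                if (!d->ats_enabled)
                        continue;   /* nothing to flush for this device */
                invalidate(d);
        }
}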
+ */ + if (!dev_data->ats.enabled) + continue; qdep = dev_data->ats.qdep; iommu = amd_iommu_rlookup_table[dev_data->devid]; diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index dbda9ae68c5d..a24495eb4e26 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -2026,14 +2026,6 @@ static bool detect_ivrs(void) return true; } -static int amd_iommu_init_dma(void) -{ - if (iommu_pass_through) - return amd_iommu_init_passthrough(); - else - return amd_iommu_init_dma_ops(); -} - /**************************************************************************** * * AMD IOMMU Initialization State Machine @@ -2073,7 +2065,7 @@ static int __init state_next(void) init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN; break; case IOMMU_INTERRUPTS_EN: - ret = amd_iommu_init_dma(); + ret = amd_iommu_init_dma_ops(); init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS; break; case IOMMU_DMA_OPS: diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 3465faf1809e..f7b875bb70d4 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -132,11 +132,19 @@ static struct device_state *get_device_state(u16 devid) static void free_device_state(struct device_state *dev_state) { + struct iommu_group *group; + /* * First detach device from domain - No more PRI requests will arrive * from that device after it is unbound from the IOMMUv2 domain. */ - iommu_detach_device(dev_state->domain, &dev_state->pdev->dev); + group = iommu_group_get(&dev_state->pdev->dev); + if (WARN_ON(!group)) + return; + + iommu_detach_group(dev_state->domain, group); + + iommu_group_put(group); /* Everything is down now, free the IOMMUv2 domain */ iommu_domain_free(dev_state->domain); @@ -731,6 +739,7 @@ EXPORT_SYMBOL(amd_iommu_unbind_pasid); int amd_iommu_init_device(struct pci_dev *pdev, int pasids) { struct device_state *dev_state; + struct iommu_group *group; unsigned long flags; int ret, tmp; u16 devid; @@ -776,10 +785,16 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids) if (ret) goto out_free_domain; - ret = iommu_attach_device(dev_state->domain, &pdev->dev); - if (ret != 0) + group = iommu_group_get(&pdev->dev); + if (!group) goto out_free_domain; + ret = iommu_attach_group(dev_state->domain, group); + if (ret != 0) + goto out_drop_group; + + iommu_group_put(group); + spin_lock_irqsave(&state_lock, flags); if (__get_device_state(devid) != NULL) { @@ -794,6 +809,9 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids) return 0; +out_drop_group: + iommu_group_put(group); + out_free_domain: iommu_domain_free(dev_state->domain); diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index b7d54d428b5e..ff4be0515a0d 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -538,7 +538,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c index 1a57e88a38f7..cd35079c8c98 100644 --- a/drivers/macintosh/ans-lcd.c +++ b/drivers/macintosh/ans-lcd.c @@ -7,7 +7,7 @@ #include <linux/kernel.h> #include <linux/miscdevice.h> #include <linux/fcntl.h> -#include <linux/init.h> +#include <linux/module.h> #include <linux/delay.h> #include <linux/fs.h> diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index b59727309072..bfec3bdfe598 100644 --- 
a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -259,7 +259,7 @@ config DM_CRYPT the ciphers you're going to use in the cryptoapi configuration. For further information on dm-crypt and userspace tools see: - <http://code.google.com/p/cryptsetup/wiki/DMCrypt> + <https://gitlab.com/cryptsetup/cryptsetup/wikis/DMCrypt> To compile this code as a module, choose M here: the module will be called dm-crypt. diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index 32814371b8d3..aa1b41ca40f7 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c @@ -1471,5 +1471,3 @@ module_exit(mq_exit); MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("mq cache policy"); - -MODULE_ALIAS("dm-cache-default"); diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c index b6f22651dd35..200366c62231 100644 --- a/drivers/md/dm-cache-policy-smq.c +++ b/drivers/md/dm-cache-policy-smq.c @@ -1686,7 +1686,7 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size, if (from_cblock(cache_size)) { mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size)); - if (!mq->cache_hit_bits && mq->cache_hit_bits) { + if (!mq->cache_hit_bits) { DMERR("couldn't allocate cache hit bitset"); goto bad_cache_hit_bits; } @@ -1789,3 +1789,5 @@ module_exit(smq_exit); MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("smq cache policy"); + +MODULE_ALIAS("dm-cache-default"); diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index b680da5d7b93..1fe93cfea7d3 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -424,6 +424,7 @@ static void free_migration(struct dm_cache_migration *mg) wake_up(&cache->migration_wait); mempool_free(mg, cache->migration_pool); + wake_worker(cache); } static int prealloc_data_structs(struct cache *cache, struct prealloc *p) @@ -1966,6 +1967,7 @@ static void process_deferred_bios(struct cache *cache) * this bio might require one, we pause until there are some * prepared mappings to process. */ + prealloc_used = true; if (prealloc_data_structs(cache, &structs)) { spin_lock_irqsave(&cache->lock, flags); bio_list_merge(&cache->deferred_bios, &bios); @@ -1981,7 +1983,6 @@ static void process_deferred_bios(struct cache *cache) process_discard_bio(cache, &structs, bio); else process_bio(cache, &structs, bio); - prealloc_used = true; } if (prealloc_used) @@ -2010,6 +2011,7 @@ static void process_deferred_cells(struct cache *cache) * this bio might require one, we pause until there are some * prepared mappings to process. 
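The dm-cache-policy-smq hunk above fixes a self-contradictory condition: (!p && p) is false for every value of p, so a failed bitset allocation was silently ignored. A minimal sketch of the corrected check (setup_hit_bits is an illustrative name, and calloc stands in for the kernel allocator):

#include <stdio.h>
#include <stdlib.h>

static int setup_hit_bits(long **out, size_t nbits)
{
        long *bits = calloc((nbits + 63) / 64, sizeof(*bits));

        /* The broken form tested (!bits && bits), which nothing can
         * satisfy; testing !bits alone actually catches the failure. */
        if (!bits) {
                fprintf(stderr, "couldn't allocate cache hit bitset\n");
                return -1;
        }
        *out = bits;
        return 0;
}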
*/ + prealloc_used = true; if (prealloc_data_structs(cache, &structs)) { spin_lock_irqsave(&cache->lock, flags); list_splice(&cells, &cache->deferred_cells); @@ -2018,7 +2020,6 @@ static void process_deferred_cells(struct cache *cache) } process_cell(cache, &structs, cell); - prealloc_used = true; } if (prealloc_used) @@ -2080,6 +2081,7 @@ static void writeback_some_dirty_blocks(struct cache *cache) if (policy_writeback_work(cache->policy, &oblock, &cblock, busy)) break; /* no work to do */ + prealloc_used = true; if (prealloc_data_structs(cache, &structs) || get_cell(cache, oblock, &structs, &old_ocell)) { policy_set_dirty(cache->policy, oblock); @@ -2087,7 +2089,6 @@ static void writeback_some_dirty_blocks(struct cache *cache) } writeback(cache, &structs, oblock, cblock, old_ocell); - prealloc_used = true; } if (prealloc_used) diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 48dfe3c4d6aa..6ba47cfb1443 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -1293,8 +1293,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd) return r; disk_super = dm_block_data(copy); - dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root)); - dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root)); + dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root)); + dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root)); dm_sm_dec_block(pmd->metadata_sm, held_root); return dm_tm_unlock(pmd->tm, copy); diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 1c50c580215c..d2bbe8cc1e97 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -666,16 +666,21 @@ static void requeue_io(struct thin_c *tc) requeue_deferred_cells(tc); } -static void error_retry_list(struct pool *pool) +static void error_retry_list_with_code(struct pool *pool, int error) { struct thin_c *tc; rcu_read_lock(); list_for_each_entry_rcu(tc, &pool->active_thins, list) - error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO); + error_thin_bio_list(tc, &tc->retry_on_resume_list, error); rcu_read_unlock(); } +static void error_retry_list(struct pool *pool) +{ + return error_retry_list_with_code(pool, -EIO); +} + /* * This section of code contains the logic for processing a thin device's IO. 
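The dm-thin hunk above generalizes error_retry_list() into error_retry_list_with_code(), keeping the old name as a thin wrapper, so the out-of-data-space timeout can fail queued bios with -ENOSPC instead of -EIO. A hedged sketch of that wrapper pattern (retry_list and fail_all are toy stand-ins for the driver's per-thin structures):

#include <errno.h>
#include <stdio.h>

struct retry_list { int pending; };    /* toy stand-in for queued bios */

static void fail_all(struct retry_list *l, int error)
{
        printf("failing %d bios with error %d\n", l->pending, error);
        l->pending = 0;
}

/* Generalized helper: the caller picks the errno. */
static void error_retry_list_with_code(struct retry_list *l, int error)
{
        fail_all(l, error);
}

/* Old entry point kept as a wrapper so existing callers still see
 * -EIO, while the no-space timeout path passes -ENOSPC, telling
 * upper layers "out of space" rather than "media error". */
static void error_retry_list(struct retry_list *l)
{
        error_retry_list_with_code(l, -EIO);
}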
* Much of the code depends on pool object resources (lists, workqueues, etc) @@ -2297,7 +2302,7 @@ static void do_no_space_timeout(struct work_struct *ws) if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { pool->pf.error_if_no_space = true; notify_of_pool_mode_change_to_oods(pool); - error_retry_list(pool); + error_retry_list_with_code(pool, -ENOSPC); } } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ab37ae114e94..0d7ab20c58df 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1729,7 +1729,8 @@ static int dm_merge_bvec(struct request_queue *q, struct mapped_device *md = q->queuedata; struct dm_table *map = dm_get_live_table_fast(md); struct dm_target *ti; - sector_t max_sectors, max_size = 0; + sector_t max_sectors; + int max_size = 0; if (unlikely(!map)) goto out; @@ -1742,18 +1743,10 @@ static int dm_merge_bvec(struct request_queue *q, * Find maximum amount of I/O that won't need splitting */ max_sectors = min(max_io_len(bvm->bi_sector, ti), - (sector_t) queue_max_sectors(q)); + (sector_t) BIO_MAX_SECTORS); max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; - - /* - * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t - * to the targets' merge function since it holds sectors not bytes). - * Just doing this as an interim fix for stable@ because the more - * comprehensive cleanup of switching to sector_t will impact every - * DM target that implements a ->merge hook. - */ - if (max_size > INT_MAX) - max_size = INT_MAX; + if (max_size < 0) + max_size = 0; /* * merge_bvec_fn() returns number of bytes @@ -1761,13 +1754,13 @@ static int dm_merge_bvec(struct request_queue *q, * max is precomputed maximal io size */ if (max_size && ti->type->merge) - max_size = ti->type->merge(ti, bvm, biovec, (int) max_size); + max_size = ti->type->merge(ti, bvm, biovec, max_size); /* * If the target doesn't support merge method and some of the devices - * provided their merge_bvec method (we know this by looking for the - * max_hw_sectors that dm_set_device_limits may set), then we can't - * allow bios with multiple vector entries. So always set max_size - * to 0, and the code below allows just one page. + * provided their merge_bvec method (we know this by looking at + * queue_max_hw_sectors), then we can't allow bios with multiple vector + * entries. So always set max_size to 0, and the code below allows + * just one page. */ else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) max_size = 0; diff --git a/drivers/md/md.c b/drivers/md/md.c index 0c2a4e8b873c..e25f00f0138a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5759,7 +5759,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg) char *ptr; int err; - file = kmalloc(sizeof(*file), GFP_NOIO); + file = kzalloc(sizeof(*file), GFP_NOIO); if (!file) return -ENOMEM; diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h index bf2b80d5c470..8731b6ea026b 100644 --- a/drivers/md/persistent-data/dm-btree-internal.h +++ b/drivers/md/persistent-data/dm-btree-internal.h @@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key); extern struct dm_block_validator btree_node_validator; +/* + * Value type for upper levels of multi-level btrees. 
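The md.c get_bitmap_file() hunk above swaps kmalloc() for kzalloc() because the structure is later copied wholesale to user space: zeroing up front keeps the bytes after the NUL-terminated path from leaking stale heap contents. A userspace sketch of the same idea (the struct layout is a toy approximation, and calloc stands in for kzalloc):

#include <stdlib.h>
#include <string.h>

struct bitmap_file_buf { char pathname[4096]; };

static struct bitmap_file_buf *get_bitmap_file_buf(const char *path)
{
        /* Zeroed allocation: everything past the copied path is 0,
         * so copying the whole struct out discloses nothing. */
        struct bitmap_file_buf *f = calloc(1, sizeof(*f));

        if (!f)
                return NULL;
        strncpy(f->pathname, path, sizeof(f->pathname) - 1);
        return f;
}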
+ */ +extern void init_le64_type(struct dm_transaction_manager *tm, + struct dm_btree_value_type *vt); + #endif /* DM_BTREE_INTERNAL_H */ diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index 9836c0ae897c..4222f774cf36 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c @@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info, return r; } -static struct dm_btree_value_type le64_type = { - .context = NULL, - .size = sizeof(__le64), - .inc = NULL, - .dec = NULL, - .equal = NULL -}; - int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, uint64_t *keys, dm_block_t *new_root) { @@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, int index = 0, r = 0; struct shadow_spine spine; struct btree_node *n; + struct dm_btree_value_type le64_vt; + init_le64_type(info->tm, &le64_vt); init_shadow_spine(&spine, info); for (level = 0; level < info->levels; level++) { r = remove_raw(&spine, info, (level == last_level ? - &info->value_type : &le64_type), + &info->value_type : &le64_vt), root, keys[level], (unsigned *)&index); if (r < 0) break; @@ -654,11 +648,13 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root, int index = 0, r = 0; struct shadow_spine spine; struct btree_node *n; + struct dm_btree_value_type le64_vt; uint64_t k; + init_le64_type(info->tm, &le64_vt); init_shadow_spine(&spine, info); for (level = 0; level < last_level; level++) { - r = remove_raw(&spine, info, &le64_type, + r = remove_raw(&spine, info, &le64_vt, root, keys[level], (unsigned *) &index); if (r < 0) goto out; @@ -689,6 +685,7 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root, value_ptr(n, index)); delete_at(n, index); + keys[last_level] = k + 1ull; } else r = -ENODATA; diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c index 1b5e13ec7f96..0dee514ba4c5 100644 --- a/drivers/md/persistent-data/dm-btree-spine.c +++ b/drivers/md/persistent-data/dm-btree-spine.c @@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s) { return s->root; } + +static void le64_inc(void *context, const void *value_le) +{ + struct dm_transaction_manager *tm = context; + __le64 v_le; + + memcpy(&v_le, value_le, sizeof(v_le)); + dm_tm_inc(tm, le64_to_cpu(v_le)); +} + +static void le64_dec(void *context, const void *value_le) +{ + struct dm_transaction_manager *tm = context; + __le64 v_le; + + memcpy(&v_le, value_le, sizeof(v_le)); + dm_tm_dec(tm, le64_to_cpu(v_le)); +} + +static int le64_equal(void *context, const void *value1_le, const void *value2_le) +{ + __le64 v1_le, v2_le; + + memcpy(&v1_le, value1_le, sizeof(v1_le)); + memcpy(&v2_le, value2_le, sizeof(v2_le)); + return v1_le == v2_le; +} + +void init_le64_type(struct dm_transaction_manager *tm, + struct dm_btree_value_type *vt) +{ + vt->context = tm; + vt->size = sizeof(__le64); + vt->inc = le64_inc; + vt->dec = le64_dec; + vt->equal = le64_equal; +} diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index fdd3793e22f9..c7726cebc495 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root, struct btree_node *n; struct dm_btree_value_type le64_type; - le64_type.context = NULL; - le64_type.size = sizeof(__le64); - le64_type.inc = NULL; - le64_type.dec = NULL; 
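/*
 * [Editor's note] The dm-btree hunks above and below retire a static
 * le64 value type whose inc/dec/equal callbacks were all NULL in favour
 * of init_le64_type(), which wires them to dm_tm_inc()/dm_tm_dec() so
 * operations on the upper levels of multi-level btrees keep block
 * reference counts correct. Note how le64_inc() and friends memcpy the
 * value out before use: on-disk values may be unaligned. A user-space
 * sketch of that detail (assumes a little-endian host; the kernel goes
 * through le64_to_cpu()):
 */
#include <stdint.h>
#include <string.h>

static uint64_t read_unaligned_u64(const void *value_le)
{
	uint64_t v;

	memcpy(&v, value_le, sizeof(v));	/* portable unaligned read */
	return v;
}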
- le64_type.equal = NULL; - + init_le64_type(info->tm, &le64_type); init_shadow_spine(&spine, info); for (level = 0; level < (info->levels - 1); level++) { diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 94f5b55069e0..967a4ed73929 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1476,6 +1476,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) { char b[BDEVNAME_SIZE]; struct r1conf *conf = mddev->private; + unsigned long flags; /* * If it is not operational, then we have already marked it as dead @@ -1495,14 +1496,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) return; } set_bit(Blocked, &rdev->flags); + spin_lock_irqsave(&conf->device_lock, flags); if (test_and_clear_bit(In_sync, &rdev->flags)) { - unsigned long flags; - spin_lock_irqsave(&conf->device_lock, flags); mddev->degraded++; set_bit(Faulty, &rdev->flags); - spin_unlock_irqrestore(&conf->device_lock, flags); } else set_bit(Faulty, &rdev->flags); + spin_unlock_irqrestore(&conf->device_lock, flags); /* * if recovery is running, make sure it aborts. */ @@ -1568,7 +1568,10 @@ static int raid1_spare_active(struct mddev *mddev) * Find all failed disks within the RAID1 configuration * and mark them readable. * Called under mddev lock, so rcu protection not needed. + * device_lock used to avoid races with raid1_end_read_request + * which expects 'In_sync' flags and ->degraded to be consistent. */ + spin_lock_irqsave(&conf->device_lock, flags); for (i = 0; i < conf->raid_disks; i++) { struct md_rdev *rdev = conf->mirrors[i].rdev; struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; @@ -1599,7 +1602,6 @@ static int raid1_spare_active(struct mddev *mddev) sysfs_notify_dirent_safe(rdev->sysfs_state); } } - spin_lock_irqsave(&conf->device_lock, flags); mddev->degraded -= count; spin_unlock_irqrestore(&conf->device_lock, flags); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 643d217bfa13..f757023fc458 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2256,7 +2256,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) static int drop_one_stripe(struct r5conf *conf) { struct stripe_head *sh; - int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS; + int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK; spin_lock_irq(conf->hash_locks + hash); sh = get_free_stripe(conf, hash); @@ -6388,7 +6388,8 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink, if (mutex_trylock(&conf->cache_size_mutex)) { ret= 0; - while (ret < sc->nr_to_scan) { + while (ret < sc->nr_to_scan && + conf->max_nr_stripes > conf->min_nr_stripes) { if (drop_one_stripe(conf) == 0) { ret = SHRINK_STOP; break; diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index 3a27a84ad3ec..9426276dbe14 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -2245,6 +2245,9 @@ void omap3_gpmc_save_context(void) { int i; + if (!gpmc_base) + return; + gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG); gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE); gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL); @@ -2277,6 +2280,9 @@ void omap3_gpmc_restore_context(void) { int i; + if (!gpmc_base) + return; + gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig); gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable); gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl); diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 653815950aa2..3f68dd251ce8 100644 --- a/drivers/mfd/Kconfig +++ 
b/drivers/mfd/Kconfig @@ -115,7 +115,7 @@ config MFD_CROS_EC_I2C config MFD_CROS_EC_SPI tristate "ChromeOS Embedded Controller (SPI)" - depends on MFD_CROS_EC && CROS_EC_PROTO && SPI && OF + depends on MFD_CROS_EC && CROS_EC_PROTO && SPI ---help--- If you say Y here, you get support for talking to the ChromeOS EC diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index bebf58a06a6b..a72ddb295078 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -651,7 +651,7 @@ static int arizona_runtime_suspend(struct device *dev) arizona->has_fully_powered_off = true; - disable_irq(arizona->irq); + disable_irq_nosync(arizona->irq); arizona_enable_reset(arizona); regulator_bulk_disable(arizona->num_core_supplies, arizona->core_supplies); @@ -1141,10 +1141,6 @@ int arizona_dev_init(struct arizona *arizona) arizona->pdata.gpio_defaults[i]); } - pm_runtime_set_autosuspend_delay(arizona->dev, 100); - pm_runtime_use_autosuspend(arizona->dev); - pm_runtime_enable(arizona->dev); - /* Chip default */ if (!arizona->pdata.clk32k_src) arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2; @@ -1245,11 +1241,17 @@ int arizona_dev_init(struct arizona *arizona) arizona->pdata.spk_fmt[i]); } + pm_runtime_set_active(arizona->dev); + pm_runtime_enable(arizona->dev); + /* Set up for interrupts */ ret = arizona_irq_init(arizona); if (ret != 0) goto err_reset; + pm_runtime_set_autosuspend_delay(arizona->dev, 100); + pm_runtime_use_autosuspend(arizona->dev); + arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error", arizona_clkgen_err, arizona); arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked", @@ -1278,10 +1280,6 @@ int arizona_dev_init(struct arizona *arizona) goto err_irq; } -#ifdef CONFIG_PM - regulator_disable(arizona->dcvdd); -#endif - return 0; err_irq: diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 2d3db81be099..6ded3dc36644 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -438,9 +438,6 @@ static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj, { struct at24_data *at24; - if (unlikely(off >= attr->size)) - return -EFBIG; - at24 = dev_get_drvdata(container_of(kobj, struct device, kobj)); return at24_write(at24, buf, off, count); } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index e1ccefce9a9d..a98dd4f1b0e3 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -786,6 +786,7 @@ static bool bond_should_notify_peers(struct bonding *bond) slave ? slave->dev->name : "NULL"); if (!slave || !bond->send_peer_notif || + !netif_carrier_ok(bond->dev) || test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) return false; diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 2d1ce3c5d0dd..753887d02b46 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1763,16 +1763,9 @@ vortex_open(struct net_device *dev) vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); } if (i != RX_RING_SIZE) { - int j; pr_emerg("%s: no memory for rx ring\n", dev->name); - for (j = 0; j < i; j++) { - if (vp->rx_skbuff[j]) { - dev_kfree_skb(vp->rx_skbuff[j]); - vp->rx_skbuff[j] = NULL; - } - } retval = -ENOMEM; - goto err_free_irq; + goto err_free_skb; } /* Wrap the ring. 
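/*
 * [Editor's note] The 3c59x hunk above (its second half follows below)
 * stops freeing a partially built RX ring inside the allocation loop and
 * instead jumps to a single err_free_skb label that sweeps the whole
 * ring, relying on NULL checks to skip never-filled slots. Centralizing
 * unwind under one label is the idiomatic kernel error-handling shape;
 * a compact sketch with illustrative names:
 */
#include <errno.h>
#include <stdlib.h>

#define RING_SIZE 32

static int ring_open(void *ring[RING_SIZE])
{
	int i;

	for (i = 0; i < RING_SIZE; i++)
		ring[i] = NULL;		/* so the unwind can test every slot */

	for (i = 0; i < RING_SIZE; i++) {
		ring[i] = malloc(64);
		if (!ring[i])
			goto err_free;
	}
	return 0;

err_free:
	for (i = 0; i < RING_SIZE; i++) {
		free(ring[i]);		/* free(NULL) is a harmless no-op */
		ring[i] = NULL;
	}
	return -ENOMEM;
}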
*/ vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma); @@ -1782,7 +1775,13 @@ vortex_open(struct net_device *dev) if (!retval) goto out; -err_free_irq: +err_free_skb: + for (i = 0; i < RX_RING_SIZE; i++) { + if (vp->rx_skbuff[i]) { + dev_kfree_skb(vp->rx_skbuff[i]); + vp->rx_skbuff[i] = NULL; + } + } free_irq(dev->irq, dev); err: if (vortex_debug > 1) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index a90d7364334f..f7fbdc9d1325 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -262,9 +262,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, if (likely(skb)) { (*pkts_compl)++; (*bytes_compl) += skb->len; + dev_kfree_skb_any(skb); } - dev_kfree_skb_any(skb); tx_buf->first_bd = 0; tx_buf->skb = NULL; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 76b9052a961c..5907c821d131 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1718,6 +1718,22 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, offset += sizeof(u32); data_buf += sizeof(u32); written_so_far += sizeof(u32); + + /* At end of each 4Kb page, release nvram lock to allow MFW + * chance to take it for its own use. + */ + if ((cmd_flags & MCPR_NVM_COMMAND_LAST) && + (written_so_far < buf_size)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "Releasing NVM lock after offset 0x%x\n", + (u32)(offset - sizeof(u32))); + bnx2x_release_nvram_lock(bp); + usleep_range(1000, 2000); + rc = bnx2x_acquire_nvram_lock(bp); + if (rc) + return rc; + } + cmd_flags = 0; } diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 0612b19f6313..506047c38607 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -676,6 +676,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) if (!next_cmpl->valid) break; } + packets++; /* TODO: BNA_CQ_EF_LOCAL ? */ if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | @@ -692,7 +693,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) else bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len); - packets++; rcb->rxq->rx_packets++; rcb->rxq->rx_bytes += totlen; ccb->bytes_per_intr += totlen; diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index caeb39561567..bf9eb2ecf960 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -104,6 +104,57 @@ static void *macb_rx_buffer(struct macb *bp, unsigned int index) return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index); } +/* I/O accessors */ +static u32 hw_readl_native(struct macb *bp, int offset) +{ + return __raw_readl(bp->regs + offset); +} + +static void hw_writel_native(struct macb *bp, int offset, u32 value) +{ + __raw_writel(value, bp->regs + offset); +} + +static u32 hw_readl(struct macb *bp, int offset) +{ + return readl_relaxed(bp->regs + offset); +} + +static void hw_writel(struct macb *bp, int offset, u32 value) +{ + writel_relaxed(value, bp->regs + offset); +} + +/* + * Find the CPU endianness by using the loopback bit of NCR register. When the + * CPU is in big endian we need to program swaped mode for management + * descriptor access. 
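/*
 * [Editor's note] The accessor pairs added above and the
 * hw_is_native_io() probe whose body follows detect byte order exactly
 * once: write the loopback bit with the non-swapping __raw_writel() and
 * see whether it reads back unchanged. The result selects one of two
 * register accessor implementations that every later read/write funnels
 * through, instead of re-deriving endianness inside macb_configure_dma()
 * on each call (that removal appears further down). The dispatch shape,
 * with illustrative names:
 */
#include <stdint.h>

struct dev_io {
	uint32_t (*reg_read)(void *regs, int offset);
	void (*reg_write)(void *regs, int offset, uint32_t value);
};

static void dev_io_select(struct dev_io *io, int native,
			  uint32_t (*raw_rd)(void *, int),
			  void (*raw_wr)(void *, int, uint32_t),
			  uint32_t (*cooked_rd)(void *, int),
			  void (*cooked_wr)(void *, int, uint32_t))
{
	/* decided once at probe time; hot paths stay branch-free */
	io->reg_read = native ? raw_rd : cooked_rd;
	io->reg_write = native ? raw_wr : cooked_wr;
}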
+ */ +static bool hw_is_native_io(void __iomem *addr) +{ + u32 value = MACB_BIT(LLB); + + __raw_writel(value, addr + MACB_NCR); + value = __raw_readl(addr + MACB_NCR); + + /* Write 0 back to disable everything */ + __raw_writel(0, addr + MACB_NCR); + + return value == MACB_BIT(LLB); +} + +static bool hw_is_gem(void __iomem *addr, bool native_io) +{ + u32 id; + + if (native_io) + id = __raw_readl(addr + MACB_MID); + else + id = readl_relaxed(addr + MACB_MID); + + return MACB_BFEXT(IDNUM, id) >= 0x2; +} + static void macb_set_hwaddr(struct macb *bp) { u32 bottom; @@ -160,7 +211,7 @@ static void macb_get_hwaddr(struct macb *bp) } } - netdev_info(bp->dev, "invalid hw address, using random\n"); + dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); eth_hw_addr_random(bp->dev); } @@ -252,7 +303,6 @@ static void macb_handle_link_change(struct net_device *dev) struct macb *bp = netdev_priv(dev); struct phy_device *phydev = bp->phy_dev; unsigned long flags; - int status_change = 0; spin_lock_irqsave(&bp->lock, flags); @@ -449,14 +499,14 @@ err_out: static void macb_update_stats(struct macb *bp) { - u32 __iomem *reg = bp->regs + MACB_PFR; u32 *p = &bp->hw_stats.macb.rx_pause_frames; u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; + int offset = MACB_PFR; WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); - for(; p < end; p++, reg++) - *p += readl_relaxed(reg); + for(; p < end; p++, offset += 4) + *p += bp->macb_reg_readl(bp, offset); } static int macb_halt_tx(struct macb *bp) @@ -1107,12 +1157,6 @@ static void macb_poll_controller(struct net_device *dev) } #endif -static inline unsigned int macb_count_tx_descriptors(struct macb *bp, - unsigned int len) -{ - return (len + bp->max_tx_length - 1) / bp->max_tx_length; -} - static unsigned int macb_tx_map(struct macb *bp, struct macb_queue *queue, struct sk_buff *skb) @@ -1263,11 +1307,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) * socket buffer: skb fragments of jumbo frames may need to be * splitted into many buffer descriptors. */ - count = macb_count_tx_descriptors(bp, skb_headlen(skb)); + count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); nr_frags = skb_shinfo(skb)->nr_frags; for (f = 0; f < nr_frags; f++) { frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); - count += macb_count_tx_descriptors(bp, frag_size); + count += DIV_ROUND_UP(frag_size, bp->max_tx_length); } spin_lock_irqsave(&bp->lock, flags); @@ -1603,7 +1647,6 @@ static u32 macb_dbw(struct macb *bp) static void macb_configure_dma(struct macb *bp) { u32 dmacfg; - u32 tmp, ncr; if (macb_is_gem(bp)) { dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); @@ -1613,22 +1656,11 @@ static void macb_configure_dma(struct macb *bp) dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); dmacfg &= ~GEM_BIT(ENDIA_PKT); - /* Find the CPU endianness by using the loopback bit of net_ctrl - * register. save it first. When the CPU is in big endian we - * need to program swaped mode for management descriptor access. 
- */ - ncr = macb_readl(bp, NCR); - __raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR); - tmp = __raw_readl(bp->regs + MACB_NCR); - - if (tmp == MACB_BIT(LLB)) + if (bp->native_io) dmacfg &= ~GEM_BIT(ENDIA_DESC); else dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ - /* Restore net_ctrl */ - macb_writel(bp, NCR, ncr); - if (bp->dev->features & NETIF_F_HW_CSUM) dmacfg |= GEM_BIT(TXCOEN); else @@ -1897,19 +1929,19 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu) static void gem_update_stats(struct macb *bp) { - int i; + unsigned int i; u32 *p = &bp->hw_stats.gem.tx_octets_31_0; for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { u32 offset = gem_statistics[i].offset; - u64 val = readl_relaxed(bp->regs + offset); + u64 val = bp->macb_reg_readl(bp, offset); bp->ethtool_stats[i] += val; *p += val; if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { /* Add GEM_OCTTXH, GEM_OCTRXH */ - val = readl_relaxed(bp->regs + offset + 4); + val = bp->macb_reg_readl(bp, offset + 4); bp->ethtool_stats[i] += ((u64)val) << 32; *(++p) += val; } @@ -1976,7 +2008,7 @@ static int gem_get_sset_count(struct net_device *dev, int sset) static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) { - int i; + unsigned int i; switch (sset) { case ETH_SS_STATS: @@ -2190,7 +2222,7 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co if (dt_conf) bp->caps = dt_conf->caps; - if (macb_is_gem_hw(bp->regs)) { + if (hw_is_gem(bp->regs, bp->native_io)) { bp->caps |= MACB_CAPS_MACB_IS_GEM; dcfg = gem_readl(bp, DCFG1); @@ -2201,10 +2233,11 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co bp->caps |= MACB_CAPS_FIFO_MODE; } - netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps); + dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); } static void macb_probe_queues(void __iomem *mem, + bool native_io, unsigned int *queue_mask, unsigned int *num_queues) { @@ -2219,7 +2252,7 @@ static void macb_probe_queues(void __iomem *mem, * we are early in the probe process and don't have the * MACB_CAPS_MACB_IS_GEM flag positioned */ - if (!macb_is_gem_hw(mem)) + if (!hw_is_gem(mem, native_io)) return; /* bit 0 is never set but queue 0 always exists */ @@ -2786,6 +2819,7 @@ static int macb_probe(struct platform_device *pdev) struct clk *pclk, *hclk, *tx_clk; unsigned int queue_mask, num_queues; struct macb_platform_data *pdata; + bool native_io; struct phy_device *phydev; struct net_device *dev; struct resource *regs; @@ -2794,6 +2828,11 @@ static int macb_probe(struct platform_device *pdev) struct macb *bp; int err; + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mem = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(mem)) + return PTR_ERR(mem); + if (np) { const struct of_device_id *match; @@ -2809,14 +2848,9 @@ static int macb_probe(struct platform_device *pdev) if (err) return err; - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mem = devm_ioremap_resource(&pdev->dev, regs); - if (IS_ERR(mem)) { - err = PTR_ERR(mem); - goto err_disable_clocks; - } + native_io = hw_is_native_io(mem); - macb_probe_queues(mem, &queue_mask, &num_queues); + macb_probe_queues(mem, native_io, &queue_mask, &num_queues); dev = alloc_etherdev_mq(sizeof(*bp), num_queues); if (!dev) { err = -ENOMEM; @@ -2831,6 +2865,14 @@ static int macb_probe(struct platform_device *pdev) bp->pdev = pdev; bp->dev = dev; bp->regs = mem; + bp->native_io = native_io; + if (native_io) { + bp->macb_reg_readl = hw_readl_native; + bp->macb_reg_writel = 
hw_writel_native; + } else { + bp->macb_reg_readl = hw_readl; + bp->macb_reg_writel = hw_writel; + } bp->num_queues = num_queues; bp->queue_mask = queue_mask; if (macb_config) @@ -2838,9 +2880,8 @@ static int macb_probe(struct platform_device *pdev) bp->pclk = pclk; bp->hclk = hclk; bp->tx_clk = tx_clk; - if (macb_config->jumbo_max_len) { + if (macb_config) bp->jumbo_max_len = macb_config->jumbo_max_len; - } spin_lock_init(&bp->lock); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index d74655993d4b..1895b6b2addd 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -429,18 +429,12 @@ | GEM_BF(name, value)) /* Register access macros */ -#define macb_readl(port,reg) \ - readl_relaxed((port)->regs + MACB_##reg) -#define macb_writel(port,reg,value) \ - writel_relaxed((value), (port)->regs + MACB_##reg) -#define gem_readl(port, reg) \ - readl_relaxed((port)->regs + GEM_##reg) -#define gem_writel(port, reg, value) \ - writel_relaxed((value), (port)->regs + GEM_##reg) -#define queue_readl(queue, reg) \ - readl_relaxed((queue)->bp->regs + (queue)->reg) -#define queue_writel(queue, reg, value) \ - writel_relaxed((value), (queue)->bp->regs + (queue)->reg) +#define macb_readl(port, reg) (port)->macb_reg_readl((port), MACB_##reg) +#define macb_writel(port, reg, value) (port)->macb_reg_writel((port), MACB_##reg, (value)) +#define gem_readl(port, reg) (port)->macb_reg_readl((port), GEM_##reg) +#define gem_writel(port, reg, value) (port)->macb_reg_writel((port), GEM_##reg, (value)) +#define queue_readl(queue, reg) (queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg) +#define queue_writel(queue, reg, value) (queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value)) /* Conditional GEM/MACB macros. These perform the operation to the correct * register dependent on whether the device is a GEM or a MACB. For registers @@ -785,6 +779,11 @@ struct macb_queue { struct macb { void __iomem *regs; + bool native_io; + + /* hardware IO accessors */ + u32 (*macb_reg_readl)(struct macb *bp, int offset); + void (*macb_reg_writel)(struct macb *bp, int offset, u32 value); unsigned int rx_tail; unsigned int rx_prepared_head; @@ -817,9 +816,9 @@ struct macb { struct mii_bus *mii_bus; struct phy_device *phy_dev; - unsigned int link; - unsigned int speed; - unsigned int duplex; + int link; + int speed; + int duplex; u32 caps; unsigned int dma_burst_length; @@ -843,9 +842,4 @@ static inline bool macb_is_gem(struct macb *bp) return !!(bp->caps & MACB_CAPS_MACB_IS_GEM); } -static inline bool macb_is_gem_hw(void __iomem *addr) -{ - return !!(MACB_BFEXT(IDNUM, readl_relaxed(addr + MACB_MID)) >= 0x2); -} - #endif /* _MACB_H */ diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index c4d6bbe9458d..02e23e6f1424 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -16,7 +16,6 @@ if NET_VENDOR_CAVIUM config THUNDER_NIC_PF tristate "Thunder Physical function driver" depends on 64BIT - default ARCH_THUNDER select THUNDER_NIC_BGX ---help--- This driver supports Thunder's NIC physical function. 
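/*
 * [Editor's note] Two details in the macb hunks above deserve a call-out:
 * the jumbo_max_len assignment is now guarded by "if (macb_config)",
 * since macb_config may legitimately be NULL and the old code
 * dereferenced it unconditionally; and the macb_readl()/gem_readl()
 * macro family is rebuilt to dispatch through the per-device accessor
 * pointers while leaving every call site untouched. That
 * macro-over-function-pointer shape, sketched with illustrative names:
 */
#include <stdint.h>

#define REG_STATUS 0x04

struct port {
	uint32_t (*reg_readl)(struct port *p, int offset);
};

/* call sites keep writing port_readl(p, STATUS), exactly as before */
#define port_readl(p, reg) ((p)->reg_readl((p), REG_##reg))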
@@ -29,14 +28,12 @@ config THUNDER_NIC_PF config THUNDER_NIC_VF tristate "Thunder Virtual function driver" depends on 64BIT - default ARCH_THUNDER ---help--- This driver supports Thunder's NIC virtual function config THUNDER_NIC_BGX tristate "Thunder MAC interface driver (BGX)" depends on 64BIT - default ARCH_THUNDER ---help--- This driver supports programming and controlling of MAC interface from NIC physical function driver. diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index dda8a02b7322..8aee250904ec 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -125,6 +125,15 @@ */ #define NICPF_CLK_PER_INT_TICK 2 +/* Time to wait before we decide that a SQ is stuck. + * + * Since both pkt rx and tx notifications are done with same CQ, + * when packets are being received at very high rate (eg: L2 forwarding) + * then freeing transmitted skbs will be delayed and watchdog + * will kick in, resetting interface. Hence keeping this value high. + */ +#define NICVF_TX_TIMEOUT (50 * HZ) + struct nicvf_cq_poll { u8 cq_idx; /* Completion queue index */ struct napi_struct napi; @@ -216,8 +225,9 @@ struct nicvf_drv_stats { /* Tx */ u64 tx_frames_ok; u64 tx_drops; - u64 tx_busy; u64 tx_tso; + u64 txq_stop; + u64 txq_wake; }; struct nicvf { diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index 16bd2d772db9..a4228e664567 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -66,9 +66,10 @@ static const struct nicvf_stat nicvf_drv_stats[] = { NICVF_DRV_STAT(rx_frames_jumbo), NICVF_DRV_STAT(rx_drops), NICVF_DRV_STAT(tx_frames_ok), - NICVF_DRV_STAT(tx_busy), NICVF_DRV_STAT(tx_tso), NICVF_DRV_STAT(tx_drops), + NICVF_DRV_STAT(txq_stop), + NICVF_DRV_STAT(txq_wake), }; static const struct nicvf_stat nicvf_queue_stats[] = { @@ -126,6 +127,7 @@ static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl) static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) { + struct nicvf *nic = netdev_priv(netdev); int stats, qidx; if (sset != ETH_SS_STATS) @@ -141,7 +143,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) data += ETH_GSTRING_LEN; } - for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) { + for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) { for (stats = 0; stats < nicvf_n_queue_stats; stats++) { sprintf(data, "rxq%d: %s", qidx, nicvf_queue_stats[stats].name); @@ -149,7 +151,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) } } - for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { + for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) { for (stats = 0; stats < nicvf_n_queue_stats; stats++) { sprintf(data, "txq%d: %s", qidx, nicvf_queue_stats[stats].name); @@ -170,12 +172,14 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) static int nicvf_get_sset_count(struct net_device *netdev, int sset) { + struct nicvf *nic = netdev_priv(netdev); + if (sset != ETH_SS_STATS) return -EINVAL; return nicvf_n_hw_stats + nicvf_n_drv_stats + (nicvf_n_queue_stats * - (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) + + (nic->qs->rq_cnt + nic->qs->sq_cnt)) + BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT; } @@ -197,13 +201,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev, *(data++) = ((u64 *)&nic->drv_stats) [nicvf_drv_stats[stat].index]; - for (qidx = 0; qidx < 
MAX_RCV_QUEUES_PER_QS; qidx++) { + for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) { for (stat = 0; stat < nicvf_n_queue_stats; stat++) *(data++) = ((u64 *)&nic->qs->rq[qidx].stats) [nicvf_queue_stats[stat].index]; } - for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { + for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) { for (stat = 0; stat < nicvf_n_queue_stats; stat++) *(data++) = ((u64 *)&nic->qs->sq[qidx].stats) [nicvf_queue_stats[stat].index]; @@ -543,6 +547,7 @@ static int nicvf_set_channels(struct net_device *dev, { struct nicvf *nic = netdev_priv(dev); int err = 0; + bool if_up = netif_running(dev); if (!channel->rx_count || !channel->tx_count) return -EINVAL; @@ -551,6 +556,9 @@ static int nicvf_set_channels(struct net_device *dev, if (channel->tx_count > MAX_SND_QUEUES_PER_QS) return -EINVAL; + if (if_up) + nicvf_stop(dev); + nic->qs->rq_cnt = channel->rx_count; nic->qs->sq_cnt = channel->tx_count; nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt); @@ -559,11 +567,9 @@ static int nicvf_set_channels(struct net_device *dev, if (err) return err; - if (!netif_running(dev)) - return err; + if (if_up) + nicvf_open(dev); - nicvf_stop(dev); - nicvf_open(dev); netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n", nic->qs->sq_cnt, nic->qs->rq_cnt); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 8b119a035b7e..3b90afb8c293 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -234,7 +234,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic) nic->duplex == DUPLEX_FULL ? "Full duplex" : "Half duplex"); netif_carrier_on(nic->netdev); - netif_tx_wake_all_queues(nic->netdev); + netif_tx_start_all_queues(nic->netdev); } else { netdev_info(nic->netdev, "%s: Link is Down\n", nic->netdev->name); @@ -425,6 +425,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, if (skb) { prefetch(skb); dev_consume_skb_any(skb); + sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL; } } @@ -476,12 +477,13 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev, static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, struct napi_struct *napi, int budget) { - int processed_cqe, work_done = 0; + int processed_cqe, work_done = 0, tx_done = 0; int cqe_count, cqe_head; struct nicvf *nic = netdev_priv(netdev); struct queue_set *qs = nic->qs; struct cmp_queue *cq = &qs->cq[cq_idx]; struct cqe_rx_t *cq_desc; + struct netdev_queue *txq; spin_lock_bh(&cq->lock); loop: @@ -496,8 +498,8 @@ loop: cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9; cqe_head &= 0xFFFF; - netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n", - __func__, cqe_count, cqe_head); + netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n", + __func__, cq_idx, cqe_count, cqe_head); while (processed_cqe < cqe_count) { /* Get the CQ descriptor */ cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head); @@ -511,8 +513,8 @@ loop: break; } - netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n", - cq_desc->cqe_type); + netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n", + cq_idx, cq_desc->cqe_type); switch (cq_desc->cqe_type) { case CQE_TYPE_RX: nicvf_rcv_pkt_handler(netdev, napi, cq, @@ -522,6 +524,7 @@ loop: case CQE_TYPE_SEND: nicvf_snd_pkt_handler(netdev, cq, (void *)cq_desc, CQE_TYPE_SEND); + tx_done++; break; case CQE_TYPE_INVALID: case CQE_TYPE_RX_SPLIT: @@ -532,8 +535,9 @@ loop: } processed_cqe++; } - netdev_dbg(nic->netdev, "%s 
processed_cqe %d work_done %d budget %d\n", - __func__, processed_cqe, work_done, budget); + netdev_dbg(nic->netdev, + "%s CQ%d processed_cqe %d work_done %d budget %d\n", + __func__, cq_idx, processed_cqe, work_done, budget); /* Ring doorbell to inform H/W to reuse processed CQEs */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, @@ -543,6 +547,19 @@ loop: goto loop; done: + /* Wakeup TXQ if its stopped earlier due to SQ full */ + if (tx_done) { + txq = netdev_get_tx_queue(netdev, cq_idx); + if (netif_tx_queue_stopped(txq)) { + netif_tx_start_queue(txq); + nic->drv_stats.txq_wake++; + if (netif_msg_tx_err(nic)) + netdev_warn(netdev, + "%s: Transmit queue wakeup SQ%d\n", + netdev->name, cq_idx); + } + } + spin_unlock_bh(&cq->lock); return work_done; } @@ -554,15 +571,10 @@ static int nicvf_poll(struct napi_struct *napi, int budget) struct net_device *netdev = napi->dev; struct nicvf *nic = netdev_priv(netdev); struct nicvf_cq_poll *cq; - struct netdev_queue *txq; cq = container_of(napi, struct nicvf_cq_poll, napi); work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget); - txq = netdev_get_tx_queue(netdev, cq->cq_idx); - if (netif_tx_queue_stopped(txq)) - netif_tx_wake_queue(txq); - if (work_done < budget) { /* Slow packet rate, exit polling */ napi_complete(napi); @@ -833,9 +845,9 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; } - if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) { + if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) { netif_tx_stop_queue(txq); - nic->drv_stats.tx_busy++; + nic->drv_stats.txq_stop++; if (netif_msg_tx_err(nic)) netdev_warn(netdev, "%s: Transmit ring full, stopping SQ%d\n", @@ -859,7 +871,6 @@ int nicvf_stop(struct net_device *netdev) nicvf_send_msg_to_pf(nic, &mbx); netif_carrier_off(netdev); - netif_tx_disable(netdev); /* Disable RBDR & QS error interrupts */ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { @@ -894,6 +905,8 @@ int nicvf_stop(struct net_device *netdev) kfree(cq_poll); } + netif_tx_disable(netdev); + /* Free resources */ nicvf_config_data_transfer(nic, false); @@ -988,6 +1001,9 @@ int nicvf_open(struct net_device *netdev) for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); + nic->drv_stats.txq_stop = 0; + nic->drv_stats.txq_wake = 0; + netif_carrier_on(netdev); netif_tx_start_all_queues(netdev); @@ -1278,6 +1294,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->hw_features = netdev->features; netdev->netdev_ops = &nicvf_netdev_ops; + netdev->watchdog_timeo = NICVF_TX_TIMEOUT; INIT_WORK(&nic->reset_task, nicvf_reset_task); @@ -1318,11 +1335,17 @@ static void nicvf_remove(struct pci_dev *pdev) pci_disable_device(pdev); } +static void nicvf_shutdown(struct pci_dev *pdev) +{ + nicvf_remove(pdev); +} + static struct pci_driver nicvf_driver = { .name = DRV_NAME, .id_table = nicvf_id_table, .probe = nicvf_probe, .remove = nicvf_remove, + .shutdown = nicvf_shutdown, }; static int __init nicvf_init_module(void) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index d69d228d11a0..ca4240aa6d15 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -103,9 +103,11 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, /* Allocate a new page */ if (!nic->rb_page) { - nic->rb_page = alloc_pages(gfp | __GFP_COMP, order); + 
nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, + order); if (!nic->rb_page) { - netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n"); + netdev_err(nic->netdev, + "Failed to allocate new rcv buffer\n"); return -ENOMEM; } nic->rb_page_offset = 0; @@ -382,7 +384,8 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) return; if (sq->tso_hdrs) - dma_free_coherent(&nic->pdev->dev, sq->dmem.q_len, + dma_free_coherent(&nic->pdev->dev, + sq->dmem.q_len * TSO_HEADER_SIZE, sq->tso_hdrs, sq->tso_hdrs_phys); kfree(sq->skbuff); @@ -863,10 +866,11 @@ void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq, continue; } skb = (struct sk_buff *)sq->skbuff[sq->head]; + if (skb) + dev_kfree_skb_any(skb); atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets); atomic64_add(hdr->tot_len, (atomic64_t *)&netdev->stats.tx_bytes); - dev_kfree_skb_any(skb); nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); } } @@ -992,7 +996,7 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, memset(gather, 0, SND_QUEUE_DESC_SIZE); gather->subdesc_type = SQ_DESC_TYPE_GATHER; - gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB; + gather->ld_type = NIC_SEND_LD_TYPE_E_LDD; gather->size = size; gather->addr = data; } @@ -1048,7 +1052,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, } nicvf_sq_add_hdr_subdesc(sq, hdr_qentry, seg_subdescs - 1, skb, seg_len); - sq->skbuff[hdr_qentry] = 0; + sq->skbuff[hdr_qentry] = (u64)NULL; qentry = nicvf_get_nxt_sqentry(sq, qentry); desc_cnt += seg_subdescs; @@ -1062,6 +1066,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, /* Inform HW to xmit all TSO segments */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, skb_get_queue_mapping(skb), desc_cnt); + nic->drv_stats.tx_tso++; return 1; } diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 8341bdf755d1..f0937b7bfe9f 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -62,7 +62,7 @@ #define SND_QUEUE_CNT 8 #define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */ -#define SND_QSIZE SND_QUEUE_SIZE4 +#define SND_QSIZE SND_QUEUE_SIZE2 #define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10)) #define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10)) #define SND_QUEUE_THRESH 2ULL @@ -70,7 +70,10 @@ /* Since timestamp not enabled, otherwise 2 */ #define MAX_CQE_PER_PKT_XMIT 1 -#define CMP_QSIZE CMP_QUEUE_SIZE4 +/* Keep CQ and SQ sizes same, if timestamping + * is enabled this equation will change. + */ +#define CMP_QSIZE CMP_QUEUE_SIZE2 #define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) #define CMP_QUEUE_CQE_THRESH 0 #define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */ @@ -87,7 +90,12 @@ #define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ MAX_CQE_PER_PKT_XMIT) -#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256) +/* Calculate number of CQEs to reserve for all SQEs. + * Its 1/256th level of CQ size. 
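/*
 * [Editor's note] Worked example for the RQ_CQ_DROP formula defined just
 * below. The concrete numbers assume the *_SIZE2 enums encode 4096-entry
 * queues (1 << (2 + 10)) and that MIN_SQ_DESC_PER_PKT_XMIT == 2; both
 * are assumptions to verify against the driver headers, not facts stated
 * in this diff.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long cmp_queue_len = 1ULL << (2 + 10);	/* 4096 */
	unsigned long long snd_queue_len = 1ULL << (2 + 10);	/* 4096 */
	/* MAX_CQES_FOR_TX = (SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * 1 */
	unsigned long long max_cqes_for_tx = snd_queue_len / 2;	/* 2048 */

	unsigned long long rq_cq_drop =
		(256 / (cmp_queue_len / (cmp_queue_len - max_cqes_for_tx))) + 1;

	printf("RQ_CQ_DROP = %llu\n", rq_cq_drop);	/* 256/2 + 1 = 129 */
	return 0;
}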
+ * '+ 1' to account for pipelining + */ +#define RQ_CQ_DROP ((256 / (CMP_QUEUE_LEN / \ + (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1) /* Descriptor size in bytes */ #define SND_QUEUE_DESC_SIZE 16 diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 633ec05dfe05..b961a89dc626 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -673,7 +673,10 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid) bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg); bgx_flush_dmac_addrs(bgx, lmacid); - if (lmac->phydev) + if ((bgx->lmac_type != BGX_MODE_XFI) && + (bgx->lmac_type != BGX_MODE_XLAUI) && + (bgx->lmac_type != BGX_MODE_40G_KR) && + (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev) phy_disconnect(lmac->phydev); lmac->phydev = NULL; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index a11485fbb33f..c3c7db41819d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2332,10 +2332,11 @@ int t4_setup_debugfs(struct adapter *adap) EXT_MEM1_SIZE_G(size)); } } else { - if (i & EXT_MEM_ENABLE_F) + if (i & EXT_MEM_ENABLE_F) { size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); add_debugfs_mem(adap, "mc", MEM_MC, EXT_MEM_SIZE_G(size)); + } } de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap, diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 2716e6f30d9a..00e3a6b6b822 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -620,6 +620,11 @@ enum be_if_flags { BE_IF_FLAGS_VLAN_PROMISCUOUS |\ BE_IF_FLAGS_MCAST_PROMISCUOUS) +#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\ + BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED) + +#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS) + /* An RX interface is an object with one or more MAC addresses and * filtering capabilities. */ struct be_cmd_req_if_create { diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 6f642426308c..c28e3bfdccd7 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -273,6 +273,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) return 0; + /* if device is not running, copy MAC to netdev->dev_addr */ + if (!netif_running(netdev)) + goto done; + /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT * privilege or if PF did not provision the new MAC address. 
* On BE3, this cmd will always fail if the VF doesn't have the @@ -307,9 +311,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) status = -EPERM; goto err; } - - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - dev_info(dev, "MAC address changed to %pM\n", mac); +done: + ether_addr_copy(netdev->dev_addr, addr->sa_data); + dev_info(dev, "MAC address changed to %pM\n", addr->sa_data); return 0; err: dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data); @@ -2447,10 +2451,24 @@ static void be_eq_clean(struct be_eq_obj *eqo) be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0); } -static void be_rx_cq_clean(struct be_rx_obj *rxo) +/* Free posted rx buffers that were not used */ +static void be_rxq_clean(struct be_rx_obj *rxo) { - struct be_rx_page_info *page_info; struct be_queue_info *rxq = &rxo->q; + struct be_rx_page_info *page_info; + + while (atomic_read(&rxq->used) > 0) { + page_info = get_rx_page_info(rxo); + put_page(page_info->page); + memset(page_info, 0, sizeof(*page_info)); + } + BUG_ON(atomic_read(&rxq->used)); + rxq->tail = 0; + rxq->head = 0; +} + +static void be_rx_cq_clean(struct be_rx_obj *rxo) +{ struct be_queue_info *rx_cq = &rxo->cq; struct be_rx_compl_info *rxcp; struct be_adapter *adapter = rxo->adapter; @@ -2487,16 +2505,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo) /* After cleanup, leave the CQ in unarmed state */ be_cq_notify(adapter, rx_cq->id, false, 0); - - /* Then free posted rx buffers that were not used */ - while (atomic_read(&rxq->used) > 0) { - page_info = get_rx_page_info(rxo); - put_page(page_info->page); - memset(page_info, 0, sizeof(*page_info)); - } - BUG_ON(atomic_read(&rxq->used)); - rxq->tail = 0; - rxq->head = 0; } static void be_tx_compl_clean(struct be_adapter *adapter) @@ -2576,8 +2584,8 @@ static void be_evt_queues_destroy(struct be_adapter *adapter) be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); napi_hash_del(&eqo->napi); netif_napi_del(&eqo->napi); + free_cpumask_var(eqo->affinity_mask); } - free_cpumask_var(eqo->affinity_mask); be_queue_free(adapter, &eqo->q); } } @@ -2594,13 +2602,7 @@ static int be_evt_queues_create(struct be_adapter *adapter) for_all_evt_queues(adapter, eqo, i) { int numa_node = dev_to_node(&adapter->pdev->dev); - if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) - return -ENOMEM; - cpumask_set_cpu(cpumask_local_spread(i, numa_node), - eqo->affinity_mask); - netif_napi_add(adapter->netdev, &eqo->napi, be_poll, - BE_NAPI_WEIGHT); - napi_hash_add(&eqo->napi); + aic = &adapter->aic_obj[i]; eqo->adapter = adapter; eqo->idx = i; @@ -2616,6 +2618,14 @@ static int be_evt_queues_create(struct be_adapter *adapter) rc = be_cmd_eq_create(adapter, eqo); if (rc) return rc; + + if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) + return -ENOMEM; + cpumask_set_cpu(cpumask_local_spread(i, numa_node), + eqo->affinity_mask); + netif_napi_add(adapter->netdev, &eqo->napi, be_poll, + BE_NAPI_WEIGHT); + napi_hash_add(&eqo->napi); } return 0; } @@ -3354,13 +3364,54 @@ static void be_rx_qs_destroy(struct be_adapter *adapter) for_all_rx_queues(adapter, rxo, i) { q = &rxo->q; if (q->created) { + /* If RXQs are destroyed while in an "out of buffer" + * state, there is a possibility of an HW stall on + * Lancer. So, post 64 buffers to each queue to relieve + * the "out of buffer" condition. + * Make sure there's space in the RXQ before posting. 
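/*
 * [Editor's note] The be2net comment ending here, and the code that
 * follows it, encode a teardown-ordering rule: on Lancer, destroying an
 * RX queue that sits in an out-of-buffer state risks a hardware stall,
 * so the driver first drains the completion queue and, if the queue is
 * empty, reposts a batch of buffers before issuing the destroy command.
 * Reduced to illustrative helpers, the sequence looks like this:
 */
struct rxq { int posted; int is_lancer; };

static void drain_completions(struct rxq *q) { (void)q; }
static void post_buffers(struct rxq *q, int n) { q->posted += n; }
static void hw_destroy(struct rxq *q) { (void)q; }

static void rxq_teardown(struct rxq *q)
{
	if (q->is_lancer) {
		drain_completions(q);
		if (q->posted == 0)
			post_buffers(q, 64);	/* relieve out-of-buffer state */
	}
	hw_destroy(q);		/* only now is the destroy command safe */
	drain_completions(q);	/* flush anything that raced the destroy */
}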
+ */ + if (lancer_chip(adapter)) { + be_rx_cq_clean(rxo); + if (atomic_read(&q->used) == 0) + be_post_rx_frags(rxo, GFP_KERNEL, + MAX_RX_POST); + } + be_cmd_rxq_destroy(adapter, q); be_rx_cq_clean(rxo); + be_rxq_clean(rxo); } be_queue_free(adapter, q); } } +static void be_disable_if_filters(struct be_adapter *adapter) +{ + be_cmd_pmac_del(adapter, adapter->if_handle, + adapter->pmac_id[0], 0); + + be_clear_uc_list(adapter); + + /* The IFACE flags are enabled in the open path and cleared + * in the close path. When a VF gets detached from the host and + * assigned to a VM the following happens: + * - VF's IFACE flags get cleared in the detach path + * - IFACE create is issued by the VF in the attach path + * Due to a bug in the BE3/Skyhawk-R FW + * (Lancer FW doesn't have the bug), the IFACE capability flags + * specified along with the IFACE create cmd issued by a VF are not + * honoured by FW. As a consequence, if a *new* driver + * (that enables/disables IFACE flags in open/close) + * is loaded in the host and an *old* driver is * used by a VM/VF, + * the IFACE gets created *without* the needed flags. + * To avoid this, disable RX-filter flags only for Lancer. + */ + if (lancer_chip(adapter)) { + be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF); + adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS; + } +} + static int be_close(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); @@ -3373,6 +3424,8 @@ static int be_close(struct net_device *netdev) if (!(adapter->flags & BE_FLAGS_SETUP_DONE)) return 0; + be_disable_if_filters(adapter); + be_roce_dev_close(adapter); if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { @@ -3392,7 +3445,6 @@ static int be_close(struct net_device *netdev) be_tx_compl_clean(adapter); be_rx_qs_destroy(adapter); - be_clear_uc_list(adapter); for_all_evt_queues(adapter, eqo, i) { if (msix_enabled(adapter)) @@ -3477,6 +3529,31 @@ static int be_rx_qs_create(struct be_adapter *adapter) return 0; } +static int be_enable_if_filters(struct be_adapter *adapter) +{ + int status; + + status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON); + if (status) + return status; + + /* For BE3 VFs, the PF programs the initial MAC address */ + if (!(BEx_chip(adapter) && be_virtfn(adapter))) { + status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr, + adapter->if_handle, + &adapter->pmac_id[0], 0); + if (status) + return status; + } + + if (adapter->vlans_added) + be_vid_config(adapter); + + be_set_rx_mode(adapter->netdev); + + return 0; +} + static int be_open(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); @@ -3490,6 +3567,10 @@ static int be_open(struct net_device *netdev) if (status) goto err; + status = be_enable_if_filters(adapter); + if (status) + goto err; + status = be_irq_register(adapter); if (status) goto err; @@ -3686,16 +3767,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter) } } -static void be_mac_clear(struct be_adapter *adapter) -{ - if (adapter->pmac_id) { - be_cmd_pmac_del(adapter, adapter->if_handle, - adapter->pmac_id[0], 0); - kfree(adapter->pmac_id); - adapter->pmac_id = NULL; - } -} - #ifdef CONFIG_BE2NET_VXLAN static void be_disable_vxlan_offloads(struct be_adapter *adapter) { @@ -3770,8 +3841,8 @@ static int be_clear(struct be_adapter *adapter) #ifdef CONFIG_BE2NET_VXLAN be_disable_vxlan_offloads(adapter); #endif - /* delete the primary mac along with the uc-mac list */ - be_mac_clear(adapter); + kfree(adapter->pmac_id); + adapter->pmac_id = NULL; be_cmd_if_destroy(adapter, 
adapter->if_handle, 0); @@ -3782,25 +3853,11 @@ static int be_clear(struct be_adapter *adapter) return 0; } -static int be_if_create(struct be_adapter *adapter, u32 *if_handle, - u32 cap_flags, u32 vf) -{ - u32 en_flags; - - en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | - BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | - BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS; - - en_flags &= cap_flags; - - return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf); -} - static int be_vfs_if_create(struct be_adapter *adapter) { struct be_resources res = {0}; + u32 cap_flags, en_flags, vf; struct be_vf_cfg *vf_cfg; - u32 cap_flags, vf; int status; /* If a FW profile exists, then cap_flags are updated */ @@ -3821,8 +3878,12 @@ static int be_vfs_if_create(struct be_adapter *adapter) } } - status = be_if_create(adapter, &vf_cfg->if_handle, - cap_flags, vf + 1); + en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | + BE_IF_FLAGS_BROADCAST | + BE_IF_FLAGS_MULTICAST | + BE_IF_FLAGS_PASS_L3L4_ERRORS); + status = be_cmd_if_create(adapter, cap_flags, en_flags, + &vf_cfg->if_handle, vf + 1); if (status) return status; } @@ -4194,15 +4255,8 @@ static int be_mac_setup(struct be_adapter *adapter) memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); - } else { - /* Maybe the HW was reset; dev_addr must be re-programmed */ - memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN); } - /* For BE3-R VFs, the PF programs the initial MAC address */ - if (!(BEx_chip(adapter) && be_virtfn(adapter))) - be_cmd_pmac_add(adapter, mac, adapter->if_handle, - &adapter->pmac_id[0], 0); return 0; } @@ -4342,6 +4396,7 @@ static int be_func_init(struct be_adapter *adapter) static int be_setup(struct be_adapter *adapter) { struct device *dev = &adapter->pdev->dev; + u32 en_flags; int status; status = be_func_init(adapter); @@ -4364,8 +4419,11 @@ static int be_setup(struct be_adapter *adapter) if (status) goto err; - status = be_if_create(adapter, &adapter->if_handle, - be_if_cap_flags(adapter), 0); + /* will enable all the needed filter flags in be_open() */ + en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS; + en_flags = en_flags & be_if_cap_flags(adapter); + status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags, + &adapter->if_handle, 0); if (status) goto err; @@ -4391,11 +4449,6 @@ static int be_setup(struct be_adapter *adapter) dev_err(dev, "Please upgrade firmware to version >= 4.0\n"); } - if (adapter->vlans_added) - be_vid_config(adapter); - - be_set_rx_mode(adapter->netdev); - status = be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); if (status) diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 1eee73cccdf5..99d33e2d35e6 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -562,6 +562,7 @@ struct fec_enet_private { }; void fec_ptp_init(struct platform_device *pdev); +void fec_ptp_stop(struct platform_device *pdev); void fec_ptp_start_cyclecounter(struct net_device *ndev); int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 1f89c59b4353..271bb5862346 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -24,6 +24,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> +#include <linux/pm_runtime.h> 
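/*
 * [Editor's note] The fec hunks below bracket every MDIO transaction
 * with pm_runtime_get_sync()/pm_runtime_put_autosuspend() so the IPG
 * clock can be gated between accesses (the clock itself moves out of
 * fec_enet_clk_enable() and into the runtime-PM callbacks further down).
 * The invariant is that each get is matched by a put on every exit path,
 * timeouts included. Sketch with illustrative helpers:
 */
#include <errno.h>

static int rpm_get(void *dev) { (void)dev; return 0; }
static void rpm_put_autosuspend(void *dev) { (void)dev; }
static int do_mdio_transfer(void *dev) { (void)dev; return -ETIMEDOUT; }

static int mdio_xfer(void *dev)
{
	int ret = rpm_get(dev);		/* wakes the block; may fail */

	if (ret < 0)
		return ret;

	ret = do_mdio_transfer(dev);	/* errors fall through to the put */

	rpm_put_autosuspend(dev);	/* balanced on success and failure */
	return ret;
}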
#include <linux/ptrace.h> #include <linux/errno.h> #include <linux/ioport.h> @@ -77,6 +78,7 @@ static void fec_enet_itr_coal_init(struct net_device *ndev); #define FEC_ENET_RAEM_V 0x8 #define FEC_ENET_RAFL_V 0x8 #define FEC_ENET_OPD_V 0xFFF0 +#define FEC_MDIO_PM_TIMEOUT 100 /* ms */ static struct platform_device_id fec_devtype[] = { { @@ -1767,7 +1769,13 @@ static void fec_enet_adjust_link(struct net_device *ndev) static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { struct fec_enet_private *fep = bus->priv; + struct device *dev = &fep->pdev->dev; unsigned long time_left; + int ret = 0; + + ret = pm_runtime_get_sync(dev); + if (IS_ERR_VALUE(ret)) + return ret; fep->mii_timeout = 0; init_completion(&fep->mdio_done); @@ -1783,18 +1791,30 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) if (time_left == 0) { fep->mii_timeout = 1; netdev_err(fep->netdev, "MDIO read timeout\n"); - return -ETIMEDOUT; + ret = -ETIMEDOUT; + goto out; } - /* return value */ - return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); + ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); + +out: + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; } static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { struct fec_enet_private *fep = bus->priv; + struct device *dev = &fep->pdev->dev; unsigned long time_left; + int ret = 0; + + ret = pm_runtime_get_sync(dev); + if (IS_ERR_VALUE(ret)) + return ret; fep->mii_timeout = 0; init_completion(&fep->mdio_done); @@ -1811,10 +1831,13 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, if (time_left == 0) { fep->mii_timeout = 1; netdev_err(fep->netdev, "MDIO write timeout\n"); - return -ETIMEDOUT; + ret = -ETIMEDOUT; } - return 0; + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; } static int fec_enet_clk_enable(struct net_device *ndev, bool enable) @@ -1826,9 +1849,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) ret = clk_prepare_enable(fep->clk_ahb); if (ret) return ret; - ret = clk_prepare_enable(fep->clk_ipg); - if (ret) - goto failed_clk_ipg; if (fep->clk_enet_out) { ret = clk_prepare_enable(fep->clk_enet_out); if (ret) @@ -1852,7 +1872,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) } } else { clk_disable_unprepare(fep->clk_ahb); - clk_disable_unprepare(fep->clk_ipg); if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); if (fep->clk_ptp) { @@ -1874,8 +1893,6 @@ failed_clk_ptp: if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); failed_clk_enet_out: - clk_disable_unprepare(fep->clk_ipg); -failed_clk_ipg: clk_disable_unprepare(fep->clk_ahb); return ret; @@ -2847,10 +2864,14 @@ fec_enet_open(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); int ret; + ret = pm_runtime_get_sync(&fep->pdev->dev); + if (IS_ERR_VALUE(ret)) + return ret; + pinctrl_pm_select_default_state(&fep->pdev->dev); ret = fec_enet_clk_enable(ndev, true); if (ret) - return ret; + goto clk_enable; /* I should reset the ring buffers here, but I don't yet know * a simple way to do that. 
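/*
 * [Editor's note] The probe hunk below follows the standard ordering for
 * bringing runtime PM up on a device that probe itself must keep
 * powered: pin the device with a no-resume reference and declare it
 * active *before* enabling runtime PM, then release with autosuspend
 * once probe completes. The calls named here are the ones visible in
 * this diff; only the condensed ordering is editorial:
 *
 *	pm_runtime_get_noresume(dev);	 // pin: no suspend during probe
 *	pm_runtime_set_active(dev);	 // state matches the live clocks
 *	pm_runtime_enable(dev);
 *	...rest of probe...
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev); // allow gating once idle
 *
 * The matching error labels (clk_enable: and failed_clk_ipg:, both
 * below) exist to keep the reference and clock counts balanced when any
 * open or probe step fails.
 */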
@@ -2881,6 +2902,9 @@ err_enet_mii_probe: fec_enet_free_buffers(ndev); err_enet_alloc: fec_enet_clk_enable(ndev, false); +clk_enable: + pm_runtime_mark_last_busy(&fep->pdev->dev); + pm_runtime_put_autosuspend(&fep->pdev->dev); pinctrl_pm_select_sleep_state(&fep->pdev->dev); return ret; } @@ -2903,6 +2927,9 @@ fec_enet_close(struct net_device *ndev) fec_enet_clk_enable(ndev, false); pinctrl_pm_select_sleep_state(&fep->pdev->dev); + pm_runtime_mark_last_busy(&fep->pdev->dev); + pm_runtime_put_autosuspend(&fep->pdev->dev); + fec_enet_free_buffers(ndev); return 0; @@ -3115,8 +3142,8 @@ static int fec_enet_init(struct net_device *ndev) fep->bufdesc_size; /* Allocate memory for buffer descriptors. */ - cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, - GFP_KERNEL); + cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, + GFP_KERNEL); if (!cbd_base) { return -ENOMEM; } @@ -3388,6 +3415,10 @@ fec_probe(struct platform_device *pdev) if (ret) goto failed_clk; + ret = clk_prepare_enable(fep->clk_ipg); + if (ret) + goto failed_clk_ipg; + fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); if (!IS_ERR(fep->reg_phy)) { ret = regulator_enable(fep->reg_phy); @@ -3400,6 +3431,12 @@ fec_probe(struct platform_device *pdev) fep->reg_phy = NULL; } + pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + fec_reset_phy(pdev); if (fep->bufdesc_ex) @@ -3447,6 +3484,10 @@ fec_probe(struct platform_device *pdev) fep->rx_copybreak = COPYBREAK_DEFAULT; INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); + + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + return 0; failed_register: @@ -3454,9 +3495,12 @@ failed_register: failed_mii_init: failed_irq: failed_init: + fec_ptp_stop(pdev); if (fep->reg_phy) regulator_disable(fep->reg_phy); failed_regulator: + clk_disable_unprepare(fep->clk_ipg); +failed_clk_ipg: fec_enet_clk_enable(ndev, false); failed_clk: failed_phy: @@ -3473,14 +3517,12 @@ fec_drv_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); - cancel_delayed_work_sync(&fep->time_keep); cancel_work_sync(&fep->tx_timeout_work); + fec_ptp_stop(pdev); unregister_netdev(ndev); fec_enet_mii_remove(fep); if (fep->reg_phy) regulator_disable(fep->reg_phy); - if (fep->ptp_clock) - ptp_clock_unregister(fep->ptp_clock); of_node_put(fep->phy_node); free_netdev(ndev); @@ -3568,7 +3610,28 @@ failed_clk: return ret; } -static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume); +static int __maybe_unused fec_runtime_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct fec_enet_private *fep = netdev_priv(ndev); + + clk_disable_unprepare(fep->clk_ipg); + + return 0; +} + +static int __maybe_unused fec_runtime_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct fec_enet_private *fep = netdev_priv(ndev); + + return clk_prepare_enable(fep->clk_ipg); +} + +static const struct dev_pm_ops fec_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) + SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) +}; static struct platform_driver fec_driver = { .driver = { diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index a15663ad7f5e..f457a23d0bfb 100644 --- 
a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -604,6 +604,16 @@ void fec_ptp_init(struct platform_device *pdev) schedule_delayed_work(&fep->time_keep, HZ); } +void fec_ptp_stop(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct fec_enet_private *fep = netdev_priv(ndev); + + cancel_delayed_work_sync(&fep->time_keep); + if (fep->ptp_clock) + ptp_clock_unregister(fep->ptp_clock); +} + /** * fec_ptp_check_pps_event * @fep: the fec_enet_private structure handle diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 56316db6c5a6..cf8e54652df9 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -586,7 +586,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) frag = skb_shinfo(skb)->frags; while (nr_frags) { CBDC_SC(bdp, - BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC); + BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST | + BD_ENET_TX_TC); CBDS_SC(bdp, BD_ENET_TX_READY); if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index b34214e2df5f..016743e355de 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -110,7 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep) } #define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB) -#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB) +#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF) #define FEC_RX_EVENT (FEC_ENET_RXF) #define FEC_TX_EVENT (FEC_ENET_TXF) #define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \ diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index ff875028fdff..2b7610f341b0 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -565,22 +565,6 @@ static void gfar_ints_enable(struct gfar_private *priv) } } -static void lock_tx_qs(struct gfar_private *priv) -{ - int i; - - for (i = 0; i < priv->num_tx_queues; i++) - spin_lock(&priv->tx_queue[i]->txlock); -} - -static void unlock_tx_qs(struct gfar_private *priv) -{ - int i; - - for (i = 0; i < priv->num_tx_queues; i++) - spin_unlock(&priv->tx_queue[i]->txlock); -} - static int gfar_alloc_tx_queues(struct gfar_private *priv) { int i; @@ -1376,7 +1360,6 @@ static int gfar_probe(struct platform_device *ofdev) priv->dev = &ofdev->dev; SET_NETDEV_DEV(dev, &ofdev->dev); - spin_lock_init(&priv->bflock); INIT_WORK(&priv->reset_task, gfar_reset_task); platform_set_drvdata(ofdev, priv); @@ -1470,9 +1453,8 @@ static int gfar_probe(struct platform_device *ofdev) goto register_fail; } - device_init_wakeup(&dev->dev, - priv->device_flags & - FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + device_set_wakeup_capable(&dev->dev, priv->device_flags & + FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); /* fill out IRQ number and name fields */ for (i = 0; i < priv->num_grps; i++) { @@ -1540,48 +1522,37 @@ static int gfar_suspend(struct device *dev) struct gfar_private *priv = dev_get_drvdata(dev); struct net_device *ndev = priv->ndev; struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned long flags; u32 tempval; - int magic_packet = priv->wol_en && (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + if (!netif_running(ndev)) + return 0; + + disable_napi(priv); + 
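A note on the fec_main.c probe hunks further above, before the gianfar suspend rework continues: fec_probe() now brings runtime PM up by hand, because the clock is already running by the time PM is enabled. The sequence matters: declare the autosuspend policy, take a reference so probe cannot be suspended midway, mark the device active to match the real hardware state, and only then enable runtime PM; the matching put at the end of probe releases the device to autosuspend. A hedged skeleton of that bring-up, with error handling and the rest of probe elided (example_probe is a hypothetical name):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_probe(struct platform_device *pdev)
{
	/* The clock is already prepared/enabled by the probe path here. */

	pm_runtime_set_autosuspend_delay(&pdev->dev, 100);	/* ms */
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);	/* hold a reference over probe */
	pm_runtime_set_active(&pdev->dev);	/* tell the core we are powered */
	pm_runtime_enable(&pdev->dev);

	/* ... device setup and registration ... */

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);	/* may now autosuspend */
	return 0;
}

With this wiring in place, the runtime callbacks (fec_runtime_suspend/resume above) are the only code that gates clk_ipg, which is why the clock handling was removed from fec_enet_clk_enable().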
netif_tx_lock(ndev); netif_device_detach(ndev); + netif_tx_unlock(ndev); - if (netif_running(ndev)) { + gfar_halt(priv); - local_irq_save(flags); - lock_tx_qs(priv); + if (magic_packet) { + /* Enable interrupt on Magic Packet */ + gfar_write(®s->imask, IMASK_MAG); - gfar_halt_nodisable(priv); + /* Enable Magic Packet mode */ + tempval = gfar_read(®s->maccfg2); + tempval |= MACCFG2_MPEN; + gfar_write(®s->maccfg2, tempval); - /* Disable Tx, and Rx if wake-on-LAN is disabled. */ + /* re-enable the Rx block */ tempval = gfar_read(®s->maccfg1); - - tempval &= ~MACCFG1_TX_EN; - - if (!magic_packet) - tempval &= ~MACCFG1_RX_EN; - + tempval |= MACCFG1_RX_EN; gfar_write(®s->maccfg1, tempval); - unlock_tx_qs(priv); - local_irq_restore(flags); - - disable_napi(priv); - - if (magic_packet) { - /* Enable interrupt on Magic Packet */ - gfar_write(®s->imask, IMASK_MAG); - - /* Enable Magic Packet mode */ - tempval = gfar_read(®s->maccfg2); - tempval |= MACCFG2_MPEN; - gfar_write(®s->maccfg2, tempval); - } else { - phy_stop(priv->phydev); - } + } else { + phy_stop(priv->phydev); } return 0; @@ -1592,37 +1563,26 @@ static int gfar_resume(struct device *dev) struct gfar_private *priv = dev_get_drvdata(dev); struct net_device *ndev = priv->ndev; struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned long flags; u32 tempval; int magic_packet = priv->wol_en && (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); - if (!netif_running(ndev)) { - netif_device_attach(ndev); + if (!netif_running(ndev)) return 0; - } - if (!magic_packet && priv->phydev) + if (magic_packet) { + /* Disable Magic Packet mode */ + tempval = gfar_read(®s->maccfg2); + tempval &= ~MACCFG2_MPEN; + gfar_write(®s->maccfg2, tempval); + } else { phy_start(priv->phydev); - - /* Disable Magic Packet mode, in case something - * else woke us up. 
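The gfar_suspend() rework above flattens the old lock_tx_qs()/local_irq_save() dance into a straight-line sequence: bail out early if the interface is not running, quiesce NAPI, detach the device under netif_tx_lock() so no transmit races the halt, stop the controller, and then either arm magic-packet wakeup (leaving the receiver enabled so the wake frame can actually be seen) or stop the PHY. The resume path, shown continuing here, mirrors it, and the request_irq() changes further below add IRQF_NO_SUSPEND for the same reason: the wake interrupt must stay armed while the system sleeps. A schematic version of the suspend ordering, with hypothetical example_* helpers standing in for the gianfar register writes:

#include <linux/netdevice.h>
#include <linux/phy.h>

/* Hypothetical stand-ins for the MAC-specific operations. */
static void example_halt(struct net_device *ndev) { }
static void example_arm_magic_packet(struct net_device *ndev) { }

static int example_suspend(struct net_device *ndev,
			   struct phy_device *phydev, bool wol_magic)
{
	if (!netif_running(ndev))
		return 0;			/* nothing to quiesce */

	/* NAPI is disabled by the caller in the real driver. */

	netif_tx_lock(ndev);
	netif_device_detach(ndev);
	netif_tx_unlock(ndev);

	example_halt(ndev);

	if (wol_magic)
		example_arm_magic_packet(ndev);	/* RX stays enabled */
	else
		phy_stop(phydev);		/* no wake source configured */

	return 0;
}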
- */ - local_irq_save(flags); - lock_tx_qs(priv); - - tempval = gfar_read(®s->maccfg2); - tempval &= ~MACCFG2_MPEN; - gfar_write(®s->maccfg2, tempval); + } gfar_start(priv); - unlock_tx_qs(priv); - local_irq_restore(flags); - netif_device_attach(ndev); - enable_napi(priv); return 0; @@ -2045,7 +2005,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) /* Install our interrupt handlers for Error, * Transmit, and Receive */ - err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, + err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, + IRQF_NO_SUSPEND, gfar_irq(grp, ER)->name, grp); if (err < 0) { netif_err(priv, intr, dev, "Can't get IRQ %d\n", @@ -2068,7 +2029,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) goto rx_irq_fail; } } else { - err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, + err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, + IRQF_NO_SUSPEND, gfar_irq(grp, TX)->name, grp); if (err < 0) { netif_err(priv, intr, dev, "Can't get IRQ %d\n", @@ -2169,8 +2131,6 @@ static int gfar_enet_open(struct net_device *dev) if (err) return err; - device_set_wakeup_enable(&dev->dev, priv->wol_en); - return err; } diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index daa1d37de642..5545e4103368 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -1145,9 +1145,6 @@ struct gfar_private { int oldduplex; int oldlink; - /* Bitfield update lock */ - spinlock_t bflock; - uint32_t msg_enable; struct work_struct reset_task; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index fda12fb32ec7..5b90fcf96265 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -653,7 +653,6 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct gfar_private *priv = netdev_priv(dev); - unsigned long flags; if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && wol->wolopts != 0) @@ -664,9 +663,7 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC); - spin_lock_irqsave(&priv->bflock, flags); - priv->wol_en = !!device_may_wakeup(&dev->dev); - spin_unlock_irqrestore(&priv->bflock, flags); + priv->wol_en = !!device_may_wakeup(&dev->dev); return 0; } @@ -903,27 +900,6 @@ static int gfar_check_filer_hardware(struct gfar_private *priv) return 0; } -static int gfar_comp_asc(const void *a, const void *b) -{ - return memcmp(a, b, 4); -} - -static int gfar_comp_desc(const void *a, const void *b) -{ - return -memcmp(a, b, 4); -} - -static void gfar_swap(void *a, void *b, int size) -{ - u32 *_a = a; - u32 *_b = b; - - swap(_a[0], _b[0]); - swap(_a[1], _b[1]); - swap(_a[2], _b[2]); - swap(_a[3], _b[3]); -} - /* Write a mask to filer cache */ static void gfar_set_mask(u32 mask, struct filer_table *tab) { @@ -1273,310 +1249,6 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule, return 0; } -/* Copy size filer entries */ -static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0], - struct gfar_filer_entry src[0], s32 size) -{ - while (size > 0) { - size--; - dst[size].ctrl = src[size].ctrl; - dst[size].prop = src[size].prop; - } -} - -/* Delete the contents of the filer-table between start and end - * and collapse them - */ -static int 
gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab) -{ - int length; - - if (end > MAX_FILER_CACHE_IDX || end < begin) - return -EINVAL; - - end++; - length = end - begin; - - /* Copy */ - while (end < tab->index) { - tab->fe[begin].ctrl = tab->fe[end].ctrl; - tab->fe[begin++].prop = tab->fe[end++].prop; - - } - /* Fill up with don't cares */ - while (begin < tab->index) { - tab->fe[begin].ctrl = 0x60; - tab->fe[begin].prop = 0xFFFFFFFF; - begin++; - } - - tab->index -= length; - return 0; -} - -/* Make space on the wanted location */ -static int gfar_expand_filer_entries(u32 begin, u32 length, - struct filer_table *tab) -{ - if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || - begin > MAX_FILER_CACHE_IDX) - return -EINVAL; - - gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]), - tab->index - length + 1); - - tab->index += length; - return 0; -} - -static int gfar_get_next_cluster_start(int start, struct filer_table *tab) -{ - for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); - start++) { - if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) == - (RQFCR_AND | RQFCR_CLE)) - return start; - } - return -1; -} - -static int gfar_get_next_cluster_end(int start, struct filer_table *tab) -{ - for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); - start++) { - if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) == - (RQFCR_CLE)) - return start; - } - return -1; -} - -/* Uses hardwares clustering option to reduce - * the number of filer table entries - */ -static void gfar_cluster_filer(struct filer_table *tab) -{ - s32 i = -1, j, iend, jend; - - while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) { - j = i; - while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) { - /* The cluster entries self and the previous one - * (a mask) must be identical! - */ - if (tab->fe[i].ctrl != tab->fe[j].ctrl) - break; - if (tab->fe[i].prop != tab->fe[j].prop) - break; - if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl) - break; - if (tab->fe[i - 1].prop != tab->fe[j - 1].prop) - break; - iend = gfar_get_next_cluster_end(i, tab); - jend = gfar_get_next_cluster_end(j, tab); - if (jend == -1 || iend == -1) - break; - - /* First we make some free space, where our cluster - * element should be. Then we copy it there and finally - * delete in from its old location. 
- */ - if (gfar_expand_filer_entries(iend, (jend - j), tab) == - -EINVAL) - break; - - gfar_copy_filer_entries(&(tab->fe[iend + 1]), - &(tab->fe[jend + 1]), jend - j); - - if (gfar_trim_filer_entries(jend - 1, - jend + (jend - j), - tab) == -EINVAL) - return; - - /* Mask out cluster bit */ - tab->fe[iend].ctrl &= ~(RQFCR_CLE); - } - } -} - -/* Swaps the masked bits of a1<>a2 and b1<>b2 */ -static void gfar_swap_bits(struct gfar_filer_entry *a1, - struct gfar_filer_entry *a2, - struct gfar_filer_entry *b1, - struct gfar_filer_entry *b2, u32 mask) -{ - u32 temp[4]; - temp[0] = a1->ctrl & mask; - temp[1] = a2->ctrl & mask; - temp[2] = b1->ctrl & mask; - temp[3] = b2->ctrl & mask; - - a1->ctrl &= ~mask; - a2->ctrl &= ~mask; - b1->ctrl &= ~mask; - b2->ctrl &= ~mask; - - a1->ctrl |= temp[1]; - a2->ctrl |= temp[0]; - b1->ctrl |= temp[3]; - b2->ctrl |= temp[2]; -} - -/* Generate a list consisting of masks values with their start and - * end of validity and block as indicator for parts belonging - * together (glued by ANDs) in mask_table - */ -static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table, - struct filer_table *tab) -{ - u32 i, and_index = 0, block_index = 1; - - for (i = 0; i < tab->index; i++) { - - /* LSByte of control = 0 sets a mask */ - if (!(tab->fe[i].ctrl & 0xF)) { - mask_table[and_index].mask = tab->fe[i].prop; - mask_table[and_index].start = i; - mask_table[and_index].block = block_index; - if (and_index >= 1) - mask_table[and_index - 1].end = i - 1; - and_index++; - } - /* cluster starts and ends will be separated because they should - * hold their position - */ - if (tab->fe[i].ctrl & RQFCR_CLE) - block_index++; - /* A not set AND indicates the end of a depended block */ - if (!(tab->fe[i].ctrl & RQFCR_AND)) - block_index++; - } - - mask_table[and_index - 1].end = i - 1; - - return and_index; -} - -/* Sorts the entries of mask_table by the values of the masks. - * Important: The 0xFF80 flags of the first and last entry of a - * block must hold their position (which queue, CLusterEnable, ReJEct, - * AND) - */ -static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table, - struct filer_table *temp_table, u32 and_index) -{ - /* Pointer to compare function (_asc or _desc) */ - int (*gfar_comp)(const void *, const void *); - - u32 i, size = 0, start = 0, prev = 1; - u32 old_first, old_last, new_first, new_last; - - gfar_comp = &gfar_comp_desc; - - for (i = 0; i < and_index; i++) { - if (prev != mask_table[i].block) { - old_first = mask_table[start].start + 1; - old_last = mask_table[i - 1].end; - sort(mask_table + start, size, - sizeof(struct gfar_mask_entry), - gfar_comp, &gfar_swap); - - /* Toggle order for every block. This makes the - * thing more efficient! - */ - if (gfar_comp == gfar_comp_desc) - gfar_comp = &gfar_comp_asc; - else - gfar_comp = &gfar_comp_desc; - - new_first = mask_table[start].start + 1; - new_last = mask_table[i - 1].end; - - gfar_swap_bits(&temp_table->fe[new_first], - &temp_table->fe[old_first], - &temp_table->fe[new_last], - &temp_table->fe[old_last], - RQFCR_QUEUE | RQFCR_CLE | - RQFCR_RJE | RQFCR_AND); - - start = i; - size = 0; - } - size++; - prev = mask_table[i].block; - } -} - -/* Reduces the number of masks needed in the filer table to save entries - * This is done by sorting the masks of a depended block. A depended block is - * identified by gluing ANDs or CLE. The sorting order toggles after every - * block. Of course entries in scope of a mask must change their location with - * it. 
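Stepping back from the filer-table removal for a moment: the gianfar wakeup plumbing changed in the hunks above as well. device_init_wakeup() at probe time became device_set_wakeup_capable(), since capability is a hardware property while the enable bit belongs to the user via ethtool, and the bflock spinlock around priv->wol_en was dropped because a single flag store needs no locking. A condensed sketch of that split; all names here are hypothetical, not the driver's:

#include <linux/device.h>
#include <linux/errno.h>

struct example_wol_priv {
	bool wol_en;
};

/* Probe: declare the capability once; do not enable wakeup yet. */
static void example_declare_wol(struct device *dev, bool has_magic_packet)
{
	device_set_wakeup_capable(dev, has_magic_packet);
}

/* ethtool .set_wol: flip the enable bit and cache the result. */
static int example_set_wol(struct device *dev,
			   struct example_wol_priv *priv, bool magic)
{
	if (magic && !device_can_wakeup(dev))
		return -EOPNOTSUPP;

	device_set_wakeup_enable(dev, magic);
	priv->wol_en = !!device_may_wakeup(dev);	/* plain store, no lock */
	return 0;
}

The suspend path then consults the cached wol_en flag, as shown in gfar_suspend() above.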
- */ -static int gfar_optimize_filer_masks(struct filer_table *tab) -{ - struct filer_table *temp_table; - struct gfar_mask_entry *mask_table; - - u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0; - s32 ret = 0; - - /* We need a copy of the filer table because - * we want to change its order - */ - temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL); - if (temp_table == NULL) - return -ENOMEM; - - mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1, - sizeof(struct gfar_mask_entry), GFP_KERNEL); - - if (mask_table == NULL) { - ret = -ENOMEM; - goto end; - } - - and_index = gfar_generate_mask_table(mask_table, tab); - - gfar_sort_mask_table(mask_table, temp_table, and_index); - - /* Now we can copy the data from our duplicated filer table to - * the real one in the order the mask table says - */ - for (i = 0; i < and_index; i++) { - size = mask_table[i].end - mask_table[i].start + 1; - gfar_copy_filer_entries(&(tab->fe[j]), - &(temp_table->fe[mask_table[i].start]), size); - j += size; - } - - /* And finally we just have to check for duplicated masks and drop the - * second ones - */ - for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { - if (tab->fe[i].ctrl == 0x80) { - previous_mask = i++; - break; - } - } - for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { - if (tab->fe[i].ctrl == 0x80) { - if (tab->fe[i].prop == tab->fe[previous_mask].prop) { - /* Two identical ones found! - * So drop the second one! - */ - gfar_trim_filer_entries(i, i, tab); - } else - /* Not identical! */ - previous_mask = i; - } - } - - kfree(mask_table); -end: kfree(temp_table); - return ret; -} - /* Write the bit-pattern from software's buffer to hardware registers */ static int gfar_write_filer_table(struct gfar_private *priv, struct filer_table *tab) @@ -1586,11 +1258,10 @@ static int gfar_write_filer_table(struct gfar_private *priv, return -EBUSY; /* Fill regular entries */ - for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); - i++) + for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++) gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); /* Fill the rest with fall-troughs */ - for (; i < MAX_FILER_IDX - 1; i++) + for (; i < MAX_FILER_IDX; i++) gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF); /* Last entry must be default accept * because that's what people expect @@ -1624,7 +1295,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv) { struct ethtool_flow_spec_container *j; struct filer_table *tab; - s32 i = 0; s32 ret = 0; /* So index is set to zero, too! 
*/ @@ -1649,17 +1319,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv) } } - i = tab->index; - - /* Optimizations to save entries */ - gfar_cluster_filer(tab); - gfar_optimize_filer_masks(tab); - - pr_debug("\tSummary:\n" - "\tData on hardware: %d\n" - "\tCompression rate: %d%%\n", - tab->index, 100 - (100 * tab->index) / i); - /* Write everything to hardware */ ret = gfar_write_filer_table(priv, tab); if (ret == -EBUSY) { @@ -1725,13 +1384,14 @@ static int gfar_add_cls(struct gfar_private *priv, } process: + priv->rx_list.count++; ret = gfar_process_filer_changes(priv); if (ret) goto clean_list; - priv->rx_list.count++; return ret; clean_list: + priv->rx_list.count--; list_del(&temp->list); clean_mem: kfree(temp); diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 3e8b1bfb1f2e..d9884fd15b45 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -27,6 +27,8 @@ #include <linux/of_address.h> #include <linux/phy.h> #include <linux/clk.h> +#include <linux/hrtimer.h> +#include <linux/ktime.h> #include <uapi/linux/ppp_defs.h> #include <net/ip.h> #include <net/ipv6.h> @@ -299,6 +301,7 @@ /* Coalescing */ #define MVPP2_TXDONE_COAL_PKTS_THRESH 15 +#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL #define MVPP2_RX_COAL_PKTS 32 #define MVPP2_RX_COAL_USEC 100 @@ -660,6 +663,14 @@ struct mvpp2_pcpu_stats { u64 tx_bytes; }; +/* Per-CPU port control */ +struct mvpp2_port_pcpu { + struct hrtimer tx_done_timer; + bool timer_scheduled; + /* Tasklet for egress finalization */ + struct tasklet_struct tx_done_tasklet; +}; + struct mvpp2_port { u8 id; @@ -679,6 +690,9 @@ struct mvpp2_port { u32 pending_cause_rx; struct napi_struct napi; + /* Per-CPU port control */ + struct mvpp2_port_pcpu __percpu *pcpu; + /* Flags */ unsigned long flags; @@ -776,6 +790,9 @@ struct mvpp2_txq_pcpu { /* Array of transmitted skb */ struct sk_buff **tx_skb; + /* Array of transmitted buffers' physical addresses */ + dma_addr_t *tx_buffs; + /* Index of last TX DMA descriptor that was inserted */ int txq_put_index; @@ -913,8 +930,6 @@ struct mvpp2_bm_pool { /* Occupied buffers indicator */ atomic_t in_use; int in_use_thresh; - - spinlock_t lock; }; struct mvpp2_buff_hdr { @@ -963,9 +978,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) } static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu, - struct sk_buff *skb) + struct sk_buff *skb, + struct mvpp2_tx_desc *tx_desc) { txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb; + if (skb) + txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] = + tx_desc->buf_phys_addr; txq_pcpu->txq_put_index++; if (txq_pcpu->txq_put_index == txq_pcpu->size) txq_pcpu->txq_put_index = 0; @@ -3376,7 +3395,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev, bm_pool->pkt_size = 0; bm_pool->buf_num = 0; atomic_set(&bm_pool->in_use, 0); - spin_lock_init(&bm_pool->lock); return 0; } @@ -3647,7 +3665,6 @@ static struct mvpp2_bm_pool * mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, int pkt_size) { - unsigned long flags = 0; struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; int num; @@ -3656,8 +3673,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, return NULL; } - spin_lock_irqsave(&new_pool->lock, flags); - if (new_pool->type == MVPP2_BM_FREE) new_pool->type = type; @@ -3686,8 +3701,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, if (num != pkts_num) { WARN(1, "pool %d: %d 
of %d allocated\n", new_pool->id, num, pkts_num); - /* We need to undo the bufs_add() allocations */ - spin_unlock_irqrestore(&new_pool->lock, flags); return NULL; } } @@ -3695,15 +3708,12 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, mvpp2_bm_pool_bufsize_set(port->priv, new_pool, MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); - spin_unlock_irqrestore(&new_pool->lock, flags); - return new_pool; } /* Initialize pools for swf */ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) { - unsigned long flags = 0; int rxq; if (!port->pool_long) { @@ -3714,9 +3724,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) if (!port->pool_long) return -ENOMEM; - spin_lock_irqsave(&port->pool_long->lock, flags); port->pool_long->port_map |= (1 << port->id); - spin_unlock_irqrestore(&port->pool_long->lock, flags); for (rxq = 0; rxq < rxq_number; rxq++) mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); @@ -3730,9 +3738,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) if (!port->pool_short) return -ENOMEM; - spin_lock_irqsave(&port->pool_short->lock, flags); port->pool_short->port_map |= (1 << port->id); - spin_unlock_irqrestore(&port->pool_short->lock, flags); for (rxq = 0; rxq < rxq_number; rxq++) mvpp2_rxq_short_pool_set(port, rxq, @@ -3806,7 +3812,6 @@ static void mvpp2_interrupts_unmask(void *arg) mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), (MVPP2_CAUSE_MISC_SUM_MASK | - MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK | MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)); } @@ -4382,23 +4387,6 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, rxq->time_coal = usec; } -/* Set threshold for TX_DONE pkts coalescing */ -static void mvpp2_tx_done_pkts_coal_set(void *arg) -{ - struct mvpp2_port *port = arg; - int queue; - u32 val; - - for (queue = 0; queue < txq_number; queue++) { - struct mvpp2_tx_queue *txq = port->txqs[queue]; - - val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) & - MVPP2_TRANSMITTED_THRESH_MASK; - mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val); - } -} - /* Free Tx queue skbuffs */ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, @@ -4407,8 +4395,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, int i; for (i = 0; i < num; i++) { - struct mvpp2_tx_desc *tx_desc = txq->descs + - txq_pcpu->txq_get_index; + dma_addr_t buf_phys_addr = + txq_pcpu->tx_buffs[txq_pcpu->txq_get_index]; struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index]; mvpp2_txq_inc_get(txq_pcpu); @@ -4416,8 +4404,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, if (!skb) continue; - dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr, - tx_desc->data_size, DMA_TO_DEVICE); + dma_unmap_single(port->dev->dev.parent, buf_phys_addr, + skb_headlen(skb), DMA_TO_DEVICE); dev_kfree_skb_any(skb); } } @@ -4433,7 +4421,7 @@ static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, u32 cause) { - int queue = fls(cause >> 16) - 1; + int queue = fls(cause) - 1; return port->txqs[queue]; } @@ -4460,6 +4448,29 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, netif_tx_wake_queue(nq); } +static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause) +{ + struct mvpp2_tx_queue *txq; + struct mvpp2_txq_pcpu *txq_pcpu; + unsigned int tx_todo = 0; + + while (cause) { + txq = 
mvpp2_get_tx_queue(port, cause); + if (!txq) + break; + + txq_pcpu = this_cpu_ptr(txq->pcpu); + + if (txq_pcpu->count) { + mvpp2_txq_done(port, txq, txq_pcpu); + tx_todo += txq_pcpu->count; + } + + cause &= ~(1 << txq->log_id); + } + return tx_todo; +} + /* Rx/Tx queue initialization/cleanup methods */ /* Allocate and initialize descriptors for aggr TXQ */ @@ -4649,12 +4660,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port, txq_pcpu->tx_skb = kmalloc(txq_pcpu->size * sizeof(*txq_pcpu->tx_skb), GFP_KERNEL); - if (!txq_pcpu->tx_skb) { - dma_free_coherent(port->dev->dev.parent, - txq->size * MVPP2_DESC_ALIGNED_SIZE, - txq->descs, txq->descs_phys); - return -ENOMEM; - } + if (!txq_pcpu->tx_skb) + goto error; + + txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size * + sizeof(dma_addr_t), GFP_KERNEL); + if (!txq_pcpu->tx_buffs) + goto error; txq_pcpu->count = 0; txq_pcpu->reserved_num = 0; @@ -4663,6 +4675,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port, } return 0; + +error: + for_each_present_cpu(cpu) { + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + kfree(txq_pcpu->tx_skb); + kfree(txq_pcpu->tx_buffs); + } + + dma_free_coherent(port->dev->dev.parent, + txq->size * MVPP2_DESC_ALIGNED_SIZE, + txq->descs, txq->descs_phys); + + return -ENOMEM; } /* Free allocated TXQ resources */ @@ -4675,6 +4700,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, for_each_present_cpu(cpu) { txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); kfree(txq_pcpu->tx_skb); + kfree(txq_pcpu->tx_buffs); } if (txq->descs) @@ -4805,7 +4831,6 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port) goto err_cleanup; } - on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1); on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); return 0; @@ -4887,6 +4912,49 @@ static void mvpp2_link_event(struct net_device *dev) } } +static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu) +{ + ktime_t interval; + + if (!port_pcpu->timer_scheduled) { + port_pcpu->timer_scheduled = true; + interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS); + hrtimer_start(&port_pcpu->tx_done_timer, interval, + HRTIMER_MODE_REL_PINNED); + } +} + +static void mvpp2_tx_proc_cb(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + unsigned int tx_todo, cause; + + if (!netif_running(dev)) + return; + port_pcpu->timer_scheduled = false; + + /* Process all the Tx queues */ + cause = (1 << txq_number) - 1; + tx_todo = mvpp2_tx_done(port, cause); + + /* Set the timer in case not all the packets were processed */ + if (tx_todo) + mvpp2_timer_set(port_pcpu); +} + +static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) +{ + struct mvpp2_port_pcpu *port_pcpu = container_of(timer, + struct mvpp2_port_pcpu, + tx_done_timer); + + tasklet_schedule(&port_pcpu->tx_done_tasklet); + + return HRTIMER_NORESTART; +} + /* Main RX/TX processing routines */ /* Display more error info */ @@ -5144,11 +5212,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, if (i == (skb_shinfo(skb)->nr_frags - 1)) { /* Last descriptor */ tx_desc->command = MVPP2_TXD_L_DESC; - mvpp2_txq_inc_put(txq_pcpu, skb); + mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc); } else { /* Descriptor in the middle: Not First, Not Last */ tx_desc->command = 0; - mvpp2_txq_inc_put(txq_pcpu, NULL); + mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc); } } @@ -5214,12 +5282,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) /* First and 
Last descriptor */ tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; tx_desc->command = tx_cmd; - mvpp2_txq_inc_put(txq_pcpu, skb); + mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc); } else { /* First but not Last */ tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; tx_desc->command = tx_cmd; - mvpp2_txq_inc_put(txq_pcpu, NULL); + mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc); /* Continue with other skb fragments */ if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { @@ -5255,6 +5323,17 @@ out: dev_kfree_skb_any(skb); } + /* Finalize TX processing */ + if (txq_pcpu->count >= txq->done_pkts_coal) + mvpp2_txq_done(port, txq, txq_pcpu); + + /* Set the timer in case not all frags were processed */ + if (txq_pcpu->count <= frags && txq_pcpu->count > 0) { + struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + + mvpp2_timer_set(port_pcpu); + } + return NETDEV_TX_OK; } @@ -5268,10 +5347,11 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause) netdev_err(dev, "tx fifo underrun error\n"); } -static void mvpp2_txq_done_percpu(void *arg) +static int mvpp2_poll(struct napi_struct *napi, int budget) { - struct mvpp2_port *port = arg; - u32 cause_rx_tx, cause_tx, cause_misc; + u32 cause_rx_tx, cause_rx, cause_misc; + int rx_done = 0; + struct mvpp2_port *port = netdev_priv(napi->dev); /* Rx/Tx cause register * @@ -5285,7 +5365,7 @@ static void mvpp2_txq_done_percpu(void *arg) */ cause_rx_tx = mvpp2_read(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); - cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; + cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; if (cause_misc) { @@ -5297,26 +5377,6 @@ static void mvpp2_txq_done_percpu(void *arg) cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); } - /* Release TX descriptors */ - if (cause_tx) { - struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx); - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); - - if (txq_pcpu->count) - mvpp2_txq_done(port, txq, txq_pcpu); - } -} - -static int mvpp2_poll(struct napi_struct *napi, int budget) -{ - u32 cause_rx_tx, cause_rx; - int rx_done = 0; - struct mvpp2_port *port = netdev_priv(napi->dev); - - on_each_cpu(mvpp2_txq_done_percpu, port, 1); - - cause_rx_tx = mvpp2_read(port->priv, - MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; /* Process RX packets */ @@ -5561,6 +5621,8 @@ err_cleanup_rxqs: static int mvpp2_stop(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_port_pcpu *port_pcpu; + int cpu; mvpp2_stop_dev(port); mvpp2_phy_disconnect(port); @@ -5569,6 +5631,13 @@ static int mvpp2_stop(struct net_device *dev) on_each_cpu(mvpp2_interrupts_mask, port, 1); free_irq(port->irq, port); + for_each_present_cpu(cpu) { + port_pcpu = per_cpu_ptr(port->pcpu, cpu); + + hrtimer_cancel(&port_pcpu->tx_done_timer); + port_pcpu->timer_scheduled = false; + tasklet_kill(&port_pcpu->tx_done_tasklet); + } mvpp2_cleanup_rxqs(port); mvpp2_cleanup_txqs(port); @@ -5784,7 +5853,6 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev, txq->done_pkts_coal = c->tx_max_coalesced_frames; } - on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1); return 0; } @@ -6035,6 +6103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, { struct device_node *phy_node; struct mvpp2_port *port; + struct mvpp2_port_pcpu *port_pcpu; struct net_device *dev; struct resource *res; const char *dt_mac_addr; @@ -6044,7 +6113,7 @@ static int 
mvpp2_port_probe(struct platform_device *pdev, int features; int phy_mode; int priv_common_regs_num = 2; - int err, i; + int err, i, cpu; dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number, rxq_number); @@ -6135,6 +6204,24 @@ static int mvpp2_port_probe(struct platform_device *pdev, } mvpp2_port_power_up(port); + port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); + if (!port->pcpu) { + err = -ENOMEM; + goto err_free_txq_pcpu; + } + + for_each_present_cpu(cpu) { + port_pcpu = per_cpu_ptr(port->pcpu, cpu); + + hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; + port_pcpu->timer_scheduled = false; + + tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb, + (unsigned long)dev); + } + netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT); features = NETIF_F_SG | NETIF_F_IP_CSUM; dev->features = features | NETIF_F_RXCSUM; @@ -6144,7 +6231,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, err = register_netdev(dev); if (err < 0) { dev_err(&pdev->dev, "failed to register netdev\n"); - goto err_free_txq_pcpu; + goto err_free_port_pcpu; } netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); @@ -6153,6 +6240,8 @@ static int mvpp2_port_probe(struct platform_device *pdev, priv->port_list[id] = port; return 0; +err_free_port_pcpu: + free_percpu(port->pcpu); err_free_txq_pcpu: for (i = 0; i < txq_number; i++) free_percpu(port->txqs[i]->pcpu); @@ -6171,6 +6260,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port) int i; unregister_netdev(port->dev); + free_percpu(port->pcpu); free_percpu(port->stats); for (i = 0; i < txq_number; i++) free_percpu(port->txqs[i]->pcpu); diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 82040137d7d9..0a3202047569 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -686,6 +686,7 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, { struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; struct mlx4_cmd_context *context; + long ret_wait; int err = 0; down(&cmd->event_sem); @@ -711,8 +712,20 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, if (err) goto out_reset; - if (!wait_for_completion_timeout(&context->done, - msecs_to_jiffies(timeout))) { + if (op == MLX4_CMD_SENSE_PORT) { + ret_wait = + wait_for_completion_interruptible_timeout(&context->done, + msecs_to_jiffies(timeout)); + if (ret_wait < 0) { + context->fw_status = 0; + context->out_param = 0; + context->result = 0; + } + } else { + ret_wait = (long)wait_for_completion_timeout(&context->done, + msecs_to_jiffies(timeout)); + } + if (!ret_wait) { mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", op); if (op == MLX4_CMD_NOP) { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 7a4f20bb7fcb..9c145dddd717 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -246,7 +246,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring) { - BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size); return ring->prod == ring->cons; } diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index aae13adfb492..8e81e53c370e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ 
b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -601,7 +601,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) continue; mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n", __func__, i, port); - s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; + s_info = &priv->mfunc.master.vf_oper[i].vport[port].state; if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { eqe->event.port_change.port = cpu_to_be32( @@ -640,7 +640,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) continue; if (i == mlx4_master_func_num(dev)) continue; - s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; + s_info = &priv->mfunc.master.vf_oper[i].vport[port].state; if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { eqe->event.port_change.port = cpu_to_be32( diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 12fbfcb44d8a..29c2a017a450 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2273,6 +2273,11 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev) } else if (err == -ENOENT) { err = 0; continue; + } else if (mlx4_is_slave(dev) && err == -EINVAL) { + priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev); + mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n", + MLX4_SINK_COUNTER_INDEX(dev)); + err = 0; } else { mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n", __func__, port + 1, err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index afad529838de..06e3e1e54c35 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -391,6 +391,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) /* disable cmdif checksum */ MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); + MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); + err = set_caps(dev, set_ctx, set_sz); query_ex: diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 33669c29b341..753ea8bad953 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -1415,7 +1415,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) if (fw->size & 0xF) { addr = dest + size; for (i = 0; i < (fw->size & 0xF); i++) - data[i] = temp[size + i]; + data[i] = ((u8 *)temp)[size + i]; for (; i < 16; i++) data[i] = 0; ret = qlcnic_ms_mem_write128(adapter, addr, diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 3df51faf18ae..f790f61ea78a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4875,10 +4875,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_46: case RTL_GIGA_MAC_VER_47: case RTL_GIGA_MAC_VER_48: + RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; case RTL_GIGA_MAC_VER_49: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: - RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); + RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); break; default: RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 2d8578cade03..2e7f9a2834be 100644 --- 
a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -4821,6 +4821,7 @@ static void rocker_remove_ports(const struct rocker *rocker)
 		rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
 				   ROCKER_OP_FLAG_REMOVE);
 		unregister_netdev(rocker_port->dev);
+		free_netdev(rocker_port->dev);
 	}
 	kfree(rocker->ports);
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 7e3129e7f143..f0e4bb4e3ec5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -42,7 +42,7 @@
 #define NSS_COMMON_CLK_DIV_MASK			0x7f
 
 #define NSS_COMMON_CLK_SRC_CTRL			0x14
-#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x)	(1 << x)
+#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x)	(x)
 /* Mode is coded on 1 bit but is different depending on the MAC ID:
  * MAC0: QSGMII=0 RGMII=1
  * MAC1: QSGMII=0 SGMII=0 RGMII=1
@@ -291,7 +291,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
 
 	/* Configure the clock src according to the mode */
 	regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
-	val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+	val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
 	switch (gmac->phy_mode) {
 	case PHY_INTERFACE_MODE_RGMII:
 		val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index f3918c7e7eeb..bcdc8955c719 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -413,3 +413,7 @@ static int stmmac_pltfr_resume(struct device *dev)
 SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
 		  stmmac_pltfr_resume);
 EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 0c5842aeb807..ab6051a43134 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6658,10 +6658,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
 		struct sk_buff *skb_new;
 
 		skb_new = skb_realloc_headroom(skb, len);
-		if (!skb_new) {
-			rp->tx_errors++;
+		if (!skb_new)
 			goto out_drop;
-		}
 		kfree_skb(skb);
 		skb = skb_new;
 	} else
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index bbacf5cccec2..bb1bb72121c0 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -85,7 +85,6 @@ struct netcp_intf {
 	struct list_head	rxhook_list_head;
 	unsigned int		rx_queue_id;
 	void			*rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
-	u32			rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
 	struct napi_struct	rx_napi;
 	struct napi_struct	tx_napi;
 
@@ -223,6 +222,7 @@ void *netcp_device_find_module(struct netcp_device *netcp_device,
 
 /* SGMII functions */
 int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port);
+bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set);
 int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port);
 int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface);
 
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index ec8ed30196f3..4755838c6137 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -34,6 +34,7 @@
 
 #define NETCP_SOP_OFFSET	(NET_IP_ALIGN + NET_SKB_PAD)
 #define NETCP_NAPI_WEIGHT	64
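A note on the mvpp2 hunks further above, before the netcp changes continue. That patch drops the hardware TX-done coalescing threshold and removes the TX-occupancy cause from the unmasked interrupts; egress completion instead happens inline in mvpp2_tx() once enough packets accumulate, with a per-CPU hrtimer plus tasklet sweeping up whatever remains. The timer re-arms itself until the queues drain, and the actual reaping runs in the tasklet rather than in hard-irq context. A condensed sketch of that machinery, mirroring the 1 ms period from the patch; all example_* names are hypothetical:

#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>

#define EXAMPLE_TXDONE_PERIOD_NS	1000000UL	/* 1 ms, as in the patch */

struct example_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	struct tasklet_struct tx_done_tasklet;
};

static void example_timer_set(struct example_pcpu *pcpu)
{
	if (!pcpu->timer_scheduled) {
		pcpu->timer_scheduled = true;
		hrtimer_start(&pcpu->tx_done_timer,
			      ktime_set(0, EXAMPLE_TXDONE_PERIOD_NS),
			      HRTIMER_MODE_REL_PINNED);
	}
}

/* Hypothetical reaper: returns how many descriptors are still pending. */
static unsigned int example_tx_done(struct example_pcpu *pcpu)
{
	return 0;
}

static void example_tx_proc_cb(unsigned long data)
{
	struct example_pcpu *pcpu = (struct example_pcpu *)data;

	pcpu->timer_scheduled = false;
	/* If descriptors are still outstanding, re-arm for another pass. */
	if (example_tx_done(pcpu))
		example_timer_set(pcpu);
}

static enum hrtimer_restart example_hr_timer_cb(struct hrtimer *timer)
{
	struct example_pcpu *pcpu = container_of(timer, struct example_pcpu,
						 tx_done_timer);

	/* Defer the actual reaping out of hard-irq context. */
	tasklet_schedule(&pcpu->tx_done_tasklet);
	return HRTIMER_NORESTART;
}

static void example_pcpu_init(struct example_pcpu *pcpu)
{
	hrtimer_init(&pcpu->tx_done_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED);
	pcpu->tx_done_timer.function = example_hr_timer_cb;
	pcpu->timer_scheduled = false;
	tasklet_init(&pcpu->tx_done_tasklet, example_tx_proc_cb,
		     (unsigned long)pcpu);
}

The per-CPU instances are torn down in mvpp2_stop() with hrtimer_cancel() and tasklet_kill(), as the hunks above show; pinned relative timers keep completion work on the CPU that queued the packets.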
#define NETCP_TX_TIMEOUT (5 * HZ) +#define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN) #define NETCP_MIN_PACKET_SIZE ETH_ZLEN #define NETCP_MAX_MCAST_ADDR 16 @@ -804,30 +805,28 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) if (likely(fdq == 0)) { unsigned int primary_buf_len; /* Allocate a primary receive queue entry */ - buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET; + buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET; primary_buf_len = SKB_DATA_ALIGN(buf_len) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - if (primary_buf_len <= PAGE_SIZE) { - bufptr = netdev_alloc_frag(primary_buf_len); - pad[1] = primary_buf_len; - } else { - bufptr = kmalloc(primary_buf_len, GFP_ATOMIC | - GFP_DMA32 | __GFP_COLD); - pad[1] = 0; - } + bufptr = netdev_alloc_frag(primary_buf_len); + pad[1] = primary_buf_len; if (unlikely(!bufptr)) { - dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n"); + dev_warn_ratelimited(netcp->ndev_dev, + "Primary RX buffer alloc failed\n"); goto fail; } dma = dma_map_single(netcp->dev, bufptr, buf_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(netcp->dev, dma))) + goto fail; + pad[0] = (u32)bufptr; } else { /* Allocate a secondary receive queue entry */ - page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD); + page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD); if (unlikely(!page)) { dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n"); goto fail; @@ -1010,7 +1009,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp) /* Map the linear buffer */ dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE); - if (unlikely(!dma_addr)) { + if (unlikely(dma_mapping_error(dev, dma_addr))) { dev_err(netcp->ndev_dev, "Failed to map skb buffer\n"); return NULL; } @@ -1546,8 +1545,8 @@ static int netcp_setup_navigator_resources(struct net_device *ndev) knav_queue_disable_notify(netcp->rx_queue); /* open Rx FDQs */ - for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && - netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) { + for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i]; + ++i) { snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i); netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0); if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) { @@ -1941,14 +1940,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device, netcp->rx_queue_depths[0] = 128; } - ret = of_property_read_u32_array(node_interface, "rx-buffer-size", - netcp->rx_buffer_sizes, - KNAV_DMA_FDQ_PER_CHAN); - if (ret) { - dev_err(dev, "missing \"rx-buffer-size\" parameter\n"); - netcp->rx_buffer_sizes[0] = 1536; - } - ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2); if (ret < 0) { dev_err(dev, "missing \"rx-pool\" parameter\n"); @@ -2112,6 +2103,7 @@ probe_quit: static int netcp_remove(struct platform_device *pdev) { struct netcp_device *netcp_device = platform_get_drvdata(pdev); + struct netcp_intf *netcp_intf, *netcp_tmp; struct netcp_inst_modpriv *inst_modpriv, *tmp; struct netcp_module *module; @@ -2123,10 +2115,17 @@ static int netcp_remove(struct platform_device *pdev) list_del(&inst_modpriv->inst_list); kfree(inst_modpriv); } - WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n", - pdev->name); - devm_kfree(&pdev->dev, netcp_device); + /* now that all modules are removed, clean up the interfaces */ + list_for_each_entry_safe(netcp_intf, netcp_tmp, + &netcp_device->interface_head, + interface_list) { + 
netcp_delete_interface(netcp_device, netcp_intf->ndev); + } + + WARN(!list_empty(&netcp_device->interface_head), + "%s interface list not empty!\n", pdev->name); + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); platform_set_drvdata(pdev, NULL); diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 9b7e0a34c98b..1974a8ae764a 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -1901,11 +1901,28 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave, writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control)); } +static void gbe_sgmii_rtreset(struct gbe_priv *priv, + struct gbe_slave *slave, bool set) +{ + void __iomem *sgmii_port_regs; + + if (SLAVE_LINK_IS_XGMII(slave)) + return; + + if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2)) + sgmii_port_regs = priv->sgmii_port34_regs; + else + sgmii_port_regs = priv->sgmii_port_regs; + + netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set); +} + static void gbe_slave_stop(struct gbe_intf *intf) { struct gbe_priv *gbe_dev = intf->gbe_dev; struct gbe_slave *slave = intf->slave; + gbe_sgmii_rtreset(gbe_dev, slave, true); gbe_port_reset(slave); /* Disable forwarding */ cpsw_ale_control_set(gbe_dev->ale, slave->port_num, @@ -1947,6 +1964,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf) gbe_sgmii_config(priv, slave); gbe_port_reset(slave); + gbe_sgmii_rtreset(priv, slave, false); gbe_port_config(priv, slave, priv->rx_packet_max); gbe_set_slave_mac(slave, gbe_intf); /* enable forwarding */ @@ -2490,10 +2508,9 @@ static void free_secondary_ports(struct gbe_priv *gbe_dev) { struct gbe_slave *slave; - for (;;) { + while (!list_empty(&gbe_dev->secondary_slaves)) { slave = first_sec_slave(gbe_dev); - if (!slave) - break; + if (slave->phy) phy_disconnect(slave->phy); list_del(&slave->slave_list); @@ -2839,14 +2856,13 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, &gbe_dev->dma_chan_name); if (ret < 0) { dev_err(dev, "missing \"tx-channel\" parameter\n"); - ret = -ENODEV; - goto quit; + return -EINVAL; } if (!strcmp(node->name, "gbe")) { ret = get_gbe_resource_version(gbe_dev, node); if (ret) - goto quit; + return ret; dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version); @@ -2857,22 +2873,20 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, else ret = -ENODEV; - if (ret) - goto quit; } else if (!strcmp(node->name, "xgbe")) { ret = set_xgbe_ethss10_priv(gbe_dev, node); if (ret) - goto quit; + return ret; ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs, gbe_dev->ss_regs); - if (ret) - goto quit; } else { dev_err(dev, "unknown GBE node(%s)\n", node->name); ret = -ENODEV; - goto quit; } + if (ret) + return ret; + interfaces = of_get_child_by_name(node, "interfaces"); if (!interfaces) dev_err(dev, "could not find interfaces\n"); @@ -2880,11 +2894,11 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device, gbe_dev->dma_chan_name, gbe_dev->tx_queue_id); if (ret) - goto quit; + return ret; ret = netcp_txpipe_open(&gbe_dev->tx_pipe); if (ret) - goto quit; + return ret; /* Create network interfaces */ INIT_LIST_HEAD(&gbe_dev->gbe_intf_head); @@ -2899,6 +2913,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) break; } + of_node_put(interfaces); if 
(!gbe_dev->num_slaves) dev_warn(dev, "No network interface configured\n"); @@ -2911,9 +2926,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, of_node_put(secondary_ports); if (!gbe_dev->num_slaves) { - dev_err(dev, "No network interface or secondary ports configured\n"); + dev_err(dev, + "No network interface or secondary ports configured\n"); ret = -ENODEV; - goto quit; + goto free_sec_ports; } memset(&ale_params, 0, sizeof(ale_params)); @@ -2927,7 +2943,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, if (!gbe_dev->ale) { dev_err(gbe_dev->dev, "error initializing ale engine\n"); ret = -ENODEV; - goto quit; + goto free_sec_ports; } else { dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n"); } @@ -2943,14 +2959,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, *inst_priv = gbe_dev; return 0; -quit: - if (gbe_dev->hw_stats) - devm_kfree(dev, gbe_dev->hw_stats); - cpsw_ale_destroy(gbe_dev->ale); - if (gbe_dev->ss_regs) - devm_iounmap(dev, gbe_dev->ss_regs); - of_node_put(interfaces); - devm_kfree(dev, gbe_dev); +free_sec_ports: + free_secondary_ports(gbe_dev); return ret; } @@ -3023,12 +3033,9 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv) free_secondary_ports(gbe_dev); if (!list_empty(&gbe_dev->gbe_intf_head)) - dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n"); + dev_alert(gbe_dev->dev, + "unreleased ethss interfaces present\n"); - devm_kfree(gbe_dev->dev, gbe_dev->hw_stats); - devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs); - memset(gbe_dev, 0x00, sizeof(*gbe_dev)); - devm_kfree(gbe_dev->dev, gbe_dev); return 0; } diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c index dbeb14266e2f..5d8419f658d0 100644 --- a/drivers/net/ethernet/ti/netcp_sgmii.c +++ b/drivers/net/ethernet/ti/netcp_sgmii.c @@ -18,6 +18,9 @@ #include "netcp.h" +#define SGMII_SRESET_RESET BIT(0) +#define SGMII_SRESET_RTRESET BIT(1) + #define SGMII_REG_STATUS_LOCK BIT(4) #define SGMII_REG_STATUS_LINK BIT(0) #define SGMII_REG_STATUS_AUTONEG BIT(2) @@ -51,12 +54,35 @@ static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val) int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port) { /* Soft reset */ - sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), 0x1); - while (sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) != 0x0) + sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), + SGMII_SRESET_RESET); + + while ((sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) & + SGMII_SRESET_RESET) != 0x0) ; + return 0; } +/* port is 0 based */ +bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set) +{ + u32 reg; + bool oldval; + + /* Initiate a soft reset */ + reg = sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)); + oldval = (reg & SGMII_SRESET_RTRESET) != 0x0; + if (set) + reg |= SGMII_SRESET_RTRESET; + else + reg &= ~SGMII_SRESET_RTRESET; + sgmii_write_reg(sgmii_ofs, SGMII_SRESET_REG(port), reg); + wmb(); + + return oldval; +} + int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port) { u32 status = 0, link = 0; diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 2ffbf13471d0..216bfd350169 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -728,11 +728,12 @@ static int mkiss_open(struct tty_struct *tty) dev->type = ARPHRD_AX25; /* Perform the low-level AX25 initialization. 
*/ - if ((err = ax_open(ax->dev))) { + err = ax_open(ax->dev); + if (err) goto out_free_netdev; - } - if (register_netdev(dev)) + err = register_netdev(dev); + if (err) goto out_free_buffers; /* after register_netdev() - because else printk smashes the kernel */ diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 3b933bb5a8d5..edd77342773a 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -719,6 +719,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, struct virtio_net_hdr vnet_hdr = { 0 }; int vnet_hdr_len = 0; int copylen = 0; + int depth; bool zerocopy = false; size_t linear; ssize_t n; @@ -804,6 +805,12 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, skb_probe_transport_header(skb, ETH_HLEN); + /* Move network header to the right position for VLAN tagged packets */ + if ((skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD)) && + __vlan_get_protocol(skb, skb->protocol, &depth) != 0) + skb_set_network_header(skb, depth); + rcu_read_lock(); vlan = rcu_dereference(q->vlan); /* copy skb_ubuf_info for callback when skb has no error */ diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index 3cc316cb7e6b..d8757bf9ad75 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -102,6 +102,12 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len); + if (len < 0) { + ndev->stats.rx_errors++; + ndev->stats.rx_length_errors++; + goto enqueue_again; + } + skb_put(skb, len); skb->protocol = eth_type_trans(skb, ndev); skb->ip_summed = CHECKSUM_NONE; @@ -121,6 +127,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, return; } +enqueue_again: rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); if (rc) { dev_kfree_skb(skb); @@ -184,7 +191,7 @@ static int ntb_netdev_open(struct net_device *ndev) rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, ndev->mtu + ETH_HLEN); - if (rc == -EINVAL) { + if (rc) { dev_kfree_skb(skb); goto err; } diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 7f6419ebb5e1..ad8cbc6c9ee7 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -27,7 +27,7 @@ #include <linux/usb/cdc.h> /* Version Information */ -#define DRIVER_VERSION "v1.08.0 (2015/01/13)" +#define DRIVER_VERSION "v1.08.1 (2015/07/28)" #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" #define MODULENAME "r8152" @@ -1902,11 +1902,10 @@ static void rtl_drop_queued_tx(struct r8152 *tp) static void rtl8152_tx_timeout(struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); - int i; netif_warn(tp, tx_err, netdev, "Tx timeout\n"); - for (i = 0; i < RTL8152_MAX_TX; i++) - usb_unlink_urb(tp->tx_info[i].urb); + + usb_queue_reset_device(tp->intf); } static void rtl8152_set_rx_mode(struct net_device *netdev) @@ -2075,7 +2074,6 @@ static int rtl_start_rx(struct r8152 *tp) { int i, ret = 0; - napi_disable(&tp->napi); INIT_LIST_HEAD(&tp->rx_done); for (i = 0; i < RTL8152_MAX_RX; i++) { INIT_LIST_HEAD(&tp->rx_info[i].list); @@ -2083,7 +2081,6 @@ static int rtl_start_rx(struct r8152 *tp) if (ret) break; } - napi_enable(&tp->napi); if (ret && ++i < RTL8152_MAX_RX) { struct list_head rx_queue; @@ -2166,6 +2163,7 @@ static int rtl8153_enable(struct r8152 *tp) if (test_bit(RTL8152_UNPLUG, &tp->flags)) return 
-ENODEV; + usb_disable_lpm(tp->udev); set_tx_qlen(tp); rtl_set_eee_plus(tp); r8153_set_rx_early_timeout(tp); @@ -2337,11 +2335,61 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts) device_set_wakeup_enable(&tp->udev->dev, false); } +static void r8153_u1u2en(struct r8152 *tp, bool enable) +{ + u8 u1u2[8]; + + if (enable) + memset(u1u2, 0xff, sizeof(u1u2)); + else + memset(u1u2, 0x00, sizeof(u1u2)); + + usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2); +} + +static void r8153_u2p3en(struct r8152 *tp, bool enable) +{ + u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL); + if (enable && tp->version != RTL_VER_03 && tp->version != RTL_VER_04) + ocp_data |= U2P3_ENABLE; + else + ocp_data &= ~U2P3_ENABLE; + ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data); +} + +static void r8153_power_cut_en(struct r8152 *tp, bool enable) +{ + u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT); + if (enable) + ocp_data |= PWR_EN | PHASE2_EN; + else + ocp_data &= ~(PWR_EN | PHASE2_EN); + ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); + ocp_data &= ~PCUT_STATUS; + ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); +} + +static bool rtl_can_wakeup(struct r8152 *tp) +{ + struct usb_device *udev = tp->udev; + + return (udev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_WAKEUP); +} + static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable) { if (enable) { u32 ocp_data; + r8153_u1u2en(tp, false); + r8153_u2p3en(tp, false); + __rtl_set_wol(tp, WAKE_ANY); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); @@ -2353,6 +2401,8 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable) ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); } else { __rtl_set_wol(tp, tp->saved_wolopts); + r8153_u2p3en(tp, true); + r8153_u1u2en(tp, true); } } @@ -2599,46 +2649,6 @@ static void r8153_hw_phy_cfg(struct r8152 *tp) set_bit(PHY_RESET, &tp->flags); } -static void r8153_u1u2en(struct r8152 *tp, bool enable) -{ - u8 u1u2[8]; - - if (enable) - memset(u1u2, 0xff, sizeof(u1u2)); - else - memset(u1u2, 0x00, sizeof(u1u2)); - - usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2); -} - -static void r8153_u2p3en(struct r8152 *tp, bool enable) -{ - u32 ocp_data; - - ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL); - if (enable) - ocp_data |= U2P3_ENABLE; - else - ocp_data &= ~U2P3_ENABLE; - ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data); -} - -static void r8153_power_cut_en(struct r8152 *tp, bool enable) -{ - u32 ocp_data; - - ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT); - if (enable) - ocp_data |= PWR_EN | PHASE2_EN; - else - ocp_data &= ~(PWR_EN | PHASE2_EN); - ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); - - ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); - ocp_data &= ~PCUT_STATUS; - ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); -} - static void r8153_first_init(struct r8152 *tp) { u32 ocp_data; @@ -2781,6 +2791,7 @@ static void rtl8153_disable(struct r8152 *tp) r8153_disable_aldps(tp); rtl_disable(tp); r8153_enable_aldps(tp); + usb_enable_lpm(tp->udev); } static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) @@ -2901,9 +2912,13 @@ static void rtl8153_up(struct r8152 *tp) if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; + r8153_u1u2en(tp, false); r8153_disable_aldps(tp); r8153_first_init(tp); r8153_enable_aldps(tp); + 
r8153_u2p3en(tp, true); + r8153_u1u2en(tp, true); + usb_enable_lpm(tp->udev); } static void rtl8153_down(struct r8152 *tp) @@ -2914,6 +2929,7 @@ static void rtl8153_down(struct r8152 *tp) } r8153_u1u2en(tp, false); + r8153_u2p3en(tp, false); r8153_power_cut_en(tp, false); r8153_disable_aldps(tp); r8153_enter_oob(tp); @@ -2932,8 +2948,10 @@ static void set_carrier(struct r8152 *tp) if (!netif_carrier_ok(netdev)) { tp->rtl_ops.enable(tp); set_bit(RTL8152_SET_RX_MODE, &tp->flags); + napi_disable(&tp->napi); netif_carrier_on(netdev); rtl_start_rx(tp); + napi_enable(&tp->napi); } } else { if (netif_carrier_ok(netdev)) { @@ -3252,6 +3270,7 @@ static void r8153_init(struct r8152 *tp) msleep(20); } + usb_disable_lpm(tp->udev); r8153_u2p3en(tp, false); if (tp->version == RTL_VER_04) { @@ -3319,6 +3338,59 @@ static void r8153_init(struct r8152 *tp) r8153_enable_aldps(tp); r8152b_enable_fc(tp); rtl_tally_reset(tp); + r8153_u2p3en(tp, true); +} + +static int rtl8152_pre_reset(struct usb_interface *intf) +{ + struct r8152 *tp = usb_get_intfdata(intf); + struct net_device *netdev; + + if (!tp) + return 0; + + netdev = tp->netdev; + if (!netif_running(netdev)) + return 0; + + napi_disable(&tp->napi); + clear_bit(WORK_ENABLE, &tp->flags); + usb_kill_urb(tp->intr_urb); + cancel_delayed_work_sync(&tp->schedule); + if (netif_carrier_ok(netdev)) { + netif_stop_queue(netdev); + mutex_lock(&tp->control); + tp->rtl_ops.disable(tp); + mutex_unlock(&tp->control); + } + + return 0; +} + +static int rtl8152_post_reset(struct usb_interface *intf) +{ + struct r8152 *tp = usb_get_intfdata(intf); + struct net_device *netdev; + + if (!tp) + return 0; + + netdev = tp->netdev; + if (!netif_running(netdev)) + return 0; + + set_bit(WORK_ENABLE, &tp->flags); + if (netif_carrier_ok(netdev)) { + mutex_lock(&tp->control); + tp->rtl_ops.enable(tp); + rtl8152_set_rx_mode(netdev); + mutex_unlock(&tp->control); + netif_wake_queue(netdev); + } + + napi_enable(&tp->napi); + + return 0; } static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) @@ -3374,9 +3446,11 @@ static int rtl8152_resume(struct usb_interface *intf) if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { rtl_runtime_suspend_enable(tp, false); clear_bit(SELECTIVE_SUSPEND, &tp->flags); + napi_disable(&tp->napi); set_bit(WORK_ENABLE, &tp->flags); if (netif_carrier_ok(tp->netdev)) rtl_start_rx(tp); + napi_enable(&tp->napi); } else { tp->rtl_ops.up(tp); rtl8152_set_speed(tp, AUTONEG_ENABLE, @@ -3403,12 +3477,15 @@ static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) if (usb_autopm_get_interface(tp->intf) < 0) return; - mutex_lock(&tp->control); - - wol->supported = WAKE_ANY; - wol->wolopts = __rtl_get_wol(tp); - - mutex_unlock(&tp->control); + if (!rtl_can_wakeup(tp)) { + wol->supported = 0; + wol->wolopts = 0; + } else { + mutex_lock(&tp->control); + wol->supported = WAKE_ANY; + wol->wolopts = __rtl_get_wol(tp); + mutex_unlock(&tp->control); + } usb_autopm_put_interface(tp->intf); } @@ -3418,6 +3495,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) struct r8152 *tp = netdev_priv(dev); int ret; + if (!rtl_can_wakeup(tp)) + return -EOPNOTSUPP; + ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out_set_wol; @@ -4059,6 +4139,9 @@ static int rtl8152_probe(struct usb_interface *intf, goto out1; } + if (!rtl_can_wakeup(tp)) + __rtl_set_wol(tp, 0); + tp->saved_wolopts = __rtl_get_wol(tp); if (tp->saved_wolopts) device_set_wakeup_enable(&udev->dev, true); @@ -4132,6 +4215,8 @@ static struct 
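/*
 * Illustrative sketch, not from the patch: the quiesce/resume ordering that
 * rtl8152_pre_reset() and rtl8152_post_reset() above implement. Every helper
 * here is a hypothetical stub; the driver's real calls are named in the
 * comments.
 */
static void stop_napi_polling(void)   { }	/* napi_disable() */
static void stop_deferred_work(void)  { }	/* clear WORK_ENABLE, cancel_delayed_work_sync() */
static void kill_interrupt_urb(void)  { }	/* usb_kill_urb(intr_urb) */
static void hw_disable(void)          { }	/* rtl_ops.disable() under the control mutex */
static void start_deferred_work(void) { }	/* set WORK_ENABLE */
static void hw_enable(void)           { }	/* rtl_ops.enable(), restore rx mode, wake queue */
static void start_napi_polling(void)  { }	/* napi_enable() */

static void quiesce_for_reset(void)
{
	stop_napi_polling();	/* no new rx processing           */
	stop_deferred_work();	/* no rescheduled work            */
	kill_interrupt_urb();	/* no completions racing the reset */
	hw_disable();
}

static void resume_after_reset(void)
{
	start_deferred_work();	/* re-arm work before enabling hw       */
	hw_enable();
	start_napi_polling();	/* last, once everything below is ready */
}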
usb_driver rtl8152_driver = { .suspend = rtl8152_suspend, .resume = rtl8152_resume, .reset_resume = rtl8152_resume, + .pre_reset = rtl8152_pre_reset, + .post_reset = rtl8152_post_reset, .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, }; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7fbca37a1adf..237f8e5e493d 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1756,9 +1756,9 @@ static int virtnet_probe(struct virtio_device *vdev) /* Do we support "hardware" checksums? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { /* This opens up the world of extra features. */ - dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; + dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; if (csum) - dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; + dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 7193b7304fdd..848ea6a399f2 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -589,7 +589,8 @@ static int cosa_probe(int base, int irq, int dma) chan->netdev->base_addr = chan->cosa->datareg; chan->netdev->irq = chan->cosa->irq; chan->netdev->dma = chan->cosa->dma; - if (register_hdlc_device(chan->netdev)) { + err = register_hdlc_device(chan->netdev); + if (err) { netdev_warn(chan->netdev, "register_hdlc_device() failed\n"); free_netdev(chan->netdev); diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c index 25d1cbd34306..b2f0d245bcf3 100644 --- a/drivers/net/wireless/b43/tables_nphy.c +++ b/drivers/net/wireless/b43/tables_nphy.c @@ -3728,7 +3728,7 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev) switch (phy->rev) { case 6: case 5: - if (sprom->fem.ghz5.extpa_gain == 3) + if (sprom->fem.ghz2.extpa_gain == 3) return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g; /* fall through */ case 4: diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 5000bfcded61..5514ad6d4e54 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -1023,7 +1023,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, cmd->scan_priority = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); - if (iwl_mvm_scan_total_iterations(params) == 0) + if (iwl_mvm_scan_total_iterations(params) == 1) cmd->ooc_priority = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); else diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 6203c4ad9bba..9e144e71da0b 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -478,10 +478,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_WAKE_ME); - else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE | CSR_HW_IF_CONFIG_REG_ENABLE_PME); + mdelay(1); + iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + } mdelay(5); } @@ -575,6 +581,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) if (ret >= 
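/*
 * Illustrative sketch, not from the patch: the error-propagation fix made in
 * cosa_probe() above. Testing the call inside "if (...)" discards the errno;
 * storing it first lets the probe hand back the real reason for the failure.
 * register_thing() is a hypothetical stand-in for register_hdlc_device().
 */
static int register_thing(void) { return 0; }	/* 0 on success, -errno on failure */

static int probe_one(void)
{
	int err;

	err = register_thing();
	if (err) {
		/* undo any partial setup here */
		return err;	/* caller sees the real errno */
	}
	return 0;
}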
0) return 0; + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + msleep(1); + for (iter = 0; iter < 10; iter++) { /* If HW is not ready, prepare the conditions to check again */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, @@ -582,8 +592,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) do { ret = iwl_pcie_set_hw_ready(trans); - if (ret >= 0) - return 0; + if (ret >= 0) { + ret = 0; + goto out; + } usleep_range(200, 1000); t += 200; @@ -593,6 +605,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) IWL_ERR(trans, "Couldn't prepare the card\n"); +out: + iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + return ret; } diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 2b86c2135de3..607acb53c847 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -1875,8 +1875,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, /* start timer if queue currently empty */ if (q->read_ptr == q->write_ptr) { - if (txq->wd_timeout) - mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); + if (txq->wd_timeout) { + /* + * If the TXQ is active, then set the timer, if not, + * set the timer in remainder so that the timer will + * be armed with the right value when the station will + * wake up. + */ + if (!txq->frozen) + mod_timer(&txq->stuck_timer, + jiffies + txq->wd_timeout); + else + txq->frozen_expiry_remainder = txq->wd_timeout; + } IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id); iwl_trans_pcie_ref(trans); } diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index b6cc9ff47fc2..1c6788aecc62 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ -172,6 +172,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common) (struct rsi_91x_sdiodev *)adapter->rsi_dev; u32 len; u32 num_blocks; + const u8 *fw; const struct firmware *fw_entry = NULL; u32 block_size = dev->tx_blk_size; int status = 0; @@ -200,6 +201,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common) return status; } + /* Copy firmware into DMA-accessible memory */ + fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL); + if (!fw) + return -ENOMEM; len = fw_entry->size; if (len % 4) @@ -210,7 +215,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common) rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len); rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks); - status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks); + status = rsi_copy_to_card(common, fw, len, num_blocks); + kfree(fw); release_firmware(fw_entry); return status; } diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c index 1106ce76707e..30c2cf7fa93b 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c @@ -146,7 +146,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common) return status; } + /* Copy firmware into DMA-accessible memory */ fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL); + if (!fw) + return -ENOMEM; len = fw_entry->size; if (len % 4) @@ -158,6 +161,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common) rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks); status = rsi_copy_to_card(common, fw, len, num_blocks); + kfree(fw); 
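/*
 * Illustrative sketch, not from the patch, of the pattern both RSI
 * rsi_load_ta_instructions() fixes above rely on: firmware returned by
 * request_firmware() is const and not guaranteed to be DMA-safe, so it is
 * duplicated into a kmalloc'd buffer before being handed to the transfer
 * path, and freed afterwards. send_to_device() is a hypothetical stand-in
 * for rsi_copy_to_card().
 */
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/slab.h>

static int send_to_device(const u8 *buf, size_t len)
{
	return 0;	/* stand-in for the real block transfer */
}

static int load_fw_dma_safe(const struct firmware *fw_entry)
{
	u8 *fw;
	int status;

	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
	if (!fw)
		return -ENOMEM;

	status = send_to_device(fw, fw_entry->size);

	kfree(fw);	/* always release the DMA-safe copy */
	return status;
}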
release_firmware(fw_entry); return status; } diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c index 3b3a88b53b11..585d0883c7e5 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/rtlwifi/core.c @@ -1015,9 +1015,12 @@ static void send_beacon_frame(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); struct sk_buff *skb = ieee80211_beacon_get(hw, vif); + struct rtl_tcb_desc tcb_desc; - if (skb) - rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL); + if (skb) { + memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); + rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc); + } } static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c index 1017f02d7bf7..7bf88d9dcdc3 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c @@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444); module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444); +module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444); module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog, bool, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 1a83e190fc15..28577a31549d 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -61,6 +61,12 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue, void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) { atomic_dec(&queue->inflight_packets); + + /* Wake the dealloc thread _after_ decrementing inflight_packets so + * that if kthread_stop() has already been called, the dealloc thread + * does not wait forever with nothing to wake it. + */ + wake_up(&queue->dealloc_wq); } int xenvif_schedulable(struct xenvif *vif) diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 7d50711476fe..3f44b522b831 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -810,23 +810,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size) static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue, struct sk_buff *skb, struct xen_netif_tx_request *txp, - struct gnttab_map_grant_ref *gop) + struct gnttab_map_grant_ref *gop, + unsigned int frag_overflow, + struct sk_buff *nskb) { struct skb_shared_info *shinfo = skb_shinfo(skb); skb_frag_t *frags = shinfo->frags; u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; int start; pending_ring_idx_t index; - unsigned int nr_slots, frag_overflow = 0; + unsigned int nr_slots; - /* At this point shinfo->nr_frags is in fact the number of - * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX. - */ - if (shinfo->nr_frags > MAX_SKB_FRAGS) { - frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS; - BUG_ON(frag_overflow > MAX_SKB_FRAGS); - shinfo->nr_frags = MAX_SKB_FRAGS; - } nr_slots = shinfo->nr_frags; /* Skip first skb fragment if it is on same page as header fragment. 
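/*
 * Illustrative sketch, not from the patch, of the lost-wakeup rule behind the
 * xenvif_skb_zerocopy_complete() change above: publish the state change
 * first, then wake; the waiter re-checks the condition inside wait_event(),
 * so a wakeup can never slip between its check and its sleep.
 */
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static atomic_t inflight = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(dealloc_wq);

static void complete_one(void)
{
	atomic_dec(&inflight);	/* state change becomes visible first... */
	wake_up(&dealloc_wq);	/* ...then any sleeping thread is prodded */
}

static int dealloc_thread(void *unused)
{
	while (!kthread_should_stop()) {
		wait_event(dealloc_wq,
			   atomic_read(&inflight) == 0 ||
			   kthread_should_stop());
		/* drain completed requests here */
	}
	return 0;
}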
*/ @@ -841,13 +835,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que } if (frag_overflow) { - struct sk_buff *nskb = xenvif_alloc_skb(0); - if (unlikely(nskb == NULL)) { - if (net_ratelimit()) - netdev_err(queue->vif->dev, - "Can't allocate the frag_list skb.\n"); - return NULL; - } shinfo = skb_shinfo(nskb); frags = shinfo->frags; @@ -1175,9 +1162,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, unsigned *copy_ops, unsigned *map_ops) { - struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop; - struct sk_buff *skb; + struct gnttab_map_grant_ref *gop = queue->tx_map_ops; + struct sk_buff *skb, *nskb; int ret; + unsigned int frag_overflow; while (skb_queue_len(&queue->tx_queue) < budget) { struct xen_netif_tx_request txreq; @@ -1265,6 +1253,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, break; } + skb_shinfo(skb)->nr_frags = ret; + if (data_len < txreq.size) + skb_shinfo(skb)->nr_frags++; + /* At this point shinfo->nr_frags is in fact the number of + * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX. + */ + frag_overflow = 0; + nskb = NULL; + if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) { + frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS; + BUG_ON(frag_overflow > MAX_SKB_FRAGS); + skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS; + nskb = xenvif_alloc_skb(0); + if (unlikely(nskb == NULL)) { + kfree_skb(skb); + xenvif_tx_err(queue, &txreq, idx); + if (net_ratelimit()) + netdev_err(queue->vif->dev, + "Can't allocate the frag_list skb.\n"); + break; + } + } + if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct xen_netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; @@ -1272,6 +1283,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, if (xenvif_set_skb_gso(queue->vif, skb, gso)) { /* Failure in xenvif_set_skb_gso is fatal. 
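/*
 * Illustrative sketch, not from the patch, of the restructuring in
 * xenvif_tx_build_gops() above: the overflow skb is now allocated before any
 * grant-map operations are queued, so an allocation failure is a trivial
 * unwind instead of a mid-stream abort. All names here are hypothetical.
 */
#include <stdlib.h>

static void queue_irreversible_ops(void) { }	/* e.g. grant-map requests */

static int build_one(int needs_extra, void **extra)
{
	/* acquire everything that can fail before committing to anything */
	if (needs_extra) {
		*extra = malloc(64);
		if (!*extra)
			return -1;	/* nothing queued yet, nothing to undo */
	}
	queue_irreversible_ops();	/* only now take hard-to-undo steps */
	return 0;
}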
*/ kfree_skb(skb); + kfree_skb(nskb); break; } } @@ -1294,9 +1306,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, (*copy_ops)++; - skb_shinfo(skb)->nr_frags = ret; if (data_len < txreq.size) { - skb_shinfo(skb)->nr_frags++; frag_set_pending_idx(&skb_shinfo(skb)->frags[0], pending_idx); xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop); @@ -1310,13 +1320,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, queue->pending_cons++; - request_gop = xenvif_get_requests(queue, skb, txfrags, gop); - if (request_gop == NULL) { - kfree_skb(skb); - xenvif_tx_err(queue, &txreq, idx); - break; - } - gop = request_gop; + gop = xenvif_get_requests(queue, skb, txfrags, gop, + frag_overflow, nskb); __skb_queue_tail(&queue->tx_queue, skb); @@ -1536,7 +1541,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success) smp_wmb(); queue->dealloc_prod++; } while (ubuf); - wake_up(&queue->dealloc_wq); spin_unlock_irqrestore(&queue->callback_lock, flags); if (likely(zerocopy_success)) diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c index 23435f2a5486..2e2530743831 100644 --- a/drivers/ntb/ntb.c +++ b/drivers/ntb/ntb.c @@ -114,7 +114,7 @@ int ntb_register_device(struct ntb_dev *ntb) ntb->dev.bus = &ntb_bus; ntb->dev.parent = &ntb->pdev->dev; ntb->dev.release = ntb_dev_release; - dev_set_name(&ntb->dev, pci_name(ntb->pdev)); + dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev)); ntb->ctx = NULL; ntb->ctx_ops = NULL; diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index efe3ad4122f2..1c6386d5f79c 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -142,10 +142,11 @@ struct ntb_transport_qp { void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); + struct list_head rx_post_q; struct list_head rx_pend_q; struct list_head rx_free_q; - spinlock_t ntb_rx_pend_q_lock; - spinlock_t ntb_rx_free_q_lock; + /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */ + spinlock_t ntb_rx_q_lock; void *rx_buff; unsigned int rx_index; unsigned int rx_max_entry; @@ -211,6 +212,8 @@ struct ntb_transport_ctx { bool link_is_up; struct delayed_work link_work; struct work_struct link_cleanup; + + struct dentry *debugfs_node_dir; }; enum { @@ -436,13 +439,17 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count, char *buf; ssize_t ret, out_offset, out_count; + qp = filp->private_data; + + if (!qp || !qp->link_is_up) + return 0; + out_count = 1000; buf = kmalloc(out_count, GFP_KERNEL); if (!buf) return -ENOMEM; - qp = filp->private_data; out_offset = 0; out_offset += snprintf(buf + out_offset, out_count - out_offset, "NTB QP stats\n"); @@ -534,6 +541,27 @@ out: return entry; } +static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock, + struct list_head *list, + struct list_head *to_list) +{ + struct ntb_queue_entry *entry; + unsigned long flags; + + spin_lock_irqsave(lock, flags); + + if (list_empty(list)) { + entry = NULL; + } else { + entry = list_first_entry(list, struct ntb_queue_entry, entry); + list_move_tail(&entry->entry, to_list); + } + + spin_unlock_irqrestore(lock, flags); + + return entry; +} + static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, unsigned int qp_num) { @@ -601,13 +629,16 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw) } static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, - unsigned int size) + resource_size_t size) { struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; struct pci_dev *pdev = 
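/*
 * Illustrative sketch, not from the patch, of why the ntb_register_device()
 * hunk above adds "%s": dev_set_name() takes a printf-style format string,
 * so a device-derived name must be passed as data, never as the format
 * itself. The rule is the same one printf() users know:
 */
#include <stdio.h>

static void name_demo(const char *untrusted)
{
	/* printf(untrusted);  BAD: any '%' in it becomes a conversion */
	printf("%s", untrusted);	/* OK: the string is only ever data */
}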
nt->ndev->pdev; - unsigned int xlat_size, buff_size; + size_t xlat_size, buff_size; int rc; + if (!size) + return -EINVAL; + xlat_size = round_up(size, mw->xlat_align_size); buff_size = round_up(size, mw->xlat_align); @@ -627,7 +658,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, if (!mw->virt_addr) { mw->xlat_size = 0; mw->buff_size = 0; - dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n", + dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n", buff_size); return -ENOMEM; } @@ -867,6 +898,8 @@ static void ntb_qp_link_work(struct work_struct *work) if (qp->event_handler) qp->event_handler(qp->cb_data, qp->link_is_up); + + tasklet_schedule(&qp->rxc_db_work); } else if (nt->link_is_up) schedule_delayed_work(&qp->link_work, msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT)); @@ -923,12 +956,12 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, qp->tx_max_frame = min(transport_mtu, tx_size / 2); qp->tx_max_entry = tx_size / qp->tx_max_frame; - if (nt_debugfs_dir) { + if (nt->debugfs_node_dir) { char debugfs_name[4]; snprintf(debugfs_name, 4, "qp%d", qp_num); qp->debugfs_dir = debugfs_create_dir(debugfs_name, - nt_debugfs_dir); + nt->debugfs_node_dir); qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR, qp->debugfs_dir, qp, @@ -941,10 +974,10 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work); INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work); - spin_lock_init(&qp->ntb_rx_pend_q_lock); - spin_lock_init(&qp->ntb_rx_free_q_lock); + spin_lock_init(&qp->ntb_rx_q_lock); spin_lock_init(&qp->ntb_tx_free_q_lock); + INIT_LIST_HEAD(&qp->rx_post_q); INIT_LIST_HEAD(&qp->rx_pend_q); INIT_LIST_HEAD(&qp->rx_free_q); INIT_LIST_HEAD(&qp->tx_free_q); @@ -1031,6 +1064,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) goto err2; } + if (nt_debugfs_dir) { + nt->debugfs_node_dir = + debugfs_create_dir(pci_name(ndev->pdev), + nt_debugfs_dir); + } + for (i = 0; i < qp_count; i++) { rc = ntb_transport_init_queue(nt, i); if (rc) @@ -1107,22 +1146,47 @@ static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev) kfree(nt); } -static void ntb_rx_copy_callback(void *data) +static void ntb_complete_rxc(struct ntb_transport_qp *qp) { - struct ntb_queue_entry *entry = data; - struct ntb_transport_qp *qp = entry->qp; - void *cb_data = entry->cb_data; - unsigned int len = entry->len; - struct ntb_payload_header *hdr = entry->rx_hdr; + struct ntb_queue_entry *entry; + void *cb_data; + unsigned int len; + unsigned long irqflags; + + spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags); + + while (!list_empty(&qp->rx_post_q)) { + entry = list_first_entry(&qp->rx_post_q, + struct ntb_queue_entry, entry); + if (!(entry->flags & DESC_DONE_FLAG)) + break; + + entry->rx_hdr->flags = 0; + iowrite32(entry->index, &qp->rx_info->entry); - hdr->flags = 0; + cb_data = entry->cb_data; + len = entry->len; - iowrite32(entry->index, &qp->rx_info->entry); + list_move_tail(&entry->entry, &qp->rx_free_q); - ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q); + spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); - if (qp->rx_handler && qp->client_ready) - qp->rx_handler(qp, qp->cb_data, cb_data, len); + if (qp->rx_handler && qp->client_ready) + qp->rx_handler(qp, qp->cb_data, cb_data, len); + + spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags); + } + + spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); +} + +static void ntb_rx_copy_callback(void *data) +{ + 
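/*
 * Illustrative sketch, not from the patch, of the in-order completion scheme
 * ntb_complete_rxc() above implements: copies may finish out of order, so
 * each callback only marks its own entry done, and a single drain loop
 * retires entries strictly from the head of the post queue, stopping at the
 * first entry that is still pending. Locking (ntb_rx_q_lock in the driver)
 * is omitted here for brevity.
 */
#include <stdbool.h>
#include <stddef.h>

struct rx_entry {
	struct rx_entry *next;
	bool done;
};

static struct rx_entry *post_head;	/* oldest outstanding entry */

static void complete_entry(struct rx_entry *e)
{
	e->done = true;			/* may happen in any order */

	while (post_head && post_head->done) {
		struct rx_entry *victim = post_head;

		post_head = victim->next;
		(void)victim;	/* retire it: hand buffer back, recycle slot */
	}
}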
struct ntb_queue_entry *entry = data; + + entry->flags |= DESC_DONE_FLAG; + + ntb_complete_rxc(entry->qp); } static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset) @@ -1138,19 +1202,18 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset) ntb_rx_copy_callback(entry); } -static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, - size_t len) +static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset) { struct dma_async_tx_descriptor *txd; struct ntb_transport_qp *qp = entry->qp; struct dma_chan *chan = qp->dma_chan; struct dma_device *device; - size_t pay_off, buff_off; + size_t pay_off, buff_off, len; struct dmaengine_unmap_data *unmap; dma_cookie_t cookie; void *buf = entry->buf; - entry->len = len; + len = entry->len; if (!chan) goto err; @@ -1226,7 +1289,6 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp) struct ntb_payload_header *hdr; struct ntb_queue_entry *entry; void *offset; - int rc; offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index; hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header); @@ -1255,65 +1317,43 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp) return -EIO; } - entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); + entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q); if (!entry) { dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n"); qp->rx_err_no_buf++; - - rc = -ENOMEM; - goto err; + return -EAGAIN; } + entry->rx_hdr = hdr; + entry->index = qp->rx_index; + if (hdr->len > entry->len) { dev_dbg(&qp->ndev->pdev->dev, "receive buffer overflow! Wanted %d got %d\n", hdr->len, entry->len); qp->rx_err_oflow++; - rc = -EIO; - goto err; - } + entry->len = -EIO; + entry->flags |= DESC_DONE_FLAG; - dev_dbg(&qp->ndev->pdev->dev, - "RX OK index %u ver %u size %d into buf size %d\n", - qp->rx_index, hdr->ver, hdr->len, entry->len); + ntb_complete_rxc(qp); + } else { + dev_dbg(&qp->ndev->pdev->dev, + "RX OK index %u ver %u size %d into buf size %d\n", + qp->rx_index, hdr->ver, hdr->len, entry->len); - qp->rx_bytes += hdr->len; - qp->rx_pkts++; + qp->rx_bytes += hdr->len; + qp->rx_pkts++; - entry->index = qp->rx_index; - entry->rx_hdr = hdr; + entry->len = hdr->len; - ntb_async_rx(entry, offset, hdr->len); + ntb_async_rx(entry, offset); + } qp->rx_index++; qp->rx_index %= qp->rx_max_entry; return 0; - -err: - /* FIXME: if this syncrhonous update of the rx_index gets ahead of - * asyncrhonous ntb_rx_copy_callback of previous entry, there are three - * scenarios: - * - * 1) The peer might miss this update, but observe the update - * from the memcpy completion callback. In this case, the buffer will - * not be freed on the peer to be reused for a different packet. The - * successful rx of a later packet would clear the condition, but the - * condition could persist if several rx fail in a row. - * - * 2) The peer may observe this update before the asyncrhonous copy of - * prior packets is completed. The peer may overwrite the buffers of - * the prior packets before they are copied. - * - * 3) Both: the peer may observe the update, and then observe the index - * decrement by the asynchronous completion callback. Who knows what - * badness that will cause. 
- */ - hdr->flags = 0; - iowrite32(qp->rx_index, &qp->rx_info->entry); - - return rc; } static void ntb_transport_rxc_db(unsigned long data) @@ -1333,7 +1373,7 @@ static void ntb_transport_rxc_db(unsigned long data) break; } - if (qp->dma_chan) + if (i && qp->dma_chan) dma_async_issue_pending(qp->dma_chan); if (i == qp->rx_max_entry) { @@ -1609,7 +1649,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev, goto err1; entry->qp = qp; - ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, + ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q); } @@ -1634,7 +1674,7 @@ err2: while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) kfree(entry); err1: - while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) + while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) kfree(entry); if (qp->dma_chan) dma_release_channel(qp->dma_chan); @@ -1652,7 +1692,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue); */ void ntb_transport_free_queue(struct ntb_transport_qp *qp) { - struct ntb_transport_ctx *nt = qp->transport; struct pci_dev *pdev; struct ntb_queue_entry *entry; u64 qp_bit; @@ -1689,18 +1728,23 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp) qp->tx_handler = NULL; qp->event_handler = NULL; - while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) + while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) kfree(entry); - while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) { - dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n"); + while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) { + dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n"); + kfree(entry); + } + + while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) { + dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n"); kfree(entry); } while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) kfree(entry); - nt->qp_bitmap_free |= qp_bit; + qp->transport->qp_bitmap_free |= qp_bit; dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num); } @@ -1724,14 +1768,14 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len) if (!qp || qp->client_ready) return NULL; - entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); + entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q); if (!entry) return NULL; buf = entry->cb_data; *len = entry->len; - ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q); + ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q); return buf; } @@ -1757,15 +1801,18 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, if (!qp) return -EINVAL; - entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q); + entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q); if (!entry) return -ENOMEM; entry->cb_data = cb; entry->buf = data; entry->len = len; + entry->flags = 0; + + ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q); - ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q); + tasklet_schedule(&qp->rxc_db_work); return 0; } diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 8df1b1777745..59bb8556e43a 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -47,7 +47,7 @@ config OF_DYNAMIC config OF_ADDRESS def_bool y - depends on !SPARC + depends on !SPARC && HAS_IOMEM select OF_ADDRESS_PCI if PCI config OF_ADDRESS_PCI diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 18016341d5a9..9f71770b6226 
100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -979,7 +979,6 @@ static struct platform_driver unittest_driver = { .remove = unittest_remove, .driver = { .name = "unittest", - .owner = THIS_MODULE, .of_match_table = of_match_ptr(unittest_match), }, }; @@ -1666,7 +1665,6 @@ static const struct i2c_device_id unittest_i2c_dev_id[] = { static struct i2c_driver unittest_i2c_dev_driver = { .driver = { .name = "unittest-i2c-dev", - .owner = THIS_MODULE, }, .probe = unittest_i2c_dev_probe, .remove = unittest_i2c_dev_remove, @@ -1761,7 +1759,6 @@ static const struct i2c_device_id unittest_i2c_mux_id[] = { static struct i2c_driver unittest_i2c_mux_driver = { .driver = { .name = "unittest-i2c-mux", - .owner = THIS_MODULE, }, .probe = unittest_i2c_mux_probe, .remove = unittest_i2c_mux_remove, diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c index e17c539e4f6f..2dad7e820ff0 100644 --- a/drivers/phy/phy-sun4i-usb.c +++ b/drivers/phy/phy-sun4i-usb.c @@ -212,6 +212,7 @@ void sun4i_usb_phy_set_squelch_detect(struct phy *_phy, bool enabled) sun4i_usb_phy_write(phy, PHY_SQUELCH_DETECT, enabled ? 0 : 2, 2); } +EXPORT_SYMBOL_GPL(sun4i_usb_phy_set_squelch_detect); static struct phy_ops sun4i_usb_phy_ops = { .init = sun4i_usb_phy_init, diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c index 3510b81db3fa..08020dc2c7c8 100644 --- a/drivers/phy/phy-ti-pipe3.c +++ b/drivers/phy/phy-ti-pipe3.c @@ -28,6 +28,8 @@ #include <linux/delay.h> #include <linux/phy/omap_control_phy.h> #include <linux/of_platform.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> #define PLL_STATUS 0x00000004 #define PLL_GO 0x00000008 @@ -52,6 +54,8 @@ #define PLL_LOCK 0x2 #define PLL_IDLE 0x1 +#define SATA_PLL_SOFT_RESET BIT(18) + /* * This is an Empirical value that works, need to confirm the actual * value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status @@ -82,6 +86,9 @@ struct ti_pipe3 { struct clk *refclk; struct clk *div_clk; struct pipe3_dpll_map *dpll_map; + struct regmap *dpll_reset_syscon; /* ctrl. reg. acces */ + unsigned int dpll_reset_reg; /* reg. 
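/*
 * Illustrative sketch, not from the patch, of how the "syscon-pllreset"
 * fields declared above are resolved and used (the probe and exit hunks
 * below do the same, with device state and logging): the property is a
 * phandle plus one cell, so the regmap comes from the phandle and the
 * register offset from cell index 1, and the i783 workaround then pulses
 * the reset bit through regmap_update_bits().
 */
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static int sata_pll_pulse_reset(struct device_node *node)
{
	struct regmap *map;
	u32 reg;

	map = syscon_regmap_lookup_by_phandle(node, "syscon-pllreset");
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (of_property_read_u32_index(node, "syscon-pllreset", 1, &reg))
		return -EINVAL;

	/* assert, then release, the SATA PLL soft reset (bit 18) */
	regmap_update_bits(map, reg, BIT(18), BIT(18));
	regmap_update_bits(map, reg, BIT(18), 0);
	return 0;
}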
index within syscon */ + bool sata_refclk_enabled; }; static struct pipe3_dpll_map dpll_map_usb[] = { @@ -249,8 +256,11 @@ static int ti_pipe3_exit(struct phy *x) u32 val; unsigned long timeout; - /* SATA DPLL can't be powered down due to Errata i783 */ - if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) + /* If dpll_reset_syscon is not present we wont power down SATA DPLL + * due to Errata i783 + */ + if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") && + !phy->dpll_reset_syscon) return 0; /* PCIe doesn't have internal DPLL */ @@ -276,6 +286,14 @@ static int ti_pipe3_exit(struct phy *x) } } + /* i783: SATA needs control bit toggle after PLL unlock */ + if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) { + regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg, + SATA_PLL_SOFT_RESET, SATA_PLL_SOFT_RESET); + regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg, + SATA_PLL_SOFT_RESET, 0); + } + ti_pipe3_disable_clocks(phy); return 0; @@ -350,6 +368,21 @@ static int ti_pipe3_probe(struct platform_device *pdev) } } else { phy->wkupclk = ERR_PTR(-ENODEV); + phy->dpll_reset_syscon = syscon_regmap_lookup_by_phandle(node, + "syscon-pllreset"); + if (IS_ERR(phy->dpll_reset_syscon)) { + dev_info(&pdev->dev, + "can't get syscon-pllreset, sata dpll won't idle\n"); + phy->dpll_reset_syscon = NULL; + } else { + if (of_property_read_u32_index(node, + "syscon-pllreset", 1, + &phy->dpll_reset_reg)) { + dev_err(&pdev->dev, + "couldn't get pllreset reg. offset\n"); + return -EINVAL; + } + } } if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { @@ -402,10 +435,16 @@ static int ti_pipe3_probe(struct platform_device *pdev) platform_set_drvdata(pdev, phy); pm_runtime_enable(phy->dev); - /* Prevent auto-disable of refclk for SATA PHY due to Errata i783 */ - if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) - if (!IS_ERR(phy->refclk)) + + /* + * Prevent auto-disable of refclk for SATA PHY due to Errata i783 + */ + if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) { + if (!IS_ERR(phy->refclk)) { clk_prepare_enable(phy->refclk); + phy->sata_refclk_enabled = true; + } + } generic_phy = devm_phy_create(phy->dev, NULL, &ops); if (IS_ERR(generic_phy)) @@ -472,8 +511,18 @@ static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy) { if (!IS_ERR(phy->wkupclk)) clk_disable_unprepare(phy->wkupclk); - if (!IS_ERR(phy->refclk)) + if (!IS_ERR(phy->refclk)) { clk_disable_unprepare(phy->refclk); + /* + * SATA refclk needs an additional disable as we left it + * on in probe to avoid Errata i783 + */ + if (phy->sata_refclk_enabled) { + clk_disable_unprepare(phy->refclk); + phy->sata_refclk_enabled = false; + } + } + if (!IS_ERR(phy->div_clk)) clk_disable_unprepare(phy->div_clk); } diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig index cb1329919527..3271cd1abe7c 100644 --- a/drivers/platform/chrome/Kconfig +++ b/drivers/platform/chrome/Kconfig @@ -4,7 +4,6 @@ menuconfig CHROME_PLATFORMS bool "Platform support for Chrome hardware" - depends on X86 || ARM ---help--- Say Y here to get to see options for platform support for various Chromebooks and Chromeboxes. 
This option alone does diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 882744852aac..a9aa38903efe 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -599,9 +599,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, { struct ipr_trace_entry *trace_entry; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + unsigned int trace_index; - trace_entry = &ioa_cfg->trace[atomic_add_return - (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES]; + trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK; + trace_entry = &ioa_cfg->trace[trace_index]; trace_entry->time = jiffies; trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; trace_entry->type = type; @@ -1051,10 +1052,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd, static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg) { + unsigned int hrrq; + if (ioa_cfg->hrrq_num == 1) - return 0; - else - return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1; + hrrq = 0; + else { + hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index); + hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1; + } + return hrrq; } /** @@ -6263,21 +6269,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); - unsigned long hrrq_flags; + unsigned long lock_flags; scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { scsi_dma_unmap(scsi_cmd); - spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); + spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); scsi_cmd->scsi_done(scsi_cmd); - spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); + spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); } else { - spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + spin_lock(&ipr_cmd->hrrq->_lock); ipr_erp_start(ioa_cfg, ipr_cmd); - spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); + spin_unlock(&ipr_cmd->hrrq->_lock); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); } } diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 73790a1d0969..6b97ee45c7b4 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -1486,6 +1486,7 @@ struct ipr_ioa_cfg { #define IPR_NUM_TRACE_INDEX_BITS 8 #define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS) +#define IPR_TRACE_INDEX_MASK (IPR_NUM_TRACE_ENTRIES - 1) #define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES) char trace_start[8]; #define IPR_TRACE_START_LABEL "trace" diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 1b3a09473452..30f9ef0c0d4f 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp, if (resp) { resp(sp, fp, arg); res = true; - } else if (!IS_ERR(fp)) { - fc_frame_free(fp); } spin_lock_bh(&ep->ex_lock); @@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) * If new exch resp handler is valid then call that * first. 
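/*
 * Illustrative sketch, not from the patch, of the wrap-around fix in
 * ipr_trc_hook() above: atomic_add_return() yields a signed int, and once
 * the counter wraps negative, "% IPR_NUM_TRACE_ENTRIES" is negative too,
 * indexing before the array. With a power-of-two ring, masking the low bits
 * is correct for every counter value.
 */
#include <stdint.h>

#define RING_ENTRIES	256u		/* must be a power of two */
#define RING_MASK	(RING_ENTRIES - 1)

static unsigned int ring_index(int32_t counter)
{
	return (uint32_t)counter & RING_MASK;	/* in range even after wrap */
}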
*/ - fc_invoke_resp(ep, sp, fp); + if (!fc_invoke_resp(ep, sp, fp)) + fc_frame_free(fp); fc_exch_release(ep); return; @@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) fc_exch_hold(ep); if (!rc) fc_exch_delete(ep); - fc_invoke_resp(ep, sp, fp); + if (!fc_invoke_resp(ep, sp, fp)) + fc_frame_free(fp); if (has_rec) fc_exch_timer_set(ep, ep->r_a_tov); fc_exch_release(ep); diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index c6795941b45d..2d5909c4685c 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -1039,11 +1039,26 @@ restart: fc_fcp_pkt_hold(fsp); spin_unlock_irqrestore(&si->scsi_queue_lock, flags); - if (!fc_fcp_lock_pkt(fsp)) { + spin_lock_bh(&fsp->scsi_pkt_lock); + if (!(fsp->state & FC_SRB_COMPL)) { + fsp->state |= FC_SRB_COMPL; + /* + * TODO: dropping scsi_pkt_lock and then reacquiring + * again around fc_fcp_cleanup_cmd() is required, + * since fc_fcp_cleanup_cmd() calls into + * fc_seq_set_resp() and that func preempts cpu using + * schedule. May be schedule and related code should be + * removed instead of unlocking here to avoid scheduling + * while atomic bug. + */ + spin_unlock_bh(&fsp->scsi_pkt_lock); + fc_fcp_cleanup_cmd(fsp, error); + + spin_lock_bh(&fsp->scsi_pkt_lock); fc_io_compl(fsp); - fc_fcp_unlock_pkt(fsp); } + spin_unlock_bh(&fsp->scsi_pkt_lock); fc_fcp_pkt_release(fsp); spin_lock_irqsave(&si->scsi_queue_lock, flags); diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 8053f24f0349..98d9bb6ff725 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -2941,10 +2941,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; - unsigned long flags; del_timer_sync(&conn->transport_timer); + mutex_lock(&session->eh_mutex); spin_lock_bh(&session->frwd_lock); conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; if (session->leadconn == conn) { @@ -2956,28 +2956,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) } spin_unlock_bh(&session->frwd_lock); - /* - * Block until all in-progress commands for this connection - * time out or fail. 
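/*
 * Illustrative sketch, not from the patch, of the frame-ownership convention
 * the fc_exch.c hunks above adopt: fc_invoke_resp() no longer frees the
 * frame itself; it reports whether a handler consumed it, and each caller
 * frees it exactly once on the false path, so the frame has a single owner
 * at every point. The types here are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdlib.h>

struct frame { int payload; };
typedef void (*resp_fn)(struct frame *fp);

static bool invoke_resp(resp_fn resp, struct frame *fp)
{
	if (resp) {
		resp(fp);	/* the handler now owns (and frees) fp */
		return true;
	}
	return false;		/* untouched: ownership stays with the caller */
}

static void recv_one(resp_fn resp, struct frame *fp)
{
	if (!invoke_resp(resp, fp))
		free(fp);	/* not consumed: free here, exactly once */
}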
- */ - for (;;) { - spin_lock_irqsave(session->host->host_lock, flags); - if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */ - spin_unlock_irqrestore(session->host->host_lock, flags); - break; - } - spin_unlock_irqrestore(session->host->host_lock, flags); - msleep_interruptible(500); - iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): " - "host_busy %d host_failed %d\n", - atomic_read(&session->host->host_busy), - session->host->host_failed); - /* - * force eh_abort() to unblock - */ - wake_up(&conn->ehwait); - } - /* flush queued up work because we free the connection below */ iscsi_suspend_tx(conn); @@ -2994,6 +2972,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) if (session->leadconn == conn) session->leadconn = NULL; spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&session->eh_mutex); iscsi_destroy_conn(cls_conn); } diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 82b92c414a9c..437254e1c4de 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -738,7 +738,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, ql_log(ql_log_info, vha, 0x706f, "Issuing MPI reset.\n"); - if (IS_QLA83XX(ha)) { + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { uint32_t idc_control; qla83xx_idc_lock(vha, 0); diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 0e6ee3ca30e6..8b011aef12bd 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -67,10 +67,10 @@ * | | | 0xd031-0xd0ff | * | | | 0xd101-0xd1fe | * | | | 0xd214-0xd2fe | - * | Target Mode | 0xe079 | | - * | Target Mode Management | 0xf072 | 0xf002 | + * | Target Mode | 0xe080 | | + * | Target Mode Management | 0xf096 | 0xf002 | * | | | 0xf046-0xf049 | - * | Target Mode Task Management | 0x1000b | | + * | Target Mode Task Management | 0x1000d | | * ---------------------------------------------------------------------- */ diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index e86201d3b8c6..9ad819edcd67 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -274,6 +274,7 @@ #define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/ struct req_que; +struct qla_tgt_sess; /* * (sd.h is not exported, hence local inclusion) @@ -2026,6 +2027,7 @@ typedef struct fc_port { uint16_t port_id; unsigned long retry_delay_timestamp; + struct qla_tgt_sess *tgt_session; } fc_port_t; #include "qla_mr.h" @@ -3154,13 +3156,13 @@ struct qla_hw_data { /* Bit 21 of fw_attributes decides the MCTP capabilities */ #define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \ ((ha)->fw_attributes_ext[0] & BIT_0)) -#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha)) -#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha)) +#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) +#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) #define IS_PI_DIFB_DIX0_CAPABLE(ha) (0) -#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha)) +#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) #define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \ (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22)) -#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha)) +#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) #define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length) #define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha)) #define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) @@ 
-3579,6 +3581,16 @@ typedef struct scsi_qla_host { uint16_t fcoe_fcf_idx; uint8_t fcoe_vn_port_mac[6]; + /* list of commands waiting on workqueue */ + struct list_head qla_cmd_list; + struct list_head qla_sess_op_cmd_list; + spinlock_t cmd_list_lock; + + /* Counter to detect races between ELS and RSCN events */ + atomic_t generation_tick; + /* Time when global fcport update has been scheduled */ + int total_fcport_update_gen; + uint32_t vp_abort_cnt; struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 664013115c9d..11f2f3279eab 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -115,6 +115,8 @@ qla2x00_async_iocb_timeout(void *data) QLA_LOGIO_LOGIN_RETRIED : 0; qla2x00_post_async_login_done_work(fcport->vha, fcport, lio->u.logio.data); + } else if (sp->type == SRB_LOGOUT_CMD) { + qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT); } } @@ -497,7 +499,10 @@ void qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, uint16_t *data) { - qla2x00_mark_device_lost(vha, fcport, 1, 0); + /* Don't re-login in target mode */ + if (!fcport->tgt_session) + qla2x00_mark_device_lost(vha, fcport, 1, 0); + qlt_logo_completion_handler(fcport, data[0]); return; } @@ -1538,7 +1543,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) mem_size = (ha->fw_memory_size - 0x11000 + 1) * sizeof(uint16_t); } else if (IS_FWI2_CAPABLE(ha)) { - if (IS_QLA83XX(ha)) + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem); else if (IS_QLA81XX(ha)) fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); @@ -1550,7 +1555,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) mem_size = (ha->fw_memory_size - 0x100000 + 1) * sizeof(uint32_t); if (ha->mqenable) { - if (!IS_QLA83XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) mq_size = sizeof(struct qla2xxx_mq_chain); /* * Allocate maximum buffer size for all queues. @@ -2922,21 +2927,14 @@ qla2x00_rport_del(void *data) { fc_port_t *fcport = data; struct fc_rport *rport; - scsi_qla_host_t *vha = fcport->vha; unsigned long flags; spin_lock_irqsave(fcport->vha->host->host_lock, flags); rport = fcport->drport ? fcport->drport: fcport->rport; fcport->drport = NULL; spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); - if (rport) { + if (rport) fc_remote_port_delete(rport); - /* - * Release the target mode FC NEXUS in qla_target.c code - * if target mod is enabled. - */ - qlt_fc_port_deleted(vha, fcport); - } } /** @@ -3303,6 +3301,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) * Create target mode FC NEXUS in qla_target.c if target mode is * enabled.. 
*/ + qlt_fc_port_added(vha, fcport); spin_lock_irqsave(fcport->vha->host->host_lock, flags); @@ -3341,8 +3340,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) if (IS_QLAFX00(vha->hw)) { qla2x00_set_fcport_state(fcport, FCS_ONLINE); - qla2x00_reg_remote_port(vha, fcport); - return; + goto reg_port; } fcport->login_retry = 0; fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); @@ -3350,7 +3348,16 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) qla2x00_set_fcport_state(fcport, FCS_ONLINE); qla2x00_iidma_fcport(vha, fcport); qla24xx_update_fcport_fcp_prio(vha, fcport); - qla2x00_reg_remote_port(vha, fcport); + +reg_port: + if (qla_ini_mode_enabled(vha)) + qla2x00_reg_remote_port(vha, fcport); + else { + /* + * Create target mode FC NEXUS in qla_target.c + */ + qlt_fc_port_added(vha, fcport); + } } /* @@ -3375,6 +3382,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) LIST_HEAD(new_fcports); struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + int discovery_gen; /* If FL port exists, then SNS is present */ if (IS_FWI2_CAPABLE(ha)) @@ -3445,6 +3453,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) fcport->scan_state = QLA_FCPORT_SCAN; } + /* Mark the time right before querying FW for connected ports. + * This process is long, asynchronous and by the time it's done, + * collected information might not be accurate anymore. E.g. + * disconnected port might have re-connected and a brand new + * session has been created. In this case session's generation + * will be newer than discovery_gen. */ + qlt_do_generation_tick(vha, &discovery_gen); + rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); if (rval != QLA_SUCCESS) break; @@ -3460,20 +3476,44 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) continue; - if (fcport->scan_state == QLA_FCPORT_SCAN && - atomic_read(&fcport->state) == FCS_ONLINE) { - qla2x00_mark_device_lost(vha, fcport, - ql2xplogiabsentdevice, 0); - if (fcport->loop_id != FC_NO_LOOP_ID && - (fcport->flags & FCF_FCP2_DEVICE) == 0 && - fcport->port_type != FCT_INITIATOR && - fcport->port_type != FCT_BROADCAST) { - ha->isp_ops->fabric_logout(vha, - fcport->loop_id, - fcport->d_id.b.domain, - fcport->d_id.b.area, - fcport->d_id.b.al_pa); - qla2x00_clear_loop_id(fcport); + if (fcport->scan_state == QLA_FCPORT_SCAN) { + if (qla_ini_mode_enabled(base_vha) && + atomic_read(&fcport->state) == FCS_ONLINE) { + qla2x00_mark_device_lost(vha, fcport, + ql2xplogiabsentdevice, 0); + if (fcport->loop_id != FC_NO_LOOP_ID && + (fcport->flags & FCF_FCP2_DEVICE) == 0 && + fcport->port_type != FCT_INITIATOR && + fcport->port_type != FCT_BROADCAST) { + ha->isp_ops->fabric_logout(vha, + fcport->loop_id, + fcport->d_id.b.domain, + fcport->d_id.b.area, + fcport->d_id.b.al_pa); + qla2x00_clear_loop_id(fcport); + } + } else if (!qla_ini_mode_enabled(base_vha)) { + /* + * In target mode, explicitly kill + * sessions and log out of devices + * that are gone, so that we don't + * end up with an initiator using the + * wrong ACL (if the fabric recycles + * an FC address and we have a stale + * session around) and so that we don't + * report initiators that are no longer + * on the fabric. 
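/*
 * Illustrative sketch, not from the patch, of the generation-tick scheme the
 * qla2xxx hunks above and below introduce: a counter is bumped before each
 * long asynchronous fabric scan, every session is stamped with the counter
 * value at creation, and after the scan only sessions no newer than the
 * snapshot may be torn down -- anything newer reconnected mid-scan and must
 * survive. Wrap-around handling is omitted for brevity.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int generation_tick;

struct session {
	int generation;		/* tick value when the session was created */
};

static int generation_snapshot(void)
{
	/* a seq_cst RMW plays the role of atomic_inc_return() + wmb() */
	return atomic_fetch_add(&generation_tick, 1) + 1;
}

static bool may_delete(const struct session *s, int scan_gen)
{
	return s->generation <= scan_gen;	/* predates the scan: stale */
}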
+ */ + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077, + "port gone, logging out/killing session: " + "%8phC state 0x%x flags 0x%x fc4_type 0x%x " + "scan_state %d\n", + fcport->port_name, + atomic_read(&fcport->state), + fcport->flags, fcport->fc4_type, + fcport->scan_state); + qlt_fc_port_deleted(vha, fcport, + discovery_gen); } } } @@ -3494,6 +3534,28 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) (fcport->flags & FCF_LOGIN_NEEDED) == 0) continue; + /* + * If we're not an initiator, skip looking for devices + * and logging in. There's no reason for us to do it, + * and it seems to actively cause problems in target + * mode if we race with the initiator logging into us + * (we might get the "port ID used" status back from + * our login command and log out the initiator, which + * seems to cause havoc). + */ + if (!qla_ini_mode_enabled(base_vha)) { + if (fcport->scan_state == QLA_FCPORT_FOUND) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078, + "port %8phC state 0x%x flags 0x%x fc4_type 0x%x " + "scan_state %d (initiator mode disabled; skipping " + "login)\n", fcport->port_name, + atomic_read(&fcport->state), + fcport->flags, fcport->fc4_type, + fcport->scan_state); + } + continue; + } + if (fcport->loop_id == FC_NO_LOOP_ID) { fcport->loop_id = next_loopid; rval = qla2x00_find_new_loop_id( @@ -3520,16 +3582,38 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; - /* Find a new loop ID to use. */ - fcport->loop_id = next_loopid; - rval = qla2x00_find_new_loop_id(base_vha, fcport); - if (rval != QLA_SUCCESS) { - /* Ran out of IDs to use */ - break; - } + /* + * If we're not an initiator, skip looking for devices + * and logging in. There's no reason for us to do it, + * and it seems to actively cause problems in target + * mode if we race with the initiator logging into us + * (we might get the "port ID used" status back from + * our login command and log out the initiator, which + * seems to cause havoc). + */ + if (qla_ini_mode_enabled(base_vha)) { + /* Find a new loop ID to use. */ + fcport->loop_id = next_loopid; + rval = qla2x00_find_new_loop_id(base_vha, + fcport); + if (rval != QLA_SUCCESS) { + /* Ran out of IDs to use */ + break; + } - /* Login and update database */ - qla2x00_fabric_dev_login(vha, fcport, &next_loopid); + /* Login and update database */ + qla2x00_fabric_dev_login(vha, fcport, + &next_loopid); + } else { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079, + "new port %8phC state 0x%x flags 0x%x fc4_type " + "0x%x scan_state %d (initiator mode disabled; " + "skipping login)\n", + fcport->port_name, + atomic_read(&fcport->state), + fcport->flags, fcport->fc4_type, + fcport->scan_state); + } list_move_tail(&fcport->list, &vha->vp_fcports); } @@ -3725,11 +3809,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, fcport->fp_speed = new_fcport->fp_speed; /* - * If address the same and state FCS_ONLINE, nothing - * changed. + * If address the same and state FCS_ONLINE + * (or in target mode), nothing changed. */ if (fcport->d_id.b24 == new_fcport->d_id.b24 && - atomic_read(&fcport->state) == FCS_ONLINE) { + (atomic_read(&fcport->state) == FCS_ONLINE || + !qla_ini_mode_enabled(base_vha))) { break; } @@ -3749,6 +3834,22 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, * Log it out if still logged in and mark it for * relogin later. 
*/ + if (!qla_ini_mode_enabled(base_vha)) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080, + "port changed FC ID, %8phC" + " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n", + fcport->port_name, + fcport->d_id.b.domain, + fcport->d_id.b.area, + fcport->d_id.b.al_pa, + fcport->loop_id, + new_fcport->d_id.b.domain, + new_fcport->d_id.b.area, + new_fcport->d_id.b.al_pa); + fcport->d_id.b24 = new_fcport->d_id.b24; + break; + } + fcport->d_id.b24 = new_fcport->d_id.b24; fcport->flags |= FCF_LOGIN_NEEDED; if (fcport->loop_id != FC_NO_LOOP_ID && @@ -3768,6 +3869,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, if (found) continue; /* If device was not in our fcports list, then add it. */ + new_fcport->scan_state = QLA_FCPORT_FOUND; list_add_tail(&new_fcport->list, new_fcports); /* Allocate a new replacement fcport. */ @@ -4188,6 +4290,14 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha) atomic_read(&fcport->state) != FCS_UNCONFIGURED) { spin_unlock_irqrestore(&ha->vport_slock, flags); qla2x00_rport_del(fcport); + + /* + * Release the target mode FC NEXUS in + * qla_target.c, if target mod is enabled. + */ + qlt_fc_port_deleted(vha, fcport, + base_vha->total_fcport_update_gen); + spin_lock_irqsave(&ha->vport_slock, flags); } } diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 36fbd4c7af8f..6f02b26a35cf 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -1943,6 +1943,9 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; logio->control_flags = cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); + if (!sp->fcport->tgt_session || + !sp->fcport->tgt_session->keep_nport_handle) + logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT); logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); logio->port_id[0] = sp->fcport->d_id.b.al_pa; logio->port_id[1] = sp->fcport->d_id.b.area; diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 02b1c1c5355b..b2f713ad9034 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -2415,7 +2415,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, *orig_iocb_cnt = mcp->mb[10]; if (vha->hw->flags.npiv_supported && max_npiv_vports) *max_npiv_vports = mcp->mb[11]; - if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs) + if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || + IS_QLA27XX(vha->hw)) && max_fcfs) *max_fcfs = mcp->mb[12]; } @@ -3898,7 +3899,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) spin_lock_irqsave(&ha->hardware_lock, flags); if (!(rsp->options & BIT_0)) { WRT_REG_DWORD(rsp->rsp_q_out, 0); - if (!IS_QLA83XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) WRT_REG_DWORD(rsp->rsp_q_in, 0); } @@ -5345,7 +5346,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha) mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - if (!IS_QLA83XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index a28815b8276f..8a5cac8448c7 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2504,6 +2504,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->mbx_count = MAILBOX_REGISTER_COUNT; req_length = REQUEST_ENTRY_CNT_24XX; rsp_length = RESPONSE_ENTRY_CNT_2300; + ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; ha->max_loop_id = 
SNS_LAST_LOOP_ID_2300; ha->init_cb_size = sizeof(struct mid_init_cb_81xx); ha->gid_list_info_size = 8; @@ -3229,11 +3230,15 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, spin_lock_irqsave(vha->host->host_lock, flags); fcport->drport = rport; spin_unlock_irqrestore(vha->host->host_lock, flags); + qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen); set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); qla2xxx_wake_dpc(base_vha); } else { - fc_remote_port_delete(rport); - qlt_fc_port_deleted(vha, fcport); + int now; + if (rport) + fc_remote_port_delete(rport); + qlt_do_generation_tick(vha, &now); + qlt_fc_port_deleted(vha, fcport, now); } } @@ -3763,8 +3768,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, INIT_LIST_HEAD(&vha->vp_fcports); INIT_LIST_HEAD(&vha->work_list); INIT_LIST_HEAD(&vha->list); + INIT_LIST_HEAD(&vha->qla_cmd_list); + INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list); spin_lock_init(&vha->work_lock); + spin_lock_init(&vha->cmd_list_lock); sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); ql_dbg(ql_dbg_init, vha, 0x0041, diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 028e8c8a7de9..2feb5f38edcd 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -1697,7 +1697,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha) { uint32_t led_select_value = 0; - if (!IS_QLA83XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) goto out; if (ha->port_no == 0) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index b749026aa592..58651ecbd88c 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -113,6 +113,11 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, struct atio_from_isp *atio, uint16_t status, int qfull); static void qlt_disable_vha(struct scsi_qla_host *vha); +static void qlt_clear_tgt_db(struct qla_tgt *tgt); +static void qlt_send_notify_ack(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *ntfy, + uint32_t add_flags, uint16_t resp_code, int resp_code_valid, + uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan); /* * Global Variables */ @@ -122,6 +127,16 @@ static struct workqueue_struct *qla_tgt_wq; static DEFINE_MUTEX(qla_tgt_mutex); static LIST_HEAD(qla_tgt_glist); +/* This API intentionally takes dest as a parameter, rather than returning + * int value to avoid caller forgetting to issue wmb() after the store */ +void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest) +{ + scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev); + *dest = atomic_inc_return(&base_vha->generation_tick); + /* memory barrier */ + wmb(); +} + /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */ static struct qla_tgt_sess *qlt_find_sess_by_port_name( struct qla_tgt *tgt, @@ -381,14 +396,73 @@ static void qlt_free_session_done(struct work_struct *work) struct qla_tgt *tgt = sess->tgt; struct scsi_qla_host *vha = sess->vha; struct qla_hw_data *ha = vha->hw; + unsigned long flags; + bool logout_started = false; + fc_port_t fcport; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084, + "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" + " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n", + __func__, sess->se_sess, sess, sess->port_name, sess->loop_id, + sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa, + sess->logout_on_delete, 
sess->keep_nport_handle, + sess->plogi_ack_needed); BUG_ON(!tgt); + + if (sess->logout_on_delete) { + int rc; + + memset(&fcport, 0, sizeof(fcport)); + fcport.loop_id = sess->loop_id; + fcport.d_id = sess->s_id; + memcpy(fcport.port_name, sess->port_name, WWN_SIZE); + fcport.vha = vha; + fcport.tgt_session = sess; + + rc = qla2x00_post_async_logout_work(vha, &fcport, NULL); + if (rc != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0xf085, + "Schedule logo failed sess %p rc %d\n", + sess, rc); + else + logout_started = true; + } + /* * Release the target session for FC Nexus from fabric module code. */ if (sess->se_sess != NULL) ha->tgt.tgt_ops->free_session(sess); + if (logout_started) { + bool traced = false; + + while (!ACCESS_ONCE(sess->logout_completed)) { + if (!traced) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086, + "%s: waiting for sess %p logout\n", + __func__, sess); + traced = true; + } + msleep(100); + } + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087, + "%s: sess %p logout completed\n", + __func__, sess); + } + + spin_lock_irqsave(&ha->hardware_lock, flags); + + if (sess->plogi_ack_needed) + qlt_send_notify_ack(vha, &sess->tm_iocb, + 0, 0, 0, 0, 0, 0); + + list_del(&sess->sess_list_entry); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, "Unregistration of sess %p finished\n", sess); @@ -409,9 +483,9 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess) vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); - list_del(&sess->sess_list_entry); - if (sess->deleted) - list_del(&sess->del_list_entry); + if (!list_empty(&sess->del_list_entry)) + list_del_init(&sess->del_list_entry); + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; INIT_WORK(&sess->free_work, qlt_free_session_done); schedule_work(&sess->free_work); @@ -431,10 +505,10 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) loop_id = le16_to_cpu(n->u.isp24.nport_handle); if (loop_id == 0xFFFF) { -#if 0 /* FIXME: Re-enable Global event handling.. */ /* Global event */ - atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count); - qlt_clear_tgt_db(ha->tgt.qla_tgt); + atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); + qlt_clear_tgt_db(vha->vha_tgt.qla_tgt); +#if 0 /* FIXME: do we need to choose a session here? 
*/ if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { sess = list_entry(ha->tgt.qla_tgt->sess_list.next, typeof(*sess), sess_list_entry); @@ -489,27 +563,38 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess, struct qla_tgt *tgt = sess->tgt; uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5; - if (sess->deleted) - return; + if (sess->deleted) { + /* Upgrade to unconditional deletion in case it was temporary */ + if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING) + list_del(&sess->del_list_entry); + else + return; + } ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, "Scheduling sess %p for deletion\n", sess); - list_add_tail(&sess->del_list_entry, &tgt->del_sess_list); - sess->deleted = 1; - if (immediate) + if (immediate) { dev_loss_tmo = 0; + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; + list_add(&sess->del_list_entry, &tgt->del_sess_list); + } else { + sess->deleted = QLA_SESS_DELETION_PENDING; + list_add_tail(&sess->del_list_entry, &tgt->del_sess_list); + } sess->expires = jiffies + dev_loss_tmo * HZ; ql_dbg(ql_dbg_tgt, sess->vha, 0xe048, - "qla_target(%d): session for port %8phC (loop ID %d) scheduled for " - "deletion in %u secs (expires: %lu) immed: %d\n", - sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo, - sess->expires, immediate); + "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)" + " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n", + sess->vha->vp_idx, sess->port_name, sess->loop_id, + sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa, + dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete, + sess->generation); if (immediate) - schedule_delayed_work(&tgt->sess_del_work, 0); + mod_delayed_work(system_wq, &tgt->sess_del_work, 0); else schedule_delayed_work(&tgt->sess_del_work, sess->expires - jiffies); @@ -578,9 +663,9 @@ out_free_id_list: /* ha->hardware_lock supposed to be held on entry */ static void qlt_undelete_sess(struct qla_tgt_sess *sess) { - BUG_ON(!sess->deleted); + BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING); - list_del(&sess->del_list_entry); + list_del_init(&sess->del_list_entry); sess->deleted = 0; } @@ -599,7 +684,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work) del_list_entry); elapsed = jiffies; if (time_after_eq(elapsed, sess->expires)) { - qlt_undelete_sess(sess); + /* No turning back */ + list_del_init(&sess->del_list_entry); + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, "Timeout: sess %p about to be deleted\n", @@ -643,6 +730,13 @@ static struct qla_tgt_sess *qlt_create_sess( fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id); + /* Cannot undelete at this point */ + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + spin_unlock_irqrestore(&ha->hardware_lock, + flags); + return NULL; + } + if (sess->deleted) qlt_undelete_sess(sess); @@ -652,6 +746,9 @@ static struct qla_tgt_sess *qlt_create_sess( if (sess->local && !local) sess->local = 0; + + qlt_do_generation_tick(vha, &sess->generation); + spin_unlock_irqrestore(&ha->hardware_lock, flags); return sess; @@ -673,6 +770,14 @@ static struct qla_tgt_sess *qlt_create_sess( sess->s_id = fcport->d_id; sess->loop_id = fcport->loop_id; sess->local = local; + INIT_LIST_HEAD(&sess->del_list_entry); + + /* Under normal circumstances we want to logout from firmware when + * session eventually ends and release corresponding nport handle. + * In the exception cases (e.g. when new PLOGI is waiting) corresponding + * code will adjust these flags as necessary. */
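The qlt_do_generation_tick() helper introduced earlier, together with the max_gen check that qlt_fc_port_deleted() gains below, forms a simple staleness filter: every event that may schedule a session deletion samples the per-host generation counter, and a deletion request carrying an older generation than the session itself is discarded. A minimal standalone sketch of the idea, written with C11 atomics rather than the kernel's atomic_inc_return()/wmb() pair (the names here are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int generation_tick;

struct sess {
	int generation;	/* generation at which the session was (re)created */
};

/* Like qlt_do_generation_tick(): write through a pointer so callers
 * cannot forget that a barrier must follow the store. */
static void do_generation_tick(int *dest)
{
	*dest = atomic_fetch_add(&generation_tick, 1) + 1;
	atomic_thread_fence(memory_order_release);	/* wmb() analogue */
}

/* Like the max_gen test below: signed subtraction keeps the comparison
 * meaningful across counter wrap-around, as in the driver. */
static bool deletion_is_stale(const struct sess *sess, int max_gen)
{
	return max_gen - sess->generation < 0;
}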
+ sess->logout_on_delete = 1; + sess->keep_nport_handle = 0; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", @@ -705,6 +810,7 @@ static struct qla_tgt_sess *qlt_create_sess( spin_lock_irqsave(&ha->hardware_lock, flags); list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list); vha->vha_tgt.qla_tgt->sess_count++; + qlt_do_generation_tick(vha, &sess->generation); spin_unlock_irqrestore(&ha->hardware_lock, flags); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, @@ -718,7 +824,7 @@ static struct qla_tgt_sess *qlt_create_sess( } /* - * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() + * Called from qla2x00_reg_remote_port() */ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) { @@ -750,6 +856,10 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) mutex_unlock(&vha->vha_tgt.tgt_mutex); spin_lock_irqsave(&ha->hardware_lock, flags); + } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + /* Point of no return */ + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return; } else { kref_get(&sess->se_sess->sess_kref); @@ -780,27 +890,36 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) spin_unlock_irqrestore(&ha->hardware_lock, flags); } -void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) +/* + * max_gen - specifies maximum session generation + * at which this deletion request is still valid + */ +void +qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) { - struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_tgt_sess *sess; - unsigned long flags; if (!vha->hw->tgt.tgt_ops) return; - if (!tgt || (fcport->port_type != FCT_INITIATOR)) + if (!tgt) return; - spin_lock_irqsave(&ha->hardware_lock, flags); if (tgt->tgt_stop) { - spin_unlock_irqrestore(&ha->hardware_lock, flags); return; } sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); if (!sess) { - spin_unlock_irqrestore(&ha->hardware_lock, flags); + return; + } + + if (max_gen - sess->generation < 0) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092, + "Ignoring stale deletion request for se_sess %p / sess %p" + " for port %8phC, req_gen %d, sess_gen %d\n", + sess->se_sess, sess, sess->port_name, max_gen, + sess->generation); return; } @@ -808,7 +927,6 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) sess->local = 1; qlt_schedule_sess_for_deletion(sess, false); - spin_unlock_irqrestore(&ha->hardware_lock, flags); } static inline int test_tgt_sess_count(struct qla_tgt *tgt) @@ -1175,6 +1293,70 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, FCP_TMF_CMPL, true); } +static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag) +{ + struct qla_tgt_sess_op *op; + struct qla_tgt_cmd *cmd; + + spin_lock(&vha->cmd_list_lock); + + list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { + if (tag == op->atio.u.isp24.exchange_addr) { + op->aborted = true; + spin_unlock(&vha->cmd_list_lock); + return 1; + } + } + + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { + if (tag == cmd->atio.u.isp24.exchange_addr) { + cmd->state = QLA_TGT_STATE_ABORTED; + spin_unlock(&vha->cmd_list_lock); + return 1; + } + } + + spin_unlock(&vha->cmd_list_lock); + return 0; +} + +/* drop cmds for the given lun + * XXX only looks for cmds on the port through which lun reset was received + * XXX
does not go through the list of other port (which may have cmds + * for the same lun) + */ +static void abort_cmds_for_lun(struct scsi_qla_host *vha, + uint32_t lun, uint8_t *s_id) +{ + struct qla_tgt_sess_op *op; + struct qla_tgt_cmd *cmd; + uint32_t key; + + key = sid_to_key(s_id); + spin_lock(&vha->cmd_list_lock); + list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { + uint32_t op_key; + uint32_t op_lun; + + op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); + op_lun = scsilun_to_int( + (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun); + if (op_key == key && op_lun == lun) + op->aborted = true; + } + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { + uint32_t cmd_key; + uint32_t cmd_lun; + + cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); + cmd_lun = scsilun_to_int( + (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun); + if (cmd_key == key && cmd_lun == lun) + cmd->state = QLA_TGT_STATE_ABORTED; + } + spin_unlock(&vha->cmd_list_lock); +} + /* ha->hardware_lock supposed to be held on entry */ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess) @@ -1199,8 +1381,19 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, } spin_unlock(&se_sess->sess_cmd_lock); - if (!found_lun) - return -ENOENT; + /* cmd not in LIO lists, look in qla list */ + if (!found_lun) { + if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) { + /* send TASK_ABORT response immediately */ + qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false); + return 0; + } else { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081, + "unable to find cmd in driver or LIO for tag 0x%x\n", + abts->exchange_addr_to_abort); + return -ENOENT; + } + } ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, "qla_target(%d): task abort (tag=%d)\n", @@ -1284,6 +1477,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, return; } + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false); + return; + } + rc = __qlt_24xx_handle_abts(vha, abts, sess); if (rc != 0) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054, @@ -1726,20 +1924,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, struct qla_hw_data *ha = vha->hw; struct se_cmd *se_cmd = &cmd->se_cmd; - if (unlikely(cmd->aborted)) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, - "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)", - vha->vp_idx, cmd, se_cmd, se_cmd->tag); - - cmd->state = QLA_TGT_STATE_ABORTED; - cmd->cmd_flags |= BIT_6; - - qlt_send_term_exchange(vha, cmd, &cmd->atio, 0); - - /* !! At this point cmd could be already freed !! */ - return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED; - } - prm->cmd = cmd; prm->tgt = tgt; prm->rq_result = scsi_status; @@ -2301,6 +2485,19 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, unsigned long flags = 0; int res; + spin_lock_irqsave(&ha->hardware_lock, flags); + if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + cmd->state = QLA_TGT_STATE_PROCESSED; + if (cmd->sess->logout_completed) + /* no need to terminate. FW already freed exchange. 
*/ + qlt_abort_cmd_on_host_reset(cmd->vha, cmd); + else + qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return 0; + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + memset(&prm, 0, sizeof(prm)); qlt_check_srr_debug(cmd, &xmit_type); @@ -2313,9 +2510,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, &full_req_cnt); if (unlikely(res != 0)) { - if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED) - return 0; - return res; } @@ -2345,9 +2539,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, res = qlt_build_ctio_crc2_pkt(&prm, vha); else res = qlt_24xx_build_ctio_pkt(&prm, vha); - if (unlikely(res != 0)) + if (unlikely(res != 0)) { + vha->req->cnt += full_req_cnt; goto out_unmap_unlock; - + } pkt = (struct ctio7_to_24xx *)prm.pkt; @@ -2461,7 +2656,8 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) spin_lock_irqsave(&ha->hardware_lock, flags); - if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) { + if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) || + (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) { /* * Either a chip reset is active or this request was from * previous life, just abort the processing. @@ -2485,8 +2681,11 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) else res = qlt_24xx_build_ctio_pkt(&prm, vha); - if (unlikely(res != 0)) + if (unlikely(res != 0)) { + vha->req->cnt += prm.req_cnt; goto out_unlock_free_unmap; + } + pkt = (struct ctio7_to_24xx *)prm.pkt; pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | CTIO7_FLAGS_STATUS_MODE_0); @@ -2651,6 +2850,89 @@ out: /* If hardware_lock held on entry, might drop it, then reaquire */ /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ +static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *ntfy) +{ + struct nack_to_isp *nack; + struct qla_hw_data *ha = vha->hw; + request_t *pkt; + int ret = 0; + + ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, + "Sending TERM ELS CTIO (ha=%p)\n", ha); + + pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL); + if (pkt == NULL) { + ql_dbg(ql_dbg_tgt, vha, 0xe080, + "qla_target(%d): %s failed: unable to allocate " + "request packet\n", vha->vp_idx, __func__); + return -ENOMEM; + } + + pkt->entry_type = NOTIFY_ACK_TYPE; + pkt->entry_count = 1; + pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; + + nack = (struct nack_to_isp *)pkt; + nack->ox_id = ntfy->ox_id; + + nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; + if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { + nack->u.isp24.flags = ntfy->u.isp24.flags & + __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); + } + + /* terminate */ + nack->u.isp24.flags |= + __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE); + + nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; + nack->u.isp24.status = ntfy->u.isp24.status; + nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; + nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; + nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; + nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; + nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; + nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; + + qla2x00_start_iocbs(vha, vha->req); + return ret; +} + +static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *imm, int ha_locked) +{ + unsigned long flags = 0; + int rc; 
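The function opening here is another instance of this file's lock-optional wrapper idiom: a double-underscore helper that assumes ha->hardware_lock is already held, wrapped by an outer function that takes the lock only when the caller did not (the ha_locked flag). A minimal sketch of that shape, with hypothetical names (my_dev, do_work) standing in for the driver's types:

#include <linux/spinlock.h>

struct my_dev {
	spinlock_t hardware_lock;
};

/* Caller must already hold dev->hardware_lock. */
static void __do_work(struct my_dev *dev)
{
	/* ... build and post the IOCB ... */
}

/* Takes the lock only when the caller did not. */
static void do_work(struct my_dev *dev, int locked)
{
	unsigned long flags;

	if (locked) {
		__do_work(dev);
		return;
	}

	spin_lock_irqsave(&dev->hardware_lock, flags);
	__do_work(dev);
	spin_unlock_irqrestore(&dev->hardware_lock, flags);
}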
+ + if (qlt_issue_marker(vha, ha_locked) < 0) + return; + + if (ha_locked) { + rc = __qlt_send_term_imm_notif(vha, imm); + +#if 0 /* Todo */ + if (rc == -ENOMEM) + qlt_alloc_qfull_cmd(vha, imm, 0, 0); +#endif + goto done; + } + + spin_lock_irqsave(&vha->hw->hardware_lock, flags); + rc = __qlt_send_term_imm_notif(vha, imm); + +#if 0 /* Todo */ + if (rc == -ENOMEM) + qlt_alloc_qfull_cmd(vha, imm, 0, 0); +#endif + +done: + if (!ha_locked) + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); +} + +/* If hardware_lock held on entry, might drop it, then reaquire */ +/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, struct atio_from_isp *atio) @@ -2715,7 +2997,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, static void qlt_send_term_exchange(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) { - unsigned long flags; + unsigned long flags = 0; int rc; if (qlt_issue_marker(vha, ha_locked) < 0) @@ -2731,17 +3013,18 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha, rc = __qlt_send_term_exchange(vha, cmd, atio); if (rc == -ENOMEM) qlt_alloc_qfull_cmd(vha, atio, 0, 0); - spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); done: if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) || !cmd->cmd_sent_to_fw)) { - if (!ha_locked && !in_interrupt()) - msleep(250); /* just in case */ - - qlt_unmap_sg(vha, cmd); + if (cmd->sg_mapped) + qlt_unmap_sg(vha, cmd); vha->hw->tgt.tgt_ops->free_cmd(cmd); } + + if (!ha_locked) + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); + return; } @@ -2792,6 +3075,24 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha) } +void qlt_abort_cmd(struct qla_tgt_cmd *cmd) +{ + struct qla_tgt *tgt = cmd->tgt; + struct scsi_qla_host *vha = tgt->vha; + struct se_cmd *se_cmd = &cmd->se_cmd; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, + "qla_target(%d): terminating exchange for aborted cmd=%p " + "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, + se_cmd->tag); + + cmd->state = QLA_TGT_STATE_ABORTED; + cmd->cmd_flags |= BIT_6; + + qlt_send_term_exchange(vha, cmd, &cmd->atio, 0); +} +EXPORT_SYMBOL(qlt_abort_cmd); + void qlt_free_cmd(struct qla_tgt_cmd *cmd) { struct qla_tgt_sess *sess = cmd->sess; @@ -3015,7 +3316,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) dump_stack(); } - cmd->cmd_flags |= BIT_12; + cmd->cmd_flags |= BIT_17; ha->tgt.tgt_ops->free_cmd(cmd); } @@ -3177,7 +3478,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, skip_term: if (cmd->state == QLA_TGT_STATE_PROCESSED) { - ; + cmd->cmd_flags |= BIT_12; } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { int rx_status = 0; @@ -3191,9 +3492,11 @@ skip_term: ha->tgt.tgt_ops->handle_data(cmd); return; } else if (cmd->state == QLA_TGT_STATE_ABORTED) { + cmd->cmd_flags |= BIT_18; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); } else { + cmd->cmd_flags |= BIT_19; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, "qla_target(%d): A command in state (%d) should " "not return a CTIO complete\n", vha->vp_idx, cmd->state); @@ -3205,7 +3508,6 @@ skip_term: dump_stack(); } - ha->tgt.tgt_ops->free_cmd(cmd); } @@ -3263,6 +3565,13 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) if (tgt->tgt_stop) goto out_term; + if (cmd->state == QLA_TGT_STATE_ABORTED) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082, + "cmd with tag %u is 
aborted\n", + cmd->atio.u.isp24.exchange_addr); + goto out_term; + } + cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; cmd->se_cmd.tag = atio->u.isp24.exchange_addr; cmd->unpacked_lun = scsilun_to_int( @@ -3316,6 +3625,12 @@ out_term: static void qlt_do_work(struct work_struct *work) { struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); + scsi_qla_host_t *vha = cmd->vha; + unsigned long flags; + + spin_lock_irqsave(&vha->cmd_list_lock, flags); + list_del(&cmd->cmd_list); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); __qlt_do_work(cmd); } @@ -3345,6 +3660,11 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, cmd->loop_id = sess->loop_id; cmd->conf_compl_supported = sess->conf_compl_supported; + cmd->cmd_flags = 0; + cmd->jiffies_at_alloc = get_jiffies_64(); + + cmd->reset_count = vha->hw->chip_reset; + return cmd; } @@ -3362,14 +3682,25 @@ static void qlt_create_sess_from_atio(struct work_struct *work) unsigned long flags; uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id; + spin_lock_irqsave(&vha->cmd_list_lock, flags); + list_del(&op->cmd_list); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); + + if (op->aborted) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083, + "sess_op with tag %u is aborted\n", + op->atio.u.isp24.exchange_addr); + goto out_term; + } + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, - "qla_target(%d): Unable to find wwn login" - " (s_id %x:%x:%x), trying to create it manually\n", - vha->vp_idx, s_id[0], s_id[1], s_id[2]); + "qla_target(%d): Unable to find wwn login" + " (s_id %x:%x:%x), trying to create it manually\n", + vha->vp_idx, s_id[0], s_id[1], s_id[2]); if (op->atio.u.raw.entry_count > 1) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, - "Dropping multy entry atio %p\n", &op->atio); + "Dropping multy entry atio %p\n", &op->atio); goto out_term; } @@ -3434,10 +3765,25 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, memcpy(&op->atio, atio, sizeof(*atio)); op->vha = vha; + + spin_lock(&vha->cmd_list_lock); + list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list); + spin_unlock(&vha->cmd_list_lock); + INIT_WORK(&op->work, qlt_create_sess_from_atio); queue_work(qla_tgt_wq, &op->work); return 0; } + + /* Another WWN used to have our s_id. Our PLOGI scheduled its + * session deletion, but it's still in sess_del_work wq */ + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + ql_dbg(ql_dbg_io, vha, 0x3061, + "New command while old session %p is being deleted\n", + sess); + return -EFAULT; + } + /* * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 
*/ @@ -3451,13 +3797,13 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, return -ENOMEM; } - cmd->cmd_flags = 0; - cmd->jiffies_at_alloc = get_jiffies_64(); - - cmd->reset_count = vha->hw->chip_reset; - cmd->cmd_in_wq = 1; cmd->cmd_flags |= BIT_0; + + spin_lock(&vha->cmd_list_lock); + list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); + spin_unlock(&vha->cmd_list_lock); + INIT_WORK(&cmd->work, qlt_do_work); queue_work(qla_tgt_wq, &cmd->work); return 0; @@ -3471,6 +3817,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, struct scsi_qla_host *vha = sess->vha; struct qla_hw_data *ha = vha->hw; struct qla_tgt_mgmt_cmd *mcmd; + struct atio_from_isp *a = (struct atio_from_isp *)iocb; int res; uint8_t tmr_func; @@ -3511,6 +3858,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); tmr_func = TMR_LUN_RESET; + abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); break; case QLA_TGT_CLEAR_TS: @@ -3599,6 +3947,9 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) sizeof(struct atio_from_isp)); } + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) + return -EFAULT; + return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); } @@ -3664,22 +4015,280 @@ static int qlt_abort_task(struct scsi_qla_host *vha, return __qlt_abort_task(vha, iocb, sess); } +void qlt_logo_completion_handler(fc_port_t *fcport, int rc) +{ + if (fcport->tgt_session) { + if (rc != MBS_COMMAND_COMPLETE) { + ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093, + "%s: se_sess %p / sess %p from" + " port %8phC loop_id %#04x s_id %02x:%02x:%02x" + " LOGO failed: %#x\n", + __func__, + fcport->tgt_session->se_sess, + fcport->tgt_session, + fcport->port_name, fcport->loop_id, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, rc); + } + + fcport->tgt_session->logout_completed = 1; + } +} + +static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a, + struct imm_ntfy_from_isp *b) +{ + struct imm_ntfy_from_isp tmp; + memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp)); + memcpy(a, b, sizeof(struct imm_ntfy_from_isp)); + memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp)); +} + +/* +* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) +* +* Schedules sessions with matching port_id/loop_id but different wwn for +* deletion. Returns existing session with matching wwn if present. +* Null otherwise. 
+*/ +static struct qla_tgt_sess * +qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn, + port_id_t port_id, uint16_t loop_id) +{ + struct qla_tgt_sess *sess = NULL, *other_sess; + uint64_t other_wwn; + + list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) { + + other_wwn = wwn_to_u64(other_sess->port_name); + + if (wwn == other_wwn) { + WARN_ON(sess); + sess = other_sess; + continue; + } + + /* find other sess with nport_id collision */ + if (port_id.b24 == other_sess->s_id.b24) { + if (loop_id != other_sess->loop_id) { + ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c, + "Invalidating sess %p loop_id %d wwn %llx.\n", + other_sess, other_sess->loop_id, other_wwn); + + /* + * logout_on_delete is set by default, but another + * session that has the same s_id/loop_id combo + * might have cleared it when requesting this session + * deletion, so don't touch it + */ + qlt_schedule_sess_for_deletion(other_sess, true); + } else { + /* + * Another wwn used to have our s_id/loop_id + * combo - kill the session, but don't log out + */ + sess->logout_on_delete = 0; + qlt_schedule_sess_for_deletion(other_sess, + true); + } + continue; + } + + /* find other sess with nport handle collision */ + if (loop_id == other_sess->loop_id) { + ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d, + "Invalidating sess %p loop_id %d wwn %llx.\n", + other_sess, other_sess->loop_id, other_wwn); + + /* Same loop_id but different s_id + * Ok to kill and logout */ + qlt_schedule_sess_for_deletion(other_sess, true); + } + } + + return sess; +} + +/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */ +static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id) +{ + struct qla_tgt_sess_op *op; + struct qla_tgt_cmd *cmd; + uint32_t key; + int count = 0; + + key = (((u32)s_id->b.domain << 16) | + ((u32)s_id->b.area << 8) | + ((u32)s_id->b.al_pa)); + + spin_lock(&vha->cmd_list_lock); + list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { + uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); + if (op_key == key) { + op->aborted = true; + count++; + } + } + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { + uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); + if (cmd_key == key) { + cmd->state = QLA_TGT_STATE_ABORTED; + count++; + } + } + spin_unlock(&vha->cmd_list_lock); + + return count; +} + /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire */ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *iocb) { + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_sess *sess = NULL; + uint64_t wwn; + port_id_t port_id; + uint16_t loop_id; + uint16_t wd3_lo; int res = 0; + wwn = wwn_to_u64(iocb->u.isp24.port_name); + + port_id.b.domain = iocb->u.isp24.port_id[2]; + port_id.b.area = iocb->u.isp24.port_id[1]; + port_id.b.al_pa = iocb->u.isp24.port_id[0]; + port_id.b.rsvd_1 = 0; + + loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode); + /* res = 1 means ack at the end of thread + * res = 0 means ack async/later. + */
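abort_cmds_for_s_id() above open-codes the same reduction that the sid_to_key() inline added to qla_target.h later in this patch provides: the three S_ID bytes (domain, area, al_pa) are folded into a single 24-bit lookup key. A tiny self-contained illustration:

#include <assert.h>
#include <stdint.h>

/* Pack an FC S_ID (domain, area, al_pa) into one 24-bit key. */
static uint32_t sid_to_key(const uint8_t *s_id)
{
	return ((uint32_t)s_id[0] << 16) |	/* domain */
	       ((uint32_t)s_id[1] << 8) |	/* area */
	       (uint32_t)s_id[2];		/* al_pa */
}

int main(void)
{
	uint8_t s_id[3] = { 0x01, 0x02, 0x03 };

	assert(sid_to_key(s_id) == 0x010203);
	return 0;
}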
switch (iocb->u.isp24.status_subcode) { case ELS_PLOGI: + + /* Mark all stale commands in qla_tgt_wq for deletion */ + abort_cmds_for_s_id(vha, &port_id); + + if (wwn) + sess = qlt_find_sess_invalidate_other(tgt, wwn, + port_id, loop_id); + + if (!sess || IS_SW_RESV_ADDR(sess->s_id)) { + res = 1; + break; + } + + if (sess->plogi_ack_needed) { + /* + * Initiator sent another PLOGI before last PLOGI could + * finish. Swap plogi iocbs and terminate old one + * without acking, new one will get acked when session + * deletion completes. + */ + ql_log(ql_log_warn, sess->vha, 0xf094, + "sess %p received double plogi.\n", sess); + + qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb); + + qlt_send_term_imm_notif(vha, iocb, 1); + + res = 0; + break; + } + + res = 0; + + /* + * Save immediate Notif IOCB for Ack when sess is done + * and being deleted. + */ + memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb)); + sess->plogi_ack_needed = 1; + + /* + * Under normal circumstances we want to release nport handle + * during LOGO process to avoid nport handle leaks inside FW. + * The exception is when LOGO is done while another PLOGI with + * the same nport handle is waiting as might be the case here. + * Note: there is always a possibility of a race where session + * deletion has already started for other reasons (e.g. ACL + * removal) and now PLOGI arrives: + * 1. if PLOGI arrived in FW after nport handle has been freed, + * FW must have assigned this PLOGI a new/same handle and we + * can proceed ACK'ing it as usual when session deletion + * completes. + * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT + * bit reached it, the handle has now been released. We'll + * get an error when we ACK this PLOGI. Nothing will be sent + * back to initiator. Initiator should eventually retry + * PLOGI and situation will correct itself. + */ + sess->keep_nport_handle = ((sess->loop_id == loop_id) && + (sess->s_id.b24 == port_id.b24)); + qlt_schedule_sess_for_deletion(sess, true); + break; + case ELS_PRLI: + wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); + + if (wwn) + sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id, + loop_id); + + if (sess != NULL) { + if (sess->deleted) { + /* + * Impatient initiator sent PRLI before last + * PLOGI could finish. Will force him to re-try, + * while last one finishes. + */ + ql_log(ql_log_warn, sess->vha, 0xf095, + "sess %p PRLI received, before plogi ack.\n", + sess); + qlt_send_term_imm_notif(vha, iocb, 1); + res = 0; + break; + } + + /* + * This shouldn't happen under normal circumstances, + * since we have deleted the old session during PLOGI + */ + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096, + "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n", + sess->loop_id, sess, iocb->u.isp24.nport_handle); + + sess->local = 0; + sess->loop_id = loop_id; + sess->s_id = port_id; + + if (wd3_lo & BIT_7) + sess->conf_compl_supported = 1; + + } + res = 1; /* send notify ack */ + + /* Make session global (not used in fabric mode) */ + if (ha->current_topology != ISP_CFG_F) { + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } else { + /* todo: else - create sess here.
*/ + res = 1; /* send notify ack */ + } + + break; + case ELS_LOGO: case ELS_PRLO: res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); @@ -3697,6 +4306,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, break; } + case ELS_FLOGI: /* should never happen */ default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, "qla_target(%d): Unsupported ELS command %x " @@ -5012,6 +5622,11 @@ static void qlt_abort_work(struct qla_tgt *tgt, if (!sess) goto out_term; } else { + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + sess = NULL; + goto out_term; + } + kref_get(&sess->se_sess->sess_kref); } @@ -5066,6 +5681,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt, if (!sess) goto out_term; } else { + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + sess = NULL; + goto out_term; + } + kref_get(&sess->se_sess->sess_kref); } @@ -5552,6 +6172,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha) /* Adjust ring index */ WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); + RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha)); } void @@ -5793,7 +6414,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) if (!QLA_TGT_MODE_ENABLED()) return; - if (ha->mqenable || IS_QLA83XX(ha)) { + if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; } else { diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 985d76dd706b..bca584ae45b7 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -167,7 +167,24 @@ struct imm_ntfy_from_isp { uint32_t srr_rel_offs; uint16_t srr_ui; uint16_t srr_ox_id; - uint8_t reserved_4[19]; + union { + struct { + uint8_t node_name[8]; + } plogi; /* PLOGI/ADISC/PDISC */ + struct { + /* PRLI word 3 bit 0-15 */ + uint16_t wd3_lo; + uint8_t resv0[6]; + } prli; + struct { + uint8_t port_id[3]; + uint8_t resv1; + uint16_t nport_handle; + uint16_t resv2; + } req_els; + } u; + uint8_t port_name[8]; + uint8_t resv3[3]; uint8_t vp_index; uint32_t reserved_5; uint8_t port_id[3]; @@ -234,6 +251,7 @@ struct nack_to_isp { uint8_t reserved[2]; uint16_t ox_id; } __packed; +#define NOTIFY_ACK_FLAGS_TERMINATE BIT_3 #define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0 #define NOTIFY_ACK_SRR_FLAGS_REJECT 1 @@ -790,13 +808,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *); #define FC_TM_REJECT 4 #define FC_TM_FAILED 5 -/* - * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was - * terminated, so no more actions is needed and success should be returned - * to target. 
- */ -#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717 - #if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G) #define pci_dma_lo32(a) (a & 0xffffffff) #define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff) @@ -874,6 +885,15 @@ struct qla_tgt_sess_op { struct scsi_qla_host *vha; struct atio_from_isp atio; struct work_struct work; + struct list_head cmd_list; + bool aborted; +}; + +enum qla_sess_deletion { + QLA_SESS_DELETION_NONE = 0, + QLA_SESS_DELETION_PENDING = 1, /* hopefully we can get rid of + * this one */ + QLA_SESS_DELETION_IN_PROGRESS = 2, }; /* @@ -884,8 +904,15 @@ struct qla_tgt_sess { port_id_t s_id; unsigned int conf_compl_supported:1; - unsigned int deleted:1; + unsigned int deleted:2; unsigned int local:1; + unsigned int logout_on_delete:1; + unsigned int plogi_ack_needed:1; + unsigned int keep_nport_handle:1; + + unsigned char logout_completed; + + int generation; struct se_session *se_sess; struct scsi_qla_host *vha; @@ -897,6 +924,10 @@ struct qla_tgt_sess { uint8_t port_name[WWN_SIZE]; struct work_struct free_work; + + union { + struct imm_ntfy_from_isp tm_iocb; + }; }; struct qla_tgt_cmd { @@ -912,7 +943,6 @@ struct qla_tgt_cmd { unsigned int conf_compl_supported:1; unsigned int sg_mapped:1; unsigned int free_sg:1; - unsigned int aborted:1; /* Needed in case of SRR */ unsigned int write_data_transferred:1; unsigned int ctx_dsd_alloced:1; unsigned int q_full:1; @@ -961,6 +991,9 @@ struct qla_tgt_cmd { * BIT_14 - Back end data received/sent. * BIT_15 - SRR prepare ctio * BIT_16 - complete free + * BIT_17 - flush - qlt_abort_cmd_on_host_reset + * BIT_18 - completion w/abort status + * BIT_19 - completion w/unknown status */ uint32_t cmd_flags; }; @@ -1026,6 +1059,10 @@ struct qla_tgt_srr_ctio { struct qla_tgt_cmd *cmd; }; +/* Check for Switch reserved address */ +#define IS_SW_RESV_ADDR(_s_id) \ + ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc)) + #define QLA_TGT_XMIT_DATA 1 #define QLA_TGT_XMIT_STATUS 2 #define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA) @@ -1043,7 +1080,7 @@ extern int qlt_lport_register(void *, u64, u64, u64, extern void qlt_lport_deregister(struct scsi_qla_host *); extern void qlt_unreg_sess(struct qla_tgt_sess *); extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); -extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *); +extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int); extern int __init qlt_init(void); extern void qlt_exit(void); extern void qlt_update_vp_map(struct scsi_qla_host *, int); @@ -1073,12 +1110,23 @@ static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha) ha->host->active_mode |= MODE_INITIATOR; } +static inline uint32_t sid_to_key(const uint8_t *s_id) +{ + uint32_t key; + + key = (((unsigned long)s_id[0] << 16) | + ((unsigned long)s_id[1] << 8) | + (unsigned long)s_id[2]); + return key; +} + /* * Exported symbols from qla_target.c LLD logic used by qla2xxx code.. 
*/ extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); +extern void qlt_abort_cmd(struct qla_tgt_cmd *); extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); @@ -1109,5 +1157,7 @@ extern void qlt_stop_phase2(struct qla_tgt *); extern irqreturn_t qla83xx_msix_atio_q(int, void *); extern void qlt_83xx_iospace_config(struct qla_hw_data *); extern int qlt_free_qfull_cmds(struct scsi_qla_host *); +extern void qlt_logo_completion_handler(fc_port_t *, int); +extern void qlt_do_generation_tick(struct scsi_qla_host *, int *); #endif /* __QLA_TARGET_H */ diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index d9a8c6084346..9224a06646e6 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -374,7 +374,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - + cmd->cmd_flags |= BIT_3; cmd->bufflen = se_cmd->data_length; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); @@ -405,7 +405,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd) se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) { spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, - 3000); + 3 * HZ); return 0; } spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); @@ -541,12 +541,10 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) cmd->cmd_flags |= BIT_4; cmd->bufflen = se_cmd->data_length; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); - cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); cmd->sg_cnt = se_cmd->t_data_nents; cmd->sg = se_cmd->t_data_sg; cmd->offset = 0; - cmd->cmd_flags |= BIT_3; cmd->prot_sg_cnt = se_cmd->t_prot_nents; cmd->prot_sg = se_cmd->t_prot_sg; @@ -571,7 +569,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) cmd->sg_cnt = 0; cmd->offset = 0; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); - cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); if (cmd->cmd_flags & BIT_5) { pr_crit("Bit_5 already set for cmd = %p.\n", cmd); dump_stack(); @@ -636,14 +633,7 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - struct scsi_qla_host *vha = cmd->vha; - struct qla_hw_data *ha = vha->hw; - - if (!cmd->sg_mapped) - return; - - pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); - cmd->sg_mapped = 0; + qlt_abort_cmd(cmd); } static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, @@ -1149,9 +1139,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id( return NULL; } - key = (((unsigned long)s_id[0] << 16) | - ((unsigned long)s_id[1] << 8) | - (unsigned long)s_id[2]); + key = sid_to_key(s_id); pr_debug("find_sess_by_s_id: 0x%06x\n", key); se_nacl = btree_lookup32(&lport->lport_fcport_map, key); @@ -1186,9 +1174,7 @@ static void tcm_qla2xxx_set_sess_by_s_id( void *slot; int rc; - key = (((unsigned long)s_id[0] << 16) | - ((unsigned long)s_id[1] << 8) | - (unsigned long)s_id[2]); + key = sid_to_key(s_id); pr_debug("set_sess_by_s_id: %06x\n", key); slot = btree_lookup32(&lport->lport_fcport_map, key); @@ -1544,6 +1530,10 @@ static void 
tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, } sess->conf_compl_supported = conf_compl_supported; + + /* Reset logout parameters to default */ + sess->logout_on_delete = 1; + sess->keep_nport_handle = 0; } /* diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 106884a5444e..cfadccef045c 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -944,7 +944,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses, scmd->sdb.length); scmd->sdb.table.sgl = &ses->sense_sgl; scmd->sc_data_direction = DMA_FROM_DEVICE; - scmd->sdb.table.nents = 1; + scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1; scmd->cmnd[0] = REQUEST_SENSE; scmd->cmnd[4] = scmd->sdb.length; scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index b1a263137a23..448ebdaa3d69 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -583,7 +583,7 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask) static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq) { - if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS) + if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS) return; __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free); } @@ -597,8 +597,8 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq) if (mq) { if (nents <= SCSI_MAX_SG_SEGMENTS) { - sdb->table.nents = nents; - sg_init_table(sdb->table.sgl, sdb->table.nents); + sdb->table.nents = sdb->table.orig_nents = nents; + sg_init_table(sdb->table.sgl, nents); return 0; } first_chunk = sdb->table.sgl; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 3b2fcb4fada0..a20da8c25b4f 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2770,9 +2770,9 @@ static int sd_revalidate_disk(struct gendisk *disk) max_xfer = sdkp->max_xfer_blocks; max_xfer <<= ilog2(sdp->sector_size) - 9; - max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), - max_xfer); - blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer); + sdkp->disk->queue->limits.max_sectors = + min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer); + set_capacity(disk, sdkp->capacity); sd_config_write_same(sdkp); kfree(buffer); diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c index bfa42620a3f6..940781183fac 100644 --- a/drivers/staging/comedi/drivers/das1800.c +++ b/drivers/staging/comedi/drivers/das1800.c @@ -1266,6 +1266,7 @@ static const struct das1800_board *das1800_probe(struct comedi_device *dev) if (index == das1801hc || index == das1802hc) return board; index = das1801hc; + break; default: dev_err(dev->class_dev, "Board model: probe returned 0x%x (unknown, please report)\n", diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c index 9c934e6d2ea1..c61add46b426 100644 --- a/drivers/staging/lustre/lustre/obdclass/debug.c +++ b/drivers/staging/lustre/lustre/obdclass/debug.c @@ -40,7 +40,7 @@ #define DEBUG_SUBSYSTEM D_OTHER -#include <linux/unaligned/access_ok.h> +#include <asm/unaligned.h> #include "../include/obd_support.h" #include "../include/lustre_debug.h" diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index b0c8e235b982..69bdc8f29b59 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -1483,8 +1483,9 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, } } - 
if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) { - if (conf->assoc) { + if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) && + priv->op_mode != NL80211_IFTYPE_AP) { + if (conf->assoc && conf->beacon_rate) { CARDbUpdateTSF(priv, conf->beacon_rate->hw_value, conf->sync_tsf); diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 4e68b62193ed..cd77a064c772 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -3998,7 +3998,13 @@ get_immediate: } transport_err: - iscsit_take_action_for_connection_exit(conn); + /* + * Avoid the normal connection failure code-path if this connection + * is still within LOGIN mode, and iscsi_np process context is + * responsible for cleaning up the early connection failure. + */ + if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN) + iscsit_take_action_for_connection_exit(conn); out: return 0; } @@ -4082,7 +4088,7 @@ reject: int iscsi_target_rx_thread(void *arg) { - int ret; + int ret, rc; u8 buffer[ISCSI_HDR_LEN], opcode; u32 checksum = 0, digest = 0; struct iscsi_conn *conn = arg; @@ -4092,10 +4098,16 @@ int iscsi_target_rx_thread(void *arg) * connection recovery / failure event can be triggered externally. */ allow_signal(SIGINT); + /* + * Wait for iscsi_post_login_handler() to complete before allowing + * incoming iscsi/tcp socket I/O, and/or failing the connection. + */ + rc = wait_for_completion_interruptible(&conn->rx_login_comp); + if (rc < 0) + return 0; if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { struct completion comp; - int rc; init_completion(&comp); rc = wait_for_completion_interruptible(&comp); @@ -4532,7 +4544,18 @@ static void iscsit_logout_post_handler_closesession( struct iscsi_conn *conn) { struct iscsi_session *sess = conn->sess; - int sleep = cmpxchg(&conn->tx_thread_active, true, false); + int sleep = 1; + /* + * Traditional iscsi/tcp will invoke this logic from TX thread + * context during session logout, so clear tx_thread_active and + * sleep if iscsit_close_connection() has not already occurred. + * + * Since iser-target invokes this logic from its own workqueue, + * always sleep waiting for RX/TX thread shutdown to complete + * within iscsit_close_connection(). + */
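In both logout post-handlers here, cmpxchg(&conn->tx_thread_active, true, false) clears the flag and hands back its old value in one atomic step, so exactly one path observes "the TX thread was still active" and takes the sleep branch. Roughly the same claim-once idiom expressed with C11 atomics, as a sketch:

#include <stdatomic.h>
#include <stdbool.h>

/* Returns the old value of the flag while clearing it, mirroring
 * sleep = cmpxchg(&conn->tx_thread_active, true, false): true means
 * the flag was still set, i.e. the caller should wait for shutdown. */
static bool tx_thread_was_active(atomic_bool *tx_thread_active)
{
	bool expected = true;

	return atomic_compare_exchange_strong(tx_thread_active,
					      &expected, false);
}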
+ if (conn->conn_transport->transport_type == ISCSI_TCP) + sleep = cmpxchg(&conn->tx_thread_active, true, false); atomic_set(&conn->conn_logout_remove, 0); complete(&conn->conn_logout_comp); @@ -4546,7 +4569,10 @@ static void iscsit_logout_post_handler_samecid( struct iscsi_conn *conn) { - int sleep = cmpxchg(&conn->tx_thread_active, true, false); + int sleep = 1; + + if (conn->conn_transport->transport_type == ISCSI_TCP) + sleep = cmpxchg(&conn->tx_thread_active, true, false); atomic_set(&conn->conn_logout_remove, 0); complete(&conn->conn_logout_comp); @@ -4765,6 +4791,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) struct iscsi_session *sess; struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; struct se_session *se_sess, *se_sess_tmp; + LIST_HEAD(free_list); int session_count = 0; spin_lock_bh(&se_tpg->session_lock); @@ -4786,14 +4813,17 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) } atomic_set(&sess->session_reinstatement, 1); spin_unlock(&sess->conn_lock); - spin_unlock_bh(&se_tpg->session_lock); - iscsit_free_session(sess); - spin_lock_bh(&se_tpg->session_lock); + list_move_tail(&se_sess->sess_list, &free_list); + } + spin_unlock_bh(&se_tpg->session_lock); + + list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) { + sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; + iscsit_free_session(sess); session_count++; } - spin_unlock_bh(&se_tpg->session_lock); pr_debug("Released %d iSCSI Session(s) from Target Portal" " Group: %hu\n", session_count, tpg->tpgt); diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 3d0fe4ff5590..7e8f65e5448f 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -82,6 +82,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn) init_completion(&conn->conn_logout_comp); init_completion(&conn->rx_half_close_comp); init_completion(&conn->tx_half_close_comp); + init_completion(&conn->rx_login_comp); spin_lock_init(&conn->cmd_lock); spin_lock_init(&conn->conn_usage_lock); spin_lock_init(&conn->immed_queue_lock); @@ -644,7 +645,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn) iscsit_start_nopin_timer(conn); } -static int iscsit_start_kthreads(struct iscsi_conn *conn) +int iscsit_start_kthreads(struct iscsi_conn *conn) { int ret = 0; @@ -679,6 +680,7 @@ static int iscsit_start_kthreads(struct iscsi_conn *conn) return 0; out_tx: + send_sig(SIGINT, conn->tx_thread, 1); kthread_stop(conn->tx_thread); conn->tx_thread_active = false; out_bitmap: @@ -689,7 +691,7 @@ out_bitmap: return ret; } -int iscsi_post_login_handler( +void iscsi_post_login_handler( struct iscsi_np *np, struct iscsi_conn *conn, u8 zero_tsih) @@ -699,7 +701,6 @@ int iscsi_post_login_handler( struct se_session *se_sess = sess->se_sess; struct iscsi_portal_group *tpg = sess->tpg; struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; - int rc; iscsit_inc_conn_usage_count(conn); @@ -739,10 +740,6 @@ int iscsi_post_login_handler( sess->sess_ops->InitiatorName); spin_unlock_bh(&sess->conn_lock); - rc = iscsit_start_kthreads(conn); - if (rc) - return rc; - iscsi_post_login_start_timers(conn); /* * Determine CPU mask to ensure connection's RX and TX kthreads @@ -751,15 +748,20 @@ int iscsi_post_login_handler( iscsit_thread_get_cpumask(conn); conn->conn_rx_reset_cpumask = 1; conn->conn_tx_reset_cpumask = 1; - + /* + *
Wakeup the sleeping iscsi_target_rx_thread() now that + * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state. + */ + complete(&conn->rx_login_comp); iscsit_dec_conn_usage_count(conn); + if (stop_timer) { spin_lock_bh(&se_tpg->session_lock); iscsit_stop_time2retain_timer(sess); spin_unlock_bh(&se_tpg->session_lock); } iscsit_dec_session_usage_count(sess); - return 0; + return; } iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1); @@ -800,10 +802,6 @@ int iscsi_post_login_handler( " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt); spin_unlock_bh(&se_tpg->session_lock); - rc = iscsit_start_kthreads(conn); - if (rc) - return rc; - iscsi_post_login_start_timers(conn); /* * Determine CPU mask to ensure connection's RX and TX kthreads @@ -812,10 +810,12 @@ int iscsi_post_login_handler( iscsit_thread_get_cpumask(conn); conn->conn_rx_reset_cpumask = 1; conn->conn_tx_reset_cpumask = 1; - + /* + * Wakeup the sleeping iscsi_target_rx_thread() now that + * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state. + */ + complete(&conn->rx_login_comp); iscsit_dec_conn_usage_count(conn); - - return 0; } static void iscsi_handle_login_thread_timeout(unsigned long data) @@ -1380,23 +1380,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) if (ret < 0) goto new_sess_out; - if (!conn->sess) { - pr_err("struct iscsi_conn session pointer is NULL!\n"); - goto new_sess_out; - } - iscsi_stop_login_thread_timer(np); - if (signal_pending(current)) - goto new_sess_out; - if (ret == 1) { tpg_np = conn->tpg_np; - ret = iscsi_post_login_handler(np, conn, zero_tsih); - if (ret < 0) - goto new_sess_out; - + iscsi_post_login_handler(np, conn, zero_tsih); iscsit_deaccess_np(np, tpg, tpg_np); } diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h index 1c7358081533..57aa0d0fd820 100644 --- a/drivers/target/iscsi/iscsi_target_login.h +++ b/drivers/target/iscsi/iscsi_target_login.h @@ -12,7 +12,8 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *); extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *); -extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); +extern int iscsit_start_kthreads(struct iscsi_conn *); +extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, bool, bool); extern int iscsi_target_login_thread(void *); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 8c02fa34716f..f9cde9141836 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -17,6 +17,7 @@ ******************************************************************************/ #include <linux/ctype.h> +#include <linux/kthread.h> #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> @@ -361,10 +362,24 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log ntohl(login_rsp->statsn), login->rsp_length); padding = ((-login->rsp_length) & 3); + /* + * Before sending the last login response containing the transition + * bit for full-feature-phase, go ahead and start up TX/RX threads + * now to avoid potential resource allocation failures after the + * final login response has been sent. 
+ */ + if (login->login_complete) { + int rc = iscsit_start_kthreads(conn); + if (rc) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + return -1; + } + } if (conn->conn_transport->iscsit_put_login_tx(conn, login, login->rsp_length + padding) < 0) - return -1; + goto err; login->rsp_length = 0; mutex_lock(&sess->cmdsn_mutex); @@ -373,6 +388,23 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log mutex_unlock(&sess->cmdsn_mutex); return 0; + +err: + if (login->login_complete) { + if (conn->rx_thread && conn->rx_thread_active) { + send_sig(SIGINT, conn->rx_thread, 1); + kthread_stop(conn->rx_thread); + } + if (conn->tx_thread && conn->tx_thread_active) { + send_sig(SIGINT, conn->tx_thread, 1); + kthread_stop(conn->tx_thread); + } + spin_lock(&iscsit_global->ts_bitmap_lock); + bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id, + get_order(1)); + spin_unlock(&iscsit_global->ts_bitmap_lock); + } + return -1; } static void iscsi_target_sk_data_ready(struct sock *sk) diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 0b0de3647478..c2e9fea90b4a 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -747,7 +747,7 @@ static ssize_t store_pi_prot_type(struct se_dev_attrib *da, if (!dev->transport->init_prot || !dev->transport->free_prot) { /* 0 is only allowed value for non-supporting backends */ if (flag == 0) - return 0; + return count; pr_err("DIF protection not supported by backend: %s\n", dev->transport->name); @@ -1590,9 +1590,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( u8 type = 0; if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) - return 0; + return count; if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) - return 0; + return count; if (dev->export_count) { pr_debug("Unable to process APTPL metadata while" @@ -1658,22 +1658,32 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( * PR APTPL Metadata for Reservation */ case Opt_res_holder: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; res_holder = arg; break; case Opt_res_type: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; type = (u8)arg; break; case Opt_res_scope: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; break; case Opt_res_all_tg_pt: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; all_tg_pt = (int)arg; break; case Opt_mapped_lun: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; mapped_lun = (u64)arg; break; /* @@ -1701,14 +1711,20 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( } break; case Opt_tpgt: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; tpgt = (u16)arg; break; case Opt_port_rtpi: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; break; case Opt_target_lun: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; target_lun = (u64)arg; break; default: @@ -1985,7 +2001,7 @@ static ssize_t target_core_store_alua_lu_gp( lu_gp_mem = dev->dev_alua_lu_gp_mem; if (!lu_gp_mem) - return 0; + return count; if (count > LU_GROUP_NAME_BUF) { pr_err("ALUA LU Group Alias too large!\n"); diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 0fdbe43b7dad..5ab7100de17e 100644 --- a/drivers/target/target_core_pr.c +++ 
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 0fdbe43b7dad..5ab7100de17e 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1474,7 +1474,7 @@ core_scsi3_decode_spec_i_port(
 	LIST_HEAD(tid_dest_list);
 	struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
 	unsigned char *buf, *ptr, proto_ident;
-	const unsigned char *i_str;
+	const unsigned char *i_str = NULL;
 	char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
 	sense_reason_t ret;
 	u32 tpdl, tid_len = 0;
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 4703f403f31c..384cf8894411 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -333,6 +333,7 @@ static int rd_configure_device(struct se_device *dev)
 	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
 	dev->dev_attrib.hw_max_sectors = UINT_MAX;
 	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
+	dev->dev_attrib.is_nonrot = 1;
 
 	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index b0744433315a..b5ba1ec3c354 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -454,10 +454,17 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 		    cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
 			buf[4] = 0x5;
 		else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
-		       cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
+			 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
 			buf[4] = 0x4;
 	}
 
+	/* logical unit supports type 1 and type 3 protection */
+	if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
+	    (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
+	    (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
+		buf[4] |= (0x3 << 3);
+	}
+
 	/* Set HEADSUP, ORDSUP, SIMPSUP */
 	buf[5] = 0x07;
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index d5dd357ba57c..b49f97c734d0 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -405,7 +405,6 @@ static SIMPLE_DEV_PM_OPS(hisi_thermal_pm_ops,
 static struct platform_driver hisi_thermal_driver = {
 	.driver = {
 		.name		= "hisi_thermal",
-		.owner		= THIS_MODULE,
 		.pm		= &hisi_thermal_pm_ops,
 		.of_match_table = of_hisi_thermal_match,
 	},
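The hisi_thermal hunk above drops an explicit module owner. This is safe because platform_driver_register() is itself a macro that passes THIS_MODULE down to __platform_driver_register(), so the core fills in the owner and a value set in the struct is redundant. A minimal sketch of the resulting idiom (the demo_* names are hypothetical):

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	return 0;
}

/* No .owner here: platform_driver_register() supplies THIS_MODULE. */
static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.remove	= demo_remove,
	.driver	= {
		.name = "demo",
	},
};
module_platform_driver(demo_driver);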
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 4672250b329f..63a448f9d93b 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -229,7 +229,8 @@ static int allocate_power(struct thermal_zone_device *tz,
 	struct thermal_instance *instance;
 	struct power_allocator_params *params = tz->governor_data;
 	u32 *req_power, *max_power, *granted_power, *extra_actor_power;
-	u32 total_req_power, max_allocatable_power;
+	u32 *weighted_req_power;
+	u32 total_req_power, max_allocatable_power, total_weighted_req_power;
 	u32 total_granted_power, power_range;
 	int i, num_actors, total_weight, ret = 0;
 	int trip_max_desired_temperature = params->trip_max_desired_temperature;
@@ -247,16 +248,17 @@ static int allocate_power(struct thermal_zone_device *tz,
 	}
 
 	/*
-	 * We need to allocate three arrays of the same size:
-	 * req_power, max_power and granted_power. They are going to
-	 * be needed until this function returns. Allocate them all
-	 * in one go to simplify the allocation and deallocation
-	 * logic.
+	 * We need to allocate five arrays of the same size:
+	 * req_power, max_power, granted_power, extra_actor_power and
+	 * weighted_req_power. They are going to be needed until this
+	 * function returns. Allocate them all in one go to simplify
+	 * the allocation and deallocation logic.
 	 */
 	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power));
 	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
 	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
-	req_power = devm_kcalloc(&tz->device, num_actors * 4,
+	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
+	req_power = devm_kcalloc(&tz->device, num_actors * 5,
 				 sizeof(*req_power), GFP_KERNEL);
 	if (!req_power) {
 		ret = -ENOMEM;
@@ -266,8 +268,10 @@ static int allocate_power(struct thermal_zone_device *tz,
 	max_power = &req_power[num_actors];
 	granted_power = &req_power[2 * num_actors];
 	extra_actor_power = &req_power[3 * num_actors];
+	weighted_req_power = &req_power[4 * num_actors];
 
 	i = 0;
+	total_weighted_req_power = 0;
 	total_req_power = 0;
 	max_allocatable_power = 0;
 
@@ -289,13 +293,14 @@ static int allocate_power(struct thermal_zone_device *tz,
 		else
 			weight = instance->weight;
 
-		req_power[i] = frac_to_int(weight * req_power[i]);
+		weighted_req_power[i] = frac_to_int(weight * req_power[i]);
 
 		if (power_actor_get_max_power(cdev, tz, &max_power[i]))
 			continue;
 
 		total_req_power += req_power[i];
 		max_allocatable_power += max_power[i];
+		total_weighted_req_power += weighted_req_power[i];
 
 		i++;
 	}
@@ -303,8 +308,9 @@ static int allocate_power(struct thermal_zone_device *tz,
 	power_range = pid_controller(tz, current_temp, control_temp,
 				     max_allocatable_power);
 
-	divvy_up_power(req_power, max_power, num_actors, total_req_power,
-		       power_range, granted_power, extra_actor_power);
+	divvy_up_power(weighted_req_power, max_power, num_actors,
+		       total_weighted_req_power, power_range, granted_power,
+		       extra_actor_power);
 
 	total_granted_power = 0;
 	i = 0;
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index c8e35c1a43dc..e0da3865e060 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -1,6 +1,6 @@
 config EXYNOS_THERMAL
 	tristate "Exynos thermal management unit driver"
-	depends on OF
+	depends on THERMAL_OF
 	help
 	  If you say yes here you get support for the TMU (Thermal Management
 	  Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
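The weighted_req_power rework above stops overwriting each actor's raw request with its weighted value: the raw requests stay available (e.g. for totals and tracing) while the division of the PID output uses the weighted values, each actor receiving a share proportional to its weighted request. A simplified model of that proportional split, not the governor's actual divvy_up_power() (which additionally redistributes the surplus of actors capped by max_power); split_power is a hypothetical name:

#include <stdint.h>

/* Give each of the n actors a share of 'power_range' proportional
 * to its weighted request.  The 64-bit intermediate avoids overflow
 * when power_range * weighted_req[i] exceeds 32 bits. */
static void split_power(const uint32_t *weighted_req,
			uint32_t total_weighted, uint32_t power_range,
			uint32_t *granted, int n)
{
	int i;

	for (i = 0; i < n; i++)
		granted[i] = total_weighted ?
			(uint32_t)((uint64_t)power_range * weighted_req[i] /
				   total_weighted) : 0;
}

Before the fix, total_req_power was a sum of already-weighted entries, so the denominator no longer matched the numerators and actors with small weights were granted disproportionate power.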
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 531f4b179871..c96ff10b869e 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1296,7 +1296,6 @@ static struct thermal_zone_of_device_ops exynos_sensor_ops = {
 
 static int exynos_tmu_probe(struct platform_device *pdev)
 {
-	struct exynos_tmu_platform_data *pdata;
 	struct exynos_tmu_data *data;
 	int ret;
 
@@ -1318,8 +1317,6 @@ static int exynos_tmu_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_sensor;
 
-	pdata = data->pdata;
-
 	INIT_WORK(&data->irq_work, exynos_tmu_work);
 
 	data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
@@ -1392,6 +1389,8 @@ err_clk_sec:
 	if (!IS_ERR(data->clk_sec))
 		clk_unprepare(data->clk_sec);
 err_sensor:
+	if (!IS_ERR_OR_NULL(data->regulator))
+		regulator_disable(data->regulator);
 	thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
 
 	return ret;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 04659bfb888b..4ca211be4c0f 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1333,6 +1333,7 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
 	return -ENODEV;
 
 unbind:
+	device_remove_file(&tz->device, &pos->weight_attr);
 	device_remove_file(&tz->device, &pos->attr);
 	sysfs_remove_link(&tz->device.kobj, pos->name);
 	release_idr(&tz->idr, &tz->lock, pos->id);
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 74fea4fa41b1..3ad48e1c0c57 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1024,7 +1024,18 @@ static struct platform_driver ci_hdrc_driver = {
 	},
 };
 
-module_platform_driver(ci_hdrc_driver);
+static int __init ci_hdrc_platform_register(void)
+{
+	ci_hdrc_host_driver_init();
+	return platform_driver_register(&ci_hdrc_driver);
+}
+module_init(ci_hdrc_platform_register);
+
+static void __exit ci_hdrc_platform_unregister(void)
+{
+	platform_driver_unregister(&ci_hdrc_driver);
+}
+module_exit(ci_hdrc_platform_unregister);
 
 MODULE_ALIAS("platform:ci_hdrc");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 6cf87b8b13a8..7161439def19 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -249,9 +249,12 @@ int ci_hdrc_host_init(struct ci_hdrc *ci)
 	rdrv->name	= "host";
 	ci->roles[CI_ROLE_HOST] = rdrv;
 
+	return 0;
+}
+
+void ci_hdrc_host_driver_init(void)
+{
 	ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides);
 	orig_bus_suspend = ci_ehci_hc_driver.bus_suspend;
 	ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend;
-
-	return 0;
 }
diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
index 5707bf379bfb..0f12f131bdd3 100644
--- a/drivers/usb/chipidea/host.h
+++ b/drivers/usb/chipidea/host.h
@@ -5,6 +5,7 @@
 
 int ci_hdrc_host_init(struct ci_hdrc *ci);
 void ci_hdrc_host_destroy(struct ci_hdrc *ci);
+void ci_hdrc_host_driver_init(void);
 
 #else
 
@@ -18,6 +19,11 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
 
 }
 
+static void ci_hdrc_host_driver_init(void)
+{
+
+}
+
 #endif
 
 #endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index f7f35a36c09a..6df9715a4bcd 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -699,6 +699,10 @@ static inline int hidg_get_minor(void)
 	int ret;
 
 	ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL);
+	if (ret >= HIDG_MINORS) {
+		ida_simple_remove(&hidg_ida, ret);
+		ret = -ENODEV;
+	}
 
 	return ret;
 }
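In the hidg_get_minor() fix above, ida_simple_get() is called with end = 0, which means "no upper limit", so the allocator can legally hand back a minor beyond HIDG_MINORS once the low ids are taken; the new check releases the id and fails with -ENODEV. The same bound can instead be enforced by the allocator itself, since the end argument is exclusive and exhaustion then surfaces as -ENOSPC. A sketch of that alternative (DEMO_MINORS and the demo_* names are hypothetical):

#include <linux/idr.h>

#define DEMO_MINORS 4	/* hypothetical limit on minor numbers */

static DEFINE_IDA(demo_ida);

static int demo_get_minor(void)
{
	/* 'end' is exclusive; passing 0 would mean "no limit".  With a
	 * real bound, ida_simple_get() returns -ENOSPC by itself once
	 * DEMO_MINORS ids are in use, so no post-check is needed. */
	return ida_simple_get(&demo_ida, 0, DEMO_MINORS, GFP_KERNEL);
}

static void demo_put_minor(int minor)
{
	ida_simple_remove(&demo_ida, minor);
}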
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 44173df27273..357f63f47b42 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1248,7 +1248,15 @@ static struct config_item_type printer_func_type = {
 
 static inline int gprinter_get_minor(void)
 {
-	return ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
+	int ret;
+
+	ret = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
+	if (ret >= PRINTER_MINORS) {
+		ida_simple_remove(&printer_ida, ret);
+		ret = -ENODEV;
+	}
+
+	return ret;
 }
 
 static inline void gprinter_put_minor(int minor)
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 6d3eb8b00a48..531861547253 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1162,14 +1162,14 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
 		factor = 1000;
 	} else {
 		ep_desc = &hs_epin_desc;
-		factor = 125;
+		factor = 8000;
 	}
 
 	/* pre-compute some values for iso_complete() */
 	uac2->p_framesize = opts->p_ssize * num_channels(opts->p_chmask);
 	rate = opts->p_srate * uac2->p_framesize;
-	uac2->p_interval = (1 << (ep_desc->bInterval - 1)) * factor;
+	uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1));
 	uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval,
 				prm->max_psize);
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index b04980cf6dc4..1efa61265d8d 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -779,7 +779,7 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
 	/* The current hw dequeue pointer */
 	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0));
 	deq_ptr_64 = tmp_32;
-	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(1));
+	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1(0));
 	deq_ptr_64 |= ((u64)tmp_32 << 32);
 
 	/* we have the dma addr of next bd that will be fetched by hardware */
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index 362ee8af5fce..89ed5e71a199 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -323,6 +323,7 @@ err4:
 
 err3:
 	put_device(&udc->dev);
+	device_del(&gadget->dev);
 
 err2:
 	put_device(&gadget->dev);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 3e442f77a2b9..9a8c936cd42c 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1792,7 +1792,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	int size;
 	int i, j, num_ports;
 
-	del_timer_sync(&xhci->cmd_timer);
+	if (timer_pending(&xhci->cmd_timer))
+		del_timer_sync(&xhci->cmd_timer);
 
 	/* Free the Event Ring Segment Table and the actual Event Ring */
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6a8fc52aed58..32f4d564494a 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -82,7 +82,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
 		return 0;
 	/* offset in TRBs */
 	segment_offset = trb - seg->trbs;
-	if (segment_offset > TRBS_PER_SEGMENT)
+	if (segment_offset >= TRBS_PER_SEGMENT)
 		return 0;
 	return seg->dma + (segment_offset * sizeof(*trb));
 }
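The f_uac2 hunk above is an arithmetic fix, not a cleanup. A high-speed bus carries 8000 microframes per second (125 us each), and an isochronous endpoint is serviced every 2^(bInterval-1) microframes, so the packet rate in packets per second is 8000 / 2^(bInterval-1). The old expression, (1 << (bInterval - 1)) * 125, computed the service period in microseconds instead, so dividing the byte rate by it produced a meaningless packet size. A standalone illustration of the two formulas (user-space C, purely for the arithmetic):

#include <stdio.h>

/* Correct: packets per second for a high-speed iso endpoint. */
static unsigned int hs_pkts_per_sec(unsigned int bInterval)
{
	return 8000u / (1u << (bInterval - 1));
}

/* Wrong: this is the service period in microseconds, not a rate. */
static unsigned int hs_period_us(unsigned int bInterval)
{
	return (1u << (bInterval - 1)) * 125u;
}

int main(void)
{
	/* bInterval = 1: serviced every microframe.
	 * rate = 8000 packets/s, but the old code yielded 125. */
	printf("rate: %u pkt/s, old value: %u\n",
	       hs_pkts_per_sec(1), hs_period_us(1));
	return 0;
}

With p_interval now a true rate, rate / p_interval correctly gives bytes per packet. The neighbouring xhci-ring.c change is a classic off-by-one: a TRB offset equal to TRBS_PER_SEGMENT is already one past the segment, so it must be rejected, not accepted.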
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 19b85ee98a72..876423b8892c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1099,6 +1099,8 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
 	{ USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
 	  .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
+	{ USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff),
+	  .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 9c63897b3a56..d156545728c2 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -145,7 +145,6 @@ static const struct usb_device_id id_table[] = {
 	{DEVICE_SWI(0x1199, 0x901c)},	/* Sierra Wireless EM7700 */
 	{DEVICE_SWI(0x1199, 0x901f)},	/* Sierra Wireless EM7355 */
 	{DEVICE_SWI(0x1199, 0x9040)},	/* Sierra Wireless Modem */
-	{DEVICE_SWI(0x1199, 0x9041)},	/* Sierra Wireless MC7305/MC7355 */
 	{DEVICE_SWI(0x1199, 0x9051)},	/* Netgear AirCard 340U */
 	{DEVICE_SWI(0x1199, 0x9053)},	/* Sierra Wireless Modem */
 	{DEVICE_SWI(0x1199, 0x9054)},	/* Sierra Wireless Modem */
@@ -158,6 +157,7 @@ static const struct usb_device_id id_table[] = {
 	{DEVICE_SWI(0x413c, 0x81a4)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a8)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a9)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+	{DEVICE_SWI(0x413c, 0x81b1)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
 
 	/* Huawei devices */
 	{DEVICE_HWI(0x03f0, 0x581d)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 46179a0828eb..07d1ecd564f7 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
 	},
+	{ USB_DEVICE(0x1199, 0x68AB) },	/* Sierra Wireless AR8550 */
 	/* AT&T Direct IP LTE modems */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
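The paired option.c/qcserial.c changes above move Sierra's 0x9041 from a plain vendor:product match to a vendor:product:interface-class match. The distinction matters on composite modems: USB_DEVICE() lets the driver bind any interface of the device, while USB_DEVICE_INTERFACE_CLASS() restricts binding to interfaces of the given class (vendor-specific 0xff here), and the attached blacklist can then keep selected interfaces free for other drivers, presumably the network-function ones. A hypothetical table contrasting the two styles (demo_ids is illustrative, not any real driver's table):

#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id demo_ids[] = {
	/* Matches on idVendor/idProduct alone: every interface. */
	{ USB_DEVICE(0x1199, 0x68ab) },
	/* Additionally requires bInterfaceClass == 0xff, so only the
	 * vendor-specific (serial) interfaces of the device match. */
	{ USB_DEVICE_INTERFACE_CLASS(0x1199, 0x9041, 0xff) },
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(usb, demo_ids);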
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 2fb29dfeffbd..563c510f285c 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -689,6 +689,23 @@ struct vfio_device *vfio_device_get_from_dev(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
 
+static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
+						     char *buf)
+{
+	struct vfio_device *device;
+
+	mutex_lock(&group->device_lock);
+	list_for_each_entry(device, &group->device_list, group_next) {
+		if (!strcmp(dev_name(device->dev), buf)) {
+			vfio_device_get(device);
+			break;
+		}
+	}
+	mutex_unlock(&group->device_lock);
+
+	return device;
+}
+
 /*
  * Caller must hold a reference to the vfio_device
  */
@@ -1198,53 +1215,53 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
 {
 	struct vfio_device *device;
 	struct file *filep;
-	int ret = -ENODEV;
+	int ret;
 
 	if (0 == atomic_read(&group->container_users) ||
 	    !group->container->iommu_driver || !vfio_group_viable(group))
 		return -EINVAL;
 
-	mutex_lock(&group->device_lock);
-	list_for_each_entry(device, &group->device_list, group_next) {
-		if (strcmp(dev_name(device->dev), buf))
-			continue;
+	device = vfio_device_get_from_name(group, buf);
+	if (!device)
+		return -ENODEV;
 
-		ret = device->ops->open(device->device_data);
-		if (ret)
-			break;
-		/*
-		 * We can't use anon_inode_getfd() because we need to modify
-		 * the f_mode flags directly to allow more than just ioctls
-		 */
-		ret = get_unused_fd_flags(O_CLOEXEC);
-		if (ret < 0) {
-			device->ops->release(device->device_data);
-			break;
-		}
+	ret = device->ops->open(device->device_data);
+	if (ret) {
+		vfio_device_put(device);
+		return ret;
+	}
 
-		filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
-					   device, O_RDWR);
-		if (IS_ERR(filep)) {
-			put_unused_fd(ret);
-			ret = PTR_ERR(filep);
-			device->ops->release(device->device_data);
-			break;
-		}
+	/*
+	 * We can't use anon_inode_getfd() because we need to modify
+	 * the f_mode flags directly to allow more than just ioctls
+	 */
+	ret = get_unused_fd_flags(O_CLOEXEC);
+	if (ret < 0) {
+		device->ops->release(device->device_data);
+		vfio_device_put(device);
+		return ret;
+	}
 
-		/*
-		 * TODO: add an anon_inode interface to do this.
-		 * Appears to be missing by lack of need rather than
-		 * explicitly prevented.  Now there's need.
-		 */
-		filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
+				   device, O_RDWR);
+	if (IS_ERR(filep)) {
+		put_unused_fd(ret);
+		ret = PTR_ERR(filep);
+		device->ops->release(device->device_data);
+		vfio_device_put(device);
+		return ret;
+	}
+
+	/*
+	 * TODO: add an anon_inode interface to do this.
+	 * Appears to be missing by lack of need rather than
+	 * explicitly prevented.  Now there's need.
+	 */
+	filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
 
-		vfio_device_get(device);
-		atomic_inc(&group->container_users);
+	atomic_inc(&group->container_users);
 
-		fd_install(ret, filep);
-		break;
-	}
-	mutex_unlock(&group->device_lock);
+	fd_install(ret, filep);
 
 	return ret;
 }
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index a9fe859f43c8..eec2f11809ff 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -683,11 +683,8 @@ static void *vhost_kvzalloc(unsigned long size)
 {
 	void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
 
-	if (!n) {
+	if (!n)
 		n = vzalloc(size);
-		if (!n)
-			return ERR_PTR(-ENOMEM);
-	}
 	return n;
 }
 
@@ -995,6 +992,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 	}
 	if (eventfp != d->log_file) {
 		filep = d->log_file;
+		d->log_file = eventfp;
 		ctx = d->log_ctx;
 		d->log_ctx = eventfp ?
 			eventfd_ctx_fileget(eventfp) : NULL;
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 658c34bb9076..1aaf89300621 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1306,10 +1306,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
 	int y;
 	int c = scr_readw((u16 *) vc->vc_pos);
 
+	ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
+
 	if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
 		return;
 
-	ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
 	if (vc->vc_cursor_type & 0x10)
 		fbcon_del_cursor_timer(info);
 	else
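The vhost_kvzalloc() simplification above is the standard try-kmalloc-then-vmalloc fallback: the kzalloc() attempt carries __GFP_NOWARN because it is allowed to fail quietly, and since vzalloc() also returns NULL on failure there is no reason to manufacture an ERR_PTR that every caller would then have to check differently. Either backing store can be released with kvfree(). A minimal sketch of the idiom, without the __GFP_REPEAT tuning of the original (demo_kvzalloc is a hypothetical name):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *demo_kvzalloc(size_t size)
{
	/* Try physically contiguous memory first; fall back to
	 * vmalloc space if the allocation is too large or memory is
	 * fragmented.  Both paths return NULL on failure. */
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;	/* release with kvfree(p) */
}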
config PLAT_VERSATILE_CLCD
-	def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
+	def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR
 	depends on ARM
 	depends on FB_ARMCLCD && FB=y
diff --git a/drivers/video/fbdev/omap2/dss/dss-of.c b/drivers/video/fbdev/omap2/dss/dss-of.c
index 928ee639c0c1..bf407b6ba15c 100644
--- a/drivers/video/fbdev/omap2/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/dss/dss-of.c
@@ -60,6 +60,8 @@ omapdss_of_get_next_port(const struct device_node *parent,
 			}
 			prev = port;
 		} while (of_node_cmp(port->name, "port") != 0);
+
+		of_node_put(ports);
 	}
 
 	return port;
@@ -94,7 +96,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
 	if (!port)
 		return NULL;
 
-	np = of_get_next_parent(port);
+	np = of_get_parent(port);
 
 	for (i = 0; i < 2 && np; ++i) {
 		struct property *prop;
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 86bd457d039d..50bce45e7f3d 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -653,7 +653,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
 		goto err_free_dma;
 	}
 
-	ret = clk_enable(priv->clk);
+	ret = clk_prepare_enable(priv->clk);
 	if (ret < 0) {
 		dev_err(dev, "failed to enable clock\n");
 		goto err_misc_deregister;
@@ -685,7 +685,7 @@ err_misc_deregister:
 	misc_deregister(&priv->misc_dev);
 
 err_disable_clk:
-	clk_disable(priv->clk);
+	clk_disable_unprepare(priv->clk);
 
 	return ret;
 }
diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c
index 111c2d1911d3..b5102aa6090d 100644
--- a/drivers/video/of_videomode.c
+++ b/drivers/video/of_videomode.c
@@ -44,11 +44,9 @@ int of_get_videomode(struct device_node *np, struct videomode *vm,
 		index = disp->native_mode;
 
 	ret = videomode_from_timings(disp, vm, index);
-	if (ret)
-		return ret;
 
 	display_timings_release(disp);
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(of_get_videomode);
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index 60e2a1677563..c96944b59856 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -313,6 +313,7 @@ err_init_vq:
 static void virtinput_remove(struct virtio_device *vdev)
 {
 	struct virtio_input *vi = vdev->priv;
+	void *buf;
 	unsigned long flags;
 
 	spin_lock_irqsave(&vi->lock, flags);
@@ -320,6 +321,9 @@ static void virtinput_remove(struct virtio_device *vdev)
 	spin_unlock_irqrestore(&vi->lock, flags);
 
 	input_unregister_device(vi->idev);
+	vdev->config->reset(vdev);
+	while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL)
+		kfree(buf);
 	vdev->config->del_vqs(vdev);
 	kfree(vi);
 }
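The pxa3xx-gcu conversion above follows the common-clock rule that clk_enable() may only be called on a clock that has already been prepared; clk_prepare_enable() performs both steps (the prepare half may sleep, e.g. waiting for a PLL to lock) and clk_disable_unprepare() undoes both. A hypothetical probe/remove pair showing the required pairing (the demo_* names are illustrative):

#include <linux/clk.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* prepare (may sleep) + enable in one call */
	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, clk);
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	/* every clk_prepare_enable() needs exactly this counterpart */
	clk_disable_unprepare(platform_get_drvdata(pdev));
	return 0;
}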
@@ -482,9 +482,10 @@ static void balloon_process(struct work_struct *work)
 	enum bp_state state = BP_DONE;
 	long credit;
 
-	mutex_lock(&balloon_mutex);
 
 	do {
+		mutex_lock(&balloon_mutex);
+
 		credit = current_credit();
 
 		if (credit > 0) {
@@ -499,17 +500,15 @@ static void balloon_process(struct work_struct *work)
 
 		state = update_schedule(state);
 
-#ifndef CONFIG_PREEMPT
-		if (need_resched())
-			schedule();
-#endif
+		mutex_unlock(&balloon_mutex);
+
+		cond_resched();
+
 	} while (credit && state == BP_DONE);
 
 	/* Schedule more work if there is some still to be done. */
 	if (state == BP_EAGAIN)
 		schedule_delayed_work(&balloon_worker,
 				      balloon_stats.schedule_delay * HZ);
-
-	mutex_unlock(&balloon_mutex);
 }
 
 /* Resets the Xen limit, sets new target, and kicks off processing. */
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 67b9163db718..0dbb222daaf1 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
 
 	pr_debug("priv %p\n", priv);
 
+	mutex_lock(&priv->lock);
 	while (!list_empty(&priv->maps)) {
 		map = list_entry(priv->maps.next, struct grant_map, next);
 		list_del(&map->next);
 		gntdev_put_map(NULL /* already removed */, map);
 	}
 	WARN_ON(!list_empty(&priv->freeable_maps));
+	mutex_unlock(&priv->lock);
 
 	if (use_ptemod)
 		mmu_notifier_unregister(&priv->mn, priv->mm);
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 9ad327238ba9..e30353575d5d 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -814,8 +814,10 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
 
 	rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
 			       addrs);
-	if (!rv)
+	if (!rv) {
 		vunmap(vaddr);
+		free_xenballooned_pages(node->nr_handles, node->hvm.pages);
+	}
 	else
 		WARN(1, "Leaking %p, size %u page(s)\n",
 		     vaddr, node->nr_handles);
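The balloon_process() rework above illustrates a general pattern for long-running work items: a given work item never runs concurrently with itself, so the mutex only needs to protect the shared state touched in one step, not the whole loop. Taking and dropping it per iteration plus an unconditional cond_resched() bounds both the lock hold time and the non-preemptible CPU time, replacing the open-coded #ifndef CONFIG_PREEMPT need_resched() dance. A sketch of the shape, with demo_do_one_step() as a hypothetical stand-in for the real work:

#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(demo_mutex);

/* Hypothetical unit of work; returns true while more remains. */
static bool demo_do_one_step(void)
{
	return false;
}

static void demo_work_fn(struct work_struct *work)
{
	bool more;

	do {
		/* Only the shared state is guarded; the work item
		 * cannot race with another instance of itself. */
		mutex_lock(&demo_mutex);
		more = demo_do_one_step();
		mutex_unlock(&demo_mutex);

		/* Let other tasks run on !CONFIG_PREEMPT kernels. */
		cond_resched();
	} while (more);
}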